svq3.c
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
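/*
 * Illustration only (not part of the decoder): a minimal sketch of how a
 * calling app might satisfy the requirement above. It assumes the app already
 * holds the ImageDescription payload in hypothetical variables 'image_desc' /
 * 'image_desc_size', pointing at the data that starts with the 'SVQ3' fourcc
 * (i.e. with the leading 4-byte atom size already stripped):
 *
 *     avctx->extradata = av_mallocz(image_desc_size +
 *                                   FF_INPUT_BUFFER_PADDING_SIZE);
 *     if (!avctx->extradata)
 *         return AVERROR(ENOMEM);
 *     memcpy(avctx->extradata, image_desc, image_desc_size);
 *     avctx->extradata_size = image_desc_size;
 */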
42 
43 #include "libavutil/attributes.h"
44 #include "internal.h"
45 #include "avcodec.h"
46 #include "mpegvideo.h"
47 #include "h264.h"
48 
49 #include "h264data.h" // FIXME FIXME FIXME
50 
51 #include "h264_mvpred.h"
52 #include "golomb.h"
53 #include "hpeldsp.h"
54 #include "rectangle.h"
55 #include "vdpau_internal.h"
56 
57 #if CONFIG_ZLIB
58 #include <zlib.h>
59 #endif
60 
61 #include "svq1.h"
62 #include "svq3.h"
63 
64 /**
65  * @file
66  * svq3 decoder.
67  */
68 
69 typedef struct {
70  H264Context h;
71  HpelDSPContext hdsp;
72  Picture *cur_pic;
73  Picture *next_pic;
74  Picture *last_pic;
75  int halfpel_flag;
76  int thirdpel_flag;
77  int unknown_flag;
78  int next_slice_index;
79  uint32_t watermark_key;
80  uint8_t *buf;
81  int buf_size;
82  int adaptive_quant;
83  int next_p_frame_damaged;
84  int h_edge_pos;
85  int v_edge_pos;
86  int last_frame_output;
87 } SVQ3Context;
88 
89 #define FULLPEL_MODE 1
90 #define HALFPEL_MODE 2
91 #define THIRDPEL_MODE 3
92 #define PREDICT_MODE 4
93 
94 /* dual scan (from some older h264 draft)
95  *  o-->o-->o   o
96  *          |  /|
97  *  o   o   o / o
98  *  | / |   |/  |
99  *  o   o   o   o
100  *    /
101  *  o-->o-->o-->o
102  */
103 static const uint8_t svq3_scan[16] = {
104  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
105  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
106  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
107  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
108 };
109 
110 static const uint8_t luma_dc_zigzag_scan[16] = {
111  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
112  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
113  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
114  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
115 };
116 
117 static const uint8_t svq3_pred_0[25][2] = {
118  { 0, 0 },
119  { 1, 0 }, { 0, 1 },
120  { 0, 2 }, { 1, 1 }, { 2, 0 },
121  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
122  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
123  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
124  { 2, 4 }, { 3, 3 }, { 4, 2 },
125  { 4, 3 }, { 3, 4 },
126  { 4, 4 }
127 };
128 
129 static const int8_t svq3_pred_1[6][6][5] = {
130  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
131  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
132  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
133  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
134  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
135  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
136  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
137  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
138  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
139  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
140  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
141  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
142 };
143 
144 static const struct {
145  uint8_t run;
146  uint8_t level;
147 } svq3_dct_tables[2][16] = {
148  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
149  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
150  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
151  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
152 };
153 
154 static const uint32_t svq3_dequant_coeff[32] = {
155  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
156  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
157  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
158  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
159 };
160 
161 static int svq3_decode_end(AVCodecContext *avctx);
162 
163 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
164 {
165  const int qmul = svq3_dequant_coeff[qp];
166 #define stride 16
167  int i;
168  int temp[16];
169  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
170 
171  for (i = 0; i < 4; i++) {
172  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
173  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
174  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
175  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
176 
177  temp[4 * i + 0] = z0 + z3;
178  temp[4 * i + 1] = z1 + z2;
179  temp[4 * i + 2] = z1 - z2;
180  temp[4 * i + 3] = z0 - z3;
181  }
182 
183  for (i = 0; i < 4; i++) {
184  const int offset = x_offset[i];
185  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
186  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
187  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
188  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
189 
190  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
191  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
192  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
193  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
194  }
195 }
196 #undef stride
197 
198 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
199  int stride, int qp, int dc)
200 {
201  const int qmul = svq3_dequant_coeff[qp];
202  int i;
203 
204  if (dc) {
205  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
206  : qmul * (block[0] >> 3) / 2);
207  block[0] = 0;
208  }
209 
210  for (i = 0; i < 4; i++) {
211  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
212  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
213  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
214  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
215 
216  block[0 + 4 * i] = z0 + z3;
217  block[1 + 4 * i] = z1 + z2;
218  block[2 + 4 * i] = z1 - z2;
219  block[3 + 4 * i] = z0 - z3;
220  }
221 
222  for (i = 0; i < 4; i++) {
223  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
224  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
225  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
226  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
227  const int rr = (dc + 0x80000);
228 
229  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
230  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
231  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
232  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
233  }
234 
235  memset(block, 0, 16 * sizeof(int16_t));
236 }
237 
238 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
239  int index, const int type)
240 {
241  static const uint8_t *const scan_patterns[4] =
242  { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
243 
244  int run, level, sign, limit;
245  unsigned vlc;
246  const int intra = 3 * type >> 2;
247  const uint8_t *const scan = scan_patterns[type];
248 
249  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
250  for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
251  if ((int32_t)vlc < 0)
252  return -1;
253 
254  sign = (vlc & 1) ? 0 : -1;
255  vlc = vlc + 1 >> 1;
256 
257  if (type == 3) {
258  if (vlc < 3) {
259  run = 0;
260  level = vlc;
261  } else if (vlc < 4) {
262  run = 1;
263  level = 1;
264  } else {
265  run = vlc & 0x3;
266  level = (vlc + 9 >> 2) - run;
267  }
268  } else {
269  if (vlc < 16U) {
270  run = svq3_dct_tables[intra][vlc].run;
271  level = svq3_dct_tables[intra][vlc].level;
272  } else if (intra) {
273  run = vlc & 0x7;
274  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
275  } else {
276  run = vlc & 0xF;
277  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
278  }
279  }
280 
281 
282  if ((index += run) >= limit)
283  return -1;
284 
285  block[scan[index]] = (level ^ sign) - sign;
286  }
287 
288  if (type != 2) {
289  break;
290  }
291  }
292 
293  return 0;
294 }
295 
296 static inline void svq3_mc_dir_part(SVQ3Context *s,
297  int x, int y, int width, int height,
298  int mx, int my, int dxy,
299  int thirdpel, int dir, int avg)
300 {
301  H264Context *h = &s->h;
302  const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
303  uint8_t *src, *dest;
304  int i, emu = 0;
305  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
306 
307  mx += x;
308  my += y;
309 
310  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
311  my < 0 || my >= s->v_edge_pos - height - 1) {
312  emu = 1;
313  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
314  my = av_clip(my, -16, s->v_edge_pos - height + 15);
315  }
316 
317  /* form component predictions */
318  dest = h->cur_pic.f.data[0] + x + y * h->linesize;
319  src = pic->f.data[0] + mx + my * h->linesize;
320 
321  if (emu) {
322  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
323  h->linesize, h->linesize,
324  width + 1, height + 1,
325  mx, my, s->h_edge_pos, s->v_edge_pos);
326  src = h->edge_emu_buffer;
327  }
328  if (thirdpel)
329  (avg ? h->dsp.avg_tpel_pixels_tab
330  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
331  width, height);
332  else
333  (avg ? s->hdsp.avg_pixels_tab
334  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
335  height);
336 
337  if (!(h->flags & CODEC_FLAG_GRAY)) {
338  mx = mx + (mx < (int) x) >> 1;
339  my = my + (my < (int) y) >> 1;
340  width = width >> 1;
341  height = height >> 1;
342  blocksize++;
343 
344  for (i = 1; i < 3; i++) {
345  dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
346  src = pic->f.data[i] + mx + my * h->uvlinesize;
347 
348  if (emu) {
349  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
350  h->uvlinesize, h->uvlinesize,
351  width + 1, height + 1,
352  mx, my, (s->h_edge_pos >> 1),
353  s->v_edge_pos >> 1);
354  src = h->edge_emu_buffer;
355  }
356  if (thirdpel)
357  (avg ? h->dsp.avg_tpel_pixels_tab
358  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
359  h->uvlinesize,
360  width, height);
361  else
362  (avg ? s->hdsp.avg_pixels_tab
363  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
364  h->uvlinesize,
365  height);
366  }
367  }
368 }
369 
370 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
371  int dir, int avg)
372 {
373  int i, j, k, mx, my, dx, dy, x, y;
374  H264Context *h = &s->h;
375  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
376  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
377  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
378  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
379  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
380 
381  for (i = 0; i < 16; i += part_height)
382  for (j = 0; j < 16; j += part_width) {
383  const int b_xy = (4 * h->mb_x + (j >> 2)) +
384  (4 * h->mb_y + (i >> 2)) * h->b_stride;
385  int dxy;
386  x = 16 * h->mb_x + j;
387  y = 16 * h->mb_y + i;
388  k = (j >> 2 & 1) + (i >> 1 & 2) +
389  (j >> 1 & 4) + (i & 8);
390 
391  if (mode != PREDICT_MODE) {
392  pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
393  } else {
394  mx = s->next_pic->motion_val[0][b_xy][0] << 1;
395  my = s->next_pic->motion_val[0][b_xy][1] << 1;
396 
397  if (dir == 0) {
398  mx = mx * h->frame_num_offset /
399  h->prev_frame_num_offset + 1 >> 1;
400  my = my * h->frame_num_offset /
401  h->prev_frame_num_offset + 1 >> 1;
402  } else {
403  mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
404  h->prev_frame_num_offset + 1 >> 1;
405  my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
406  h->prev_frame_num_offset + 1 >> 1;
407  }
408  }
409 
410  /* clip motion vector prediction to frame border */
411  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
412  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
413 
414  /* get (optional) motion vector differential */
415  if (mode == PREDICT_MODE) {
416  dx = dy = 0;
417  } else {
418  dy = svq3_get_se_golomb(&h->gb);
419  dx = svq3_get_se_golomb(&h->gb);
420 
421  if (dx == INVALID_VLC || dy == INVALID_VLC) {
422  av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
423  return -1;
424  }
425  }
426 
427  /* compute motion vector */
428  if (mode == THIRDPEL_MODE) {
429  int fx, fy;
430  mx = (mx + 1 >> 1) + dx;
431  my = (my + 1 >> 1) + dy;
432  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
433  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
434  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
435 
436  svq3_mc_dir_part(s, x, y, part_width, part_height,
437  fx, fy, dxy, 1, dir, avg);
438  mx += mx;
439  my += my;
440  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
441  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
442  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
443  dxy = (mx & 1) + 2 * (my & 1);
444 
445  svq3_mc_dir_part(s, x, y, part_width, part_height,
446  mx >> 1, my >> 1, dxy, 0, dir, avg);
447  mx *= 3;
448  my *= 3;
449  } else {
450  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
451  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
452 
453  svq3_mc_dir_part(s, x, y, part_width, part_height,
454  mx, my, 0, 0, dir, avg);
455  mx *= 6;
456  my *= 6;
457  }
458 
459  /* update mv_cache */
460  if (mode != PREDICT_MODE) {
461  int32_t mv = pack16to32(mx, my);
462 
463  if (part_height == 8 && i < 8) {
464  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
465 
466  if (part_width == 8 && j < 8)
467  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
468  }
469  if (part_width == 8 && j < 8)
470  AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
471  if (part_width == 4 || part_height == 4)
472  AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
473  }
474 
475  /* write back motion vectors */
476  fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
477  part_width >> 2, part_height >> 2, h->b_stride,
478  pack16to32(mx, my), 4);
479  }
480 
481  return 0;
482 }
483 
484 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
485 {
486  H264Context *h = &s->h;
487  int i, j, k, m, dir, mode;
488  int cbp = 0;
489  uint32_t vlc;
490  int8_t *top, *left;
491  const int mb_xy = h->mb_xy;
492  const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
493 
494  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
495  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
496  h->topright_samples_available = 0xFFFF;
497 
498  if (mb_type == 0) { /* SKIP */
499  if (h->pict_type == AV_PICTURE_TYPE_P ||
500  s->next_pic->mb_type[mb_xy] == -1) {
501  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
502  0, 0, 0, 0, 0, 0);
503 
504  if (h->pict_type == AV_PICTURE_TYPE_B)
505  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
506  0, 0, 0, 0, 1, 1);
507 
508  mb_type = MB_TYPE_SKIP;
509  } else {
510  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
511  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
512  return -1;
513  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
514  return -1;
515 
516  mb_type = MB_TYPE_16x16;
517  }
518  } else if (mb_type < 8) { /* INTER */
519  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
520  mode = THIRDPEL_MODE;
521  else if (s->halfpel_flag &&
522  s->thirdpel_flag == !get_bits1(&h->gb))
523  mode = HALFPEL_MODE;
524  else
525  mode = FULLPEL_MODE;
526 
527  /* fill caches */
528  /* note ref_cache should contain here:
529  * ????????
530  * ???11111
531  * N??11111
532  * N??11111
533  * N??11111
534  */
535 
536  for (m = 0; m < 2; m++) {
537  if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
538  for (i = 0; i < 4; i++)
539  AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
540  h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
541  } else {
542  for (i = 0; i < 4; i++)
543  AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
544  }
545  if (h->mb_y > 0) {
546  memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
547  h->cur_pic.motion_val[m][b_xy - h->b_stride],
548  4 * 2 * sizeof(int16_t));
549  memset(&h->ref_cache[m][scan8[0] - 1 * 8],
550  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
551 
552  if (h->mb_x < h->mb_width - 1) {
553  AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
554  h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
555  h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
556  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
557  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
558  } else
559  h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
560  if (h->mb_x > 0) {
561  AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
562  h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
563  h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
564  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
565  } else
566  h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
567  } else
568  memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
569  PART_NOT_AVAILABLE, 8);
570 
571  if (h->pict_type != AV_PICTURE_TYPE_B)
572  break;
573  }
574 
575  /* decode motion vector(s) and form prediction(s) */
576  if (h->pict_type == AV_PICTURE_TYPE_P) {
577  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
578  return -1;
579  } else { /* AV_PICTURE_TYPE_B */
580  if (mb_type != 2) {
581  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
582  return -1;
583  } else {
584  for (i = 0; i < 4; i++)
585  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
586  0, 4 * 2 * sizeof(int16_t));
587  }
588  if (mb_type != 1) {
589  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
590  return -1;
591  } else {
592  for (i = 0; i < 4; i++)
593  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
594  0, 4 * 2 * sizeof(int16_t));
595  }
596  }
597 
598  mb_type = MB_TYPE_16x16;
599  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
600  memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
601 
602  if (mb_type == 8) {
603  if (h->mb_x > 0) {
604  for (i = 0; i < 4; i++)
605  h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
606  if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
607  h->left_samples_available = 0x5F5F;
608  }
609  if (h->mb_y > 0) {
610  h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
611  h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
612  h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
613  h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
614 
615  if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
616  h->top_samples_available = 0x33FF;
617  }
618 
619  /* decode prediction codes for luma blocks */
620  for (i = 0; i < 16; i += 2) {
621  vlc = svq3_get_ue_golomb(&h->gb);
622 
623  if (vlc >= 25U) {
624  av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
625  return -1;
626  }
627 
628  left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
629  top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
630 
631  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
632  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
633 
634  if (left[1] == -1 || left[2] == -1) {
635  av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
636  return -1;
637  }
638  }
639  } else { /* mb_type == 33, DC_128_PRED block type */
640  for (i = 0; i < 4; i++)
641  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
642  }
643 
644  write_back_intra_pred_mode(h);
645 
646  if (mb_type == 8) {
647  ff_h264_check_intra4x4_pred_mode(h);
648 
649  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
650  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
651  } else {
652  for (i = 0; i < 4; i++)
653  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
654 
655  h->top_samples_available = 0x33FF;
656  h->left_samples_available = 0x5F5F;
657  }
658 
659  mb_type = MB_TYPE_INTRA4x4;
660  } else { /* INTRA16x16 */
661  dir = i_mb_type_info[mb_type - 8].pred_mode;
662  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
663 
664  if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
665  av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
666  return h->intra16x16_pred_mode;
667  }
668 
669  cbp = i_mb_type_info[mb_type - 8].cbp;
670  mb_type = MB_TYPE_INTRA16x16;
671  }
672 
673  if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
674  for (i = 0; i < 4; i++)
675  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
676  0, 4 * 2 * sizeof(int16_t));
677  if (h->pict_type == AV_PICTURE_TYPE_B) {
678  for (i = 0; i < 4; i++)
679  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
680  0, 4 * 2 * sizeof(int16_t));
681  }
682  }
683  if (!IS_INTRA4x4(mb_type)) {
684  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
685  }
686  if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
687  memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
688  }
689 
690  if (!IS_INTRA16x16(mb_type) &&
691  (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
692  if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
693  av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
694  return -1;
695  }
696 
697  cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
698  : golomb_to_inter_cbp[vlc];
699  }
700  if (IS_INTRA16x16(mb_type) ||
701  (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
702  h->qscale += svq3_get_se_golomb(&h->gb);
703 
704  if (h->qscale > 31u) {
705  av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
706  return -1;
707  }
708  }
709  if (IS_INTRA16x16(mb_type)) {
710  AV_ZERO128(h->mb_luma_dc[0] + 0);
711  AV_ZERO128(h->mb_luma_dc[0] + 8);
712  if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
713  av_log(h->avctx, AV_LOG_ERROR,
714  "error while decoding intra luma dc\n");
715  return -1;
716  }
717  }
718 
719  if (cbp) {
720  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
721  const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
722 
723  for (i = 0; i < 4; i++)
724  if ((cbp & (1 << i))) {
725  for (j = 0; j < 4; j++) {
726  k = index ? (1 * (j & 1) + 2 * (i & 1) +
727  2 * (j & 2) + 4 * (i & 2))
728  : (4 * i + j);
729  h->non_zero_count_cache[scan8[k]] = 1;
730 
731  if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
732  av_log(h->avctx, AV_LOG_ERROR,
733  "error while decoding block\n");
734  return -1;
735  }
736  }
737  }
738 
739  if ((cbp & 0x30)) {
740  for (i = 1; i < 3; ++i)
741  if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
742  av_log(h->avctx, AV_LOG_ERROR,
743  "error while decoding chroma dc block\n");
744  return -1;
745  }
746 
747  if ((cbp & 0x20)) {
748  for (i = 1; i < 3; i++) {
749  for (j = 0; j < 4; j++) {
750  k = 16 * i + j;
751  h->non_zero_count_cache[scan8[k]] = 1;
752 
753  if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
754  av_log(h->avctx, AV_LOG_ERROR,
755  "error while decoding chroma ac block\n");
756  return -1;
757  }
758  }
759  }
760  }
761  }
762  }
763 
764  h->cbp = cbp;
765  h->cur_pic.mb_type[mb_xy] = mb_type;
766 
767  if (IS_INTRA(mb_type))
768  h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
769 
770  return 0;
771 }
772 
773 static int svq3_decode_slice_header(AVCodecContext *avctx)
774 {
775  SVQ3Context *s = avctx->priv_data;
776  H264Context *h = &s->h;
777  const int mb_xy = h->mb_xy;
778  int i, header;
779  unsigned slice_id;
780 
781  header = get_bits(&h->gb, 8);
782 
783  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
784  /* TODO: what? */
785  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
786  return -1;
787  } else {
788  int length = header >> 5 & 3;
789 
790  s->next_slice_index = get_bits_count(&h->gb) +
791  8 * show_bits(&h->gb, 8 * length) +
792  8 * length;
793 
794  if (s->next_slice_index > h->gb.size_in_bits) {
795  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
796  return -1;
797  }
798 
799  h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
800  skip_bits(&h->gb, 8);
801 
802  if (s->watermark_key) {
803  uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
804  AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
805  header ^ s->watermark_key);
806  }
807  if (length > 0) {
808  memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
809  &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
810  }
811  skip_bits_long(&h->gb, 0);
812  }
813 
814  if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
815  av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
816  return -1;
817  }
818 
819  h->slice_type = golomb_to_pict_type[slice_id];
820 
821  if ((header & 0x9F) == 2) {
822  i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
823  h->mb_skip_run = get_bits(&h->gb, i) -
824  (h->mb_y * h->mb_width + h->mb_x);
825  } else {
826  skip_bits1(&h->gb);
827  h->mb_skip_run = 0;
828  }
829 
830  h->slice_num = get_bits(&h->gb, 8);
831  h->qscale = get_bits(&h->gb, 5);
832  s->adaptive_quant = get_bits1(&h->gb);
833 
834  /* unknown fields */
835  skip_bits1(&h->gb);
836 
837  if (s->unknown_flag)
838  skip_bits1(&h->gb);
839 
840  skip_bits1(&h->gb);
841  skip_bits(&h->gb, 2);
842 
843  if (skip_1stop_8data_bits(&h->gb) < 0)
844  return AVERROR_INVALIDDATA;
845 
846  /* reset intra predictors and invalidate motion vector references */
847  if (h->mb_x > 0) {
848  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
849  -1, 4 * sizeof(int8_t));
850  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
851  -1, 8 * sizeof(int8_t) * h->mb_x);
852  }
853  if (h->mb_y > 0) {
854  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
855  -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
856 
857  if (h->mb_x > 0)
858  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
859  }
860 
861  return 0;
862 }
863 
864 static av_cold int svq3_decode_init(AVCodecContext *avctx)
865 {
866  SVQ3Context *s = avctx->priv_data;
867  H264Context *h = &s->h;
868  int m;
869  unsigned char *extradata;
870  unsigned char *extradata_end;
871  unsigned int size;
872  int marker_found = 0;
873  int ret;
874 
875  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
876  s->last_pic = av_mallocz(sizeof(*s->last_pic));
877  s->next_pic = av_mallocz(sizeof(*s->next_pic));
878  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
879  ret = AVERROR(ENOMEM);
880  goto fail;
881  }
882 
883  if ((ret = ff_h264_decode_init(avctx)) < 0)
884  goto fail;
885 
886  ff_hpeldsp_init(&s->hdsp, avctx->flags);
887  h->flags = avctx->flags;
888  h->is_complex = 1;
889  h->sps.chroma_format_idc = 1;
890  h->picture_structure = PICT_FRAME;
891  avctx->pix_fmt = avctx->codec->pix_fmts[0];
892 
893  h->chroma_qp[0] = h->chroma_qp[1] = 4;
894  h->chroma_x_shift = h->chroma_y_shift = 1;
895 
896  s->halfpel_flag = 1;
897  s->thirdpel_flag = 1;
898  s->unknown_flag = 0;
899 
900  /* prowl for the "SEQH" marker in the extradata */
901  extradata = (unsigned char *)avctx->extradata;
902  extradata_end = avctx->extradata + avctx->extradata_size;
903  if (extradata) {
904  for (m = 0; m + 8 < avctx->extradata_size; m++) {
905  if (!memcmp(extradata, "SEQH", 4)) {
906  marker_found = 1;
907  break;
908  }
909  extradata++;
910  }
911  }
912 
913  /* if a match was found, parse the extra data */
914  if (marker_found) {
915  GetBitContext gb;
916  int frame_size_code;
917 
918  size = AV_RB32(&extradata[4]);
919  if (size > extradata_end - extradata - 8) {
920  ret = AVERROR_INVALIDDATA;
921  goto fail;
922  }
923  init_get_bits(&gb, extradata + 8, size * 8);
924 
925  /* 'frame size code' and optional 'width, height' */
926  frame_size_code = get_bits(&gb, 3);
927  switch (frame_size_code) {
928  case 0:
929  avctx->width = 160;
930  avctx->height = 120;
931  break;
932  case 1:
933  avctx->width = 128;
934  avctx->height = 96;
935  break;
936  case 2:
937  avctx->width = 176;
938  avctx->height = 144;
939  break;
940  case 3:
941  avctx->width = 352;
942  avctx->height = 288;
943  break;
944  case 4:
945  avctx->width = 704;
946  avctx->height = 576;
947  break;
948  case 5:
949  avctx->width = 240;
950  avctx->height = 180;
951  break;
952  case 6:
953  avctx->width = 320;
954  avctx->height = 240;
955  break;
956  case 7:
957  avctx->width = get_bits(&gb, 12);
958  avctx->height = get_bits(&gb, 12);
959  break;
960  }
961 
962  s->halfpel_flag = get_bits1(&gb);
963  s->thirdpel_flag = get_bits1(&gb);
964 
965  /* unknown fields */
966  skip_bits1(&gb);
967  skip_bits1(&gb);
968  skip_bits1(&gb);
969  skip_bits1(&gb);
970 
971  h->low_delay = get_bits1(&gb);
972 
973  /* unknown field */
974  skip_bits1(&gb);
975 
976  if (skip_1stop_8data_bits(&gb) < 0) {
977  ret = AVERROR_INVALIDDATA;
978  goto fail;
979  }
980 
981  s->unknown_flag = get_bits1(&gb);
982  avctx->has_b_frames = !h->low_delay;
983  if (s->unknown_flag) {
984 #if CONFIG_ZLIB
985  unsigned watermark_width = svq3_get_ue_golomb(&gb);
986  unsigned watermark_height = svq3_get_ue_golomb(&gb);
987  int u1 = svq3_get_ue_golomb(&gb);
988  int u2 = get_bits(&gb, 8);
989  int u3 = get_bits(&gb, 2);
990  int u4 = svq3_get_ue_golomb(&gb);
991  unsigned long buf_len = watermark_width *
992  watermark_height * 4;
993  int offset = get_bits_count(&gb) + 7 >> 3;
994  uint8_t *buf;
995 
996  if (watermark_height <= 0 ||
997  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
998  ret = -1;
999  goto fail;
1000  }
1001 
1002  buf = av_malloc(buf_len);
1003  av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
1004  watermark_width, watermark_height);
1005  av_log(avctx, AV_LOG_DEBUG,
1006  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1007  u1, u2, u3, u4, offset);
1008  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1009  size - offset) != Z_OK) {
1010  av_log(avctx, AV_LOG_ERROR,
1011  "could not uncompress watermark logo\n");
1012  av_free(buf);
1013  ret = -1;
1014  goto fail;
1015  }
1016  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1017  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1018  av_log(avctx, AV_LOG_DEBUG,
1019  "watermark key %#x\n", s->watermark_key);
1020  av_free(buf);
1021 #else
1022  av_log(avctx, AV_LOG_ERROR,
1023  "this svq3 file contains watermark which need zlib support compiled in\n");
1024  ret = -1;
1025  goto fail;
1026 #endif
1027  }
1028  }
1029 
1030  h->width = avctx->width;
1031  h->height = avctx->height;
1032  h->mb_width = (h->width + 15) / 16;
1033  h->mb_height = (h->height + 15) / 16;
1034  h->mb_stride = h->mb_width + 1;
1035  h->mb_num = h->mb_width * h->mb_height;
1036  h->b_stride = 4 * h->mb_width;
1037  s->h_edge_pos = h->mb_width * 16;
1038  s->v_edge_pos = h->mb_height * 16;
1039 
1040  if ((ret = ff_h264_alloc_tables(h)) < 0) {
1041  av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1042  goto fail;
1043  }
1044 
1045  return 0;
1046 fail:
1047  svq3_decode_end(avctx);
1048  return ret;
1049 }
1050 
1051 static void free_picture(AVCodecContext *avctx, Picture *pic)
1052 {
1053  int i;
1054  for (i = 0; i < 2; i++) {
1055  av_buffer_unref(&pic->motion_val_buf[i]);
1056  av_buffer_unref(&pic->ref_index_buf[i]);
1057  }
1058  av_buffer_unref(&pic->mb_type_buf);
1059 
1060  av_frame_unref(&pic->f);
1061 }
1062 
1063 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1064 {
1065  SVQ3Context *s = avctx->priv_data;
1066  H264Context *h = &s->h;
1067  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1068  const int mb_array_size = h->mb_stride * h->mb_height;
1069  const int b4_stride = h->mb_width * 4 + 1;
1070  const int b4_array_size = b4_stride * h->mb_height * 4;
1071  int ret;
1072 
1073  if (!pic->motion_val_buf[0]) {
1074  int i;
1075 
1076  pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1077  if (!pic->mb_type_buf)
1078  return AVERROR(ENOMEM);
1079  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1080 
1081  for (i = 0; i < 2; i++) {
1082  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1083  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1084  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1085  ret = AVERROR(ENOMEM);
1086  goto fail;
1087  }
1088 
1089  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1090  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1091  }
1092  }
1093  pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1094 
1095  ret = ff_get_buffer(avctx, &pic->f,
1096  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1097  if (ret < 0)
1098  goto fail;
1099 
1100  if (!h->edge_emu_buffer) {
1101  h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1102  if (!h->edge_emu_buffer)
1103  return AVERROR(ENOMEM);
1104  }
1105 
1106  h->linesize = pic->f.linesize[0];
1107  h->uvlinesize = pic->f.linesize[1];
1108 
1109  return 0;
1110 fail:
1111  free_picture(avctx, pic);
1112  return ret;
1113 }
1114 
1115 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1116  int *got_frame, AVPacket *avpkt)
1117 {
1118  SVQ3Context *s = avctx->priv_data;
1119  H264Context *h = &s->h;
1120  int buf_size = avpkt->size;
1121  int left;
1122  uint8_t *buf;
1123  int ret, m, i;
1124 
1125  /* special case for last picture */
1126  if (buf_size == 0) {
1127  if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1128  ret = av_frame_ref(data, &s->next_pic->f);
1129  if (ret < 0)
1130  return ret;
1131  s->last_frame_output = 1;
1132  *got_frame = 1;
1133  }
1134  return 0;
1135  }
1136 
1137  h->mb_x = h->mb_y = h->mb_xy = 0;
1138 
1139  if (s->watermark_key) {
1140  av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1141  if (!s->buf)
1142  return AVERROR(ENOMEM);
1143  memcpy(s->buf, avpkt->data, buf_size);
1144  buf = s->buf;
1145  } else {
1146  buf = avpkt->data;
1147  }
1148 
1149  init_get_bits(&h->gb, buf, 8 * buf_size);
1150 
1151  if (svq3_decode_slice_header(avctx))
1152  return -1;
1153 
1154  h->pict_type = h->slice_type;
1155 
1156  if (h->pict_type != AV_PICTURE_TYPE_B)
1157  FFSWAP(Picture*, s->next_pic, s->last_pic);
1158 
1159  av_frame_unref(&s->cur_pic->f);
1160 
1161  /* for skipping the frame */
1162  s->cur_pic->f.pict_type = h->pict_type;
1163  s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1164 
1165  ret = get_buffer(avctx, s->cur_pic);
1166  if (ret < 0)
1167  return ret;
1168 
1169  h->cur_pic_ptr = s->cur_pic;
1170  av_frame_unref(&h->cur_pic.f);
1171  h->cur_pic = *s->cur_pic;
1172  ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1173  if (ret < 0)
1174  return ret;
1175 
1176  for (i = 0; i < 16; i++) {
1177  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1178  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1179  }
1180  for (i = 0; i < 16; i++) {
1181  h->block_offset[16 + i] =
1182  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1183  h->block_offset[48 + 16 + i] =
1184  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1185  }
1186 
1187  if (h->pict_type != AV_PICTURE_TYPE_I) {
1188  if (!s->last_pic->f.data[0]) {
1189  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1190  av_frame_unref(&s->last_pic->f);
1191  ret = get_buffer(avctx, s->last_pic);
1192  if (ret < 0)
1193  return ret;
1194  memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1195  memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1196  s->last_pic->f.linesize[1]);
1197  memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1198  s->last_pic->f.linesize[2]);
1199  }
1200 
1201  if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1202  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1203  av_frame_unref(&s->next_pic->f);
1204  ret = get_buffer(avctx, s->next_pic);
1205  if (ret < 0)
1206  return ret;
1207  memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1208  memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1209  s->next_pic->f.linesize[1]);
1210  memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1211  s->next_pic->f.linesize[2]);
1212  }
1213  }
1214 
1215  if (avctx->debug & FF_DEBUG_PICT_INFO)
1216  av_log(h->avctx, AV_LOG_DEBUG,
1217  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1218  av_get_picture_type_char(h->pict_type),
1219  s->halfpel_flag, s->thirdpel_flag,
1220  s->adaptive_quant, h->qscale, h->slice_num);
1221 
1222  if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1223  avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1224  avctx->skip_frame >= AVDISCARD_ALL)
1225  return 0;
1226 
1227  if (s->next_p_frame_damaged) {
1228  if (h->pict_type == AV_PICTURE_TYPE_B)
1229  return 0;
1230  else
1231  s->next_p_frame_damaged = 0;
1232  }
1233 
1234  if (h->pict_type == AV_PICTURE_TYPE_B) {
1235  h->frame_num_offset = h->slice_num - h->prev_frame_num;
1236 
1237  if (h->frame_num_offset < 0)
1238  h->frame_num_offset += 256;
1239  if (h->frame_num_offset == 0 ||
1240  h->frame_num_offset >= h->prev_frame_num_offset) {
1241  av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1242  return -1;
1243  }
1244  } else {
1245  h->prev_frame_num = h->frame_num;
1246  h->frame_num = h->slice_num;
1247  h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1248 
1249  if (h->prev_frame_num_offset < 0)
1250  h->prev_frame_num_offset += 256;
1251  }
1252 
1253  for (m = 0; m < 2; m++) {
1254  int i;
1255  for (i = 0; i < 4; i++) {
1256  int j;
1257  for (j = -1; j < 4; j++)
1258  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1259  if (i < 3)
1260  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1261  }
1262  }
1263 
1264  for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1265  for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1266  unsigned mb_type;
1267  h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1268 
1269  if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1270  ((get_bits_count(&h->gb) & 7) == 0 ||
1271  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1272  skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1273  h->gb.size_in_bits = 8 * buf_size;
1274 
1275  if (svq3_decode_slice_header(avctx))
1276  return -1;
1277 
1278  /* TODO: support s->mb_skip_run */
1279  }
1280 
1281  mb_type = svq3_get_ue_golomb(&h->gb);
1282 
1283  if (h->pict_type == AV_PICTURE_TYPE_I)
1284  mb_type += 8;
1285  else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1286  mb_type += 4;
1287  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1288  av_log(h->avctx, AV_LOG_ERROR,
1289  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1290  return -1;
1291  }
1292 
1293  if (mb_type != 0 || h->cbp)
1294  ff_h264_hl_decode_mb(h);
1295 
1296  if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1297  h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1298  (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1299  }
1300 
1301  ff_draw_horiz_band(avctx, s->cur_pic,
1302  s->last_pic->f.data[0] ? s->last_pic : NULL,
1303  16 * h->mb_y, 16, h->picture_structure, 0,
1304  h->low_delay);
1305  }
1306 
1307  left = buf_size*8 - get_bits_count(&h->gb);
1308 
1309  if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1310  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1311  //av_hex_dump(stderr, buf+buf_size-8, 8);
1312  }
1313 
1314  if (left < 0) {
1315  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1316  return -1;
1317  }
1318 
1319  if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1320  ret = av_frame_ref(data, &s->cur_pic->f);
1321  else if (s->last_pic->f.data[0])
1322  ret = av_frame_ref(data, &s->last_pic->f);
1323  if (ret < 0)
1324  return ret;
1325 
1326  /* Do not output the last pic after seeking. */
1327  if (s->last_pic->f.data[0] || h->low_delay)
1328  *got_frame = 1;
1329 
1330  if (h->pict_type != AV_PICTURE_TYPE_B) {
1331  FFSWAP(Picture*, s->cur_pic, s->next_pic);
1332  } else {
1333  av_frame_unref(&s->cur_pic->f);
1334  }
1335 
1336  return buf_size;
1337 }
1338 
1339 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1340 {
1341  SVQ3Context *s = avctx->priv_data;
1342  H264Context *h = &s->h;
1343 
1344  free_picture(avctx, s->cur_pic);
1345  free_picture(avctx, s->next_pic);
1346  free_picture(avctx, s->last_pic);
1347  av_freep(&s->cur_pic);
1348  av_freep(&s->next_pic);
1349  av_freep(&s->last_pic);
1350 
1351  av_frame_unref(&h->cur_pic.f);
1352 
1353  ff_h264_free_context(h);
1354 
1355  av_freep(&s->buf);
1356  s->buf_size = 0;
1357  av_freep(&h->edge_emu_buffer);
1358 
1359  return 0;
1360 }
1361 
1362 AVCodec ff_svq3_decoder = {
1363  .name = "svq3",
1364  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1365  .type = AVMEDIA_TYPE_VIDEO,
1366  .id = AV_CODEC_ID_SVQ3,
1367  .priv_data_size = sizeof(SVQ3Context),
1368  .init = svq3_decode_init,
1369  .close = svq3_decode_end,
1370  .decode = svq3_decode_frame,
1371  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1372  CODEC_CAP_DR1 |
1373  CODEC_CAP_DELAYED_FRAMES,
1374  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1375  AV_PIX_FMT_NONE},
1376 };