FFmpeg
svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "libavutil/crc.h"
47 #include "libavutil/mem.h"
48 #include "libavutil/mem_internal.h"
49 
50 #include "codec_internal.h"
51 #include "decode.h"
52 #include "avcodec.h"
53 #include "mpegutils.h"
54 #include "h264data.h"
55 #include "h264dsp.h"
56 #include "h264pred.h"
57 #include "h264_parse.h"
58 #include "golomb.h"
59 #include "hpeldsp.h"
60 #include "mathops.h"
61 #include "rectangle.h"
62 #include "tpeldsp.h"
63 #include "videodsp.h"
64 
65 #if CONFIG_ZLIB
66 #include <zlib.h>
67 #endif
68 
69 /**
70  * @file
71  * svq3 decoder.
72  */
73 
74 #define NUM_PICS 3
75 
76 typedef struct SVQ3Frame {
78 
79  int16_t (*motion_val[2])[2];
80 
81  uint32_t *mb_type;
82 } SVQ3Frame;
83 
84 typedef struct SVQ3Context {
/* NOTE(review): this struct came through a lossy extraction. The jumps in the
 * embedded original line numbers (84 -> 86 -> 92 -> 98, 103, 108, 114, ...)
 * show that many members were dropped from this listing. Code elsewhere in
 * the file reads s->avctx, s->gb, s->gb_slice, s->cur_pic / s->last_pic /
 * s->next_pic, s->pict_type, s->slice_type, s->halfpel_flag,
 * s->thirdpel_flag, s->adaptive_quant, s->h_edge_pos / s->v_edge_pos,
 * s->mb_width, s->mb_stride, s->mb_num, s->frame_num_offset,
 * s->prev_frame_num_offset, s->chroma_pred_mode, s->intra16x16_pred_mode,
 * s->intra4x4_pred_mode[_cache], s->left_samples_available and the DSP
 * contexts (hdsp, tdsp, vdsp, hpc, h264dsp) -- none of which appear below.
 * Restore the full member list from the upstream header before compiling. */
86 
92 
    /* scratch buffer holding one slice's (descrambled) bitstream */
98  uint8_t *slice_buf;
99  unsigned slice_buf_size;
    /* XOR key applied to the slice header when the stream is watermarked */
103  uint32_t watermark_key;
    /* current quantizer and coded-block-pattern of the macroblock */
108  int qscale;
109  int cbp;
114 
118 
    /* current macroblock position and its linear index */
119  int mb_x, mb_y;
120  int mb_xy;
    /* stride of the 4x4-block (b_xy) motion-vector grid */
123  int b_stride;
124 
    /* macroblock index -> intra4x4_pred_mode storage offset */
125  uint32_t *mb2br_xy;
126 
129 
132 
    /* bitmask of neighbouring samples usable for intra prediction */
133  unsigned int top_samples_available;
135 
    /* scratch for motion compensation that reaches outside the frame */
136  uint8_t *edge_emu_buffer;
137 
    /* per-macroblock caches, laid out like the h264 decoder's scan8 grid */
138  DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
139  DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
140  DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
141  DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
142  DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
143  uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
144  int block_offset[2 * (16 * 3)];
146 
    /* backing storage -- presumably shared by the SVQ3Frame mb_type /
     * motion_val pointers above; verify against the init code */
147  uint32_t *mb_type_buf;
148  int16_t (*motion_val_buf)[2];
149 } SVQ3Context;
150 
151 #define FULLPEL_MODE 1
152 #define HALFPEL_MODE 2
153 #define THIRDPEL_MODE 3
154 #define PREDICT_MODE 4
155 
/* dual scan (from some older H.264 draft)
 * o-->o-->o   o
 *         |  /|
 * o   o   o / o
 * | / |   |/  |
 * o   o   o   o
 *   /
 * o-->o-->o-->o
 */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
171 
/* zigzag scan for the 4x4 luma DC coefficients; offsets are in units of
 * 16 (one 4x4 block) horizontally and 64 (one block row) vertically,
 * matching the layout of the s->mb coefficient buffer */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
178 
/* anti-diagonal traversal of a 5x5 grid: maps a decoded luma-prediction
 * VLC index (0..24) to an (x, y) pair used to index svq3_pred_1 */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
190 
/* intra 4x4 prediction-mode table, indexed by [top mode + 1][left mode + 1]
 * [rank]; -1 entries are invalid combinations (rejected by the caller) */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
205 
/* (run, level) pairs for the short DCT coefficient VLCs;
 * [0] is the inter table, [1] the intra table (selected by `intra`
 * in svq3_decode_block); longer codes are computed arithmetically */
static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
215 
/* dequantization multipliers indexed by qp (0..31); results are scaled
 * back down by >> 20 after the inverse transform */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,   5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339,  13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847,  34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615,  89113, 100253, 109366, 126635, 141533
};
222 
223 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
224 {
225  const unsigned qmul = svq3_dequant_coeff[qp];
226 #define stride 16
227  int i;
228  int temp[16];
229  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
230 
231  for (i = 0; i < 4; i++) {
232  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
233  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
234  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
235  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
236 
237  temp[4 * i + 0] = z0 + z3;
238  temp[4 * i + 1] = z1 + z2;
239  temp[4 * i + 2] = z1 - z2;
240  temp[4 * i + 3] = z0 - z3;
241  }
242 
243  for (i = 0; i < 4; i++) {
244  const int offset = x_offset[i];
245  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
246  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
247  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
248  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
249 
250  output[stride * 0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
251  output[stride * 2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
252  output[stride * 8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
253  output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
254  }
255 }
256 #undef stride
257 
258 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
259  int stride, int qp, int dc)
260 {
261  const int qmul = svq3_dequant_coeff[qp];
262  int i;
263 
264  if (dc) {
265  dc = 13 * 13 * (dc == 1 ? 1538U* block[0]
266  : qmul * (block[0] >> 3) / 2);
267  block[0] = 0;
268  }
269 
270  for (i = 0; i < 4; i++) {
271  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
272  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
273  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
274  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
275 
276  block[0 + 4 * i] = z0 + z3;
277  block[1 + 4 * i] = z1 + z2;
278  block[2 + 4 * i] = z1 - z2;
279  block[3 + 4 * i] = z0 - z3;
280  }
281 
282  for (i = 0; i < 4; i++) {
283  const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
284  const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
285  const unsigned z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
286  const unsigned z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
287  const int rr = (dc + 0x80000u);
288 
289  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
290  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
291  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
292  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
293  }
294 
295  memset(block, 0, 16 * sizeof(int16_t));
296 }
297 
298 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
299  int index, const int type)
300 {
301  static const uint8_t *const scan_patterns[4] = {
303  };
304 
305  int run, level, sign, limit;
306  unsigned vlc;
307  const int intra = 3 * type >> 2;
308  const uint8_t *const scan = scan_patterns[type];
309 
310  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
311  for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
312  if ((int32_t)vlc < 0)
313  return -1;
314 
315  sign = (vlc & 1) ? 0 : -1;
316  vlc = vlc + 1 >> 1;
317 
318  if (type == 3) {
319  if (vlc < 3) {
320  run = 0;
321  level = vlc;
322  } else if (vlc < 4) {
323  run = 1;
324  level = 1;
325  } else {
326  run = vlc & 0x3;
327  level = (vlc + 9 >> 2) - run;
328  }
329  } else {
330  if (vlc < 16U) {
331  run = svq3_dct_tables[intra][vlc].run;
332  level = svq3_dct_tables[intra][vlc].level;
333  } else if (intra) {
334  run = vlc & 0x7;
335  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
336  } else {
337  run = vlc & 0xF;
338  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
339  }
340  }
341 
342 
343  if ((index += run) >= limit)
344  return -1;
345 
346  block[scan[index]] = (level ^ sign) - sign;
347  }
348 
349  if (type != 2) {
350  break;
351  }
352  }
353 
354  return 0;
355 }
356 
357 static av_always_inline int
358 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
359  int i, int list, int part_width)
360 {
361  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
362 
363  if (topright_ref != PART_NOT_AVAILABLE) {
364  *C = s->mv_cache[list][i - 8 + part_width];
365  return topright_ref;
366  } else {
367  *C = s->mv_cache[list][i - 8 - 1];
368  return s->ref_cache[list][i - 8 - 1];
369  }
370 }
371 
372 /**
373  * Get the predicted MV.
374  * @param n the block index
375  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
376  * @param mx the x component of the predicted motion vector
377  * @param my the y component of the predicted motion vector
378  */
379 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
380  int part_width, int list,
381  int ref, int *const mx, int *const my)
382 {
383  const int index8 = scan8[n];
384  const int top_ref = s->ref_cache[list][index8 - 8];
385  const int left_ref = s->ref_cache[list][index8 - 1];
386  const int16_t *const A = s->mv_cache[list][index8 - 1];
387  const int16_t *const B = s->mv_cache[list][index8 - 8];
388  const int16_t *C;
389  int diagonal_ref, match_count;
390 
391 /* mv_cache
392  * B . . A T T T T
393  * U . . L . . , .
394  * U . . L . . . .
395  * U . . L . . , .
396  * . . . L . . . .
397  */
398 
399  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
400  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
401  if (match_count > 1) { //most common
402  *mx = mid_pred(A[0], B[0], C[0]);
403  *my = mid_pred(A[1], B[1], C[1]);
404  } else if (match_count == 1) {
405  if (left_ref == ref) {
406  *mx = A[0];
407  *my = A[1];
408  } else if (top_ref == ref) {
409  *mx = B[0];
410  *my = B[1];
411  } else {
412  *mx = C[0];
413  *my = C[1];
414  }
415  } else {
416  if (top_ref == PART_NOT_AVAILABLE &&
417  diagonal_ref == PART_NOT_AVAILABLE &&
418  left_ref != PART_NOT_AVAILABLE) {
419  *mx = A[0];
420  *my = A[1];
421  } else {
422  *mx = mid_pred(A[0], B[0], C[0]);
423  *my = mid_pred(A[1], B[1], C[1]);
424  }
425  }
426 }
427 
428 static inline void svq3_mc_dir_part(SVQ3Context *s,
429  int x, int y, int width, int height,
430  int mx, int my, int dxy,
431  int thirdpel, int dir, int avg)
432 {
433  const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
434  uint8_t *src, *dest;
435  int i, emu = 0;
436  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
437  int linesize = s->cur_pic->f->linesize[0];
438  int uvlinesize = s->cur_pic->f->linesize[1];
439 
440  mx += x;
441  my += y;
442 
443  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
444  my < 0 || my >= s->v_edge_pos - height - 1) {
445  emu = 1;
446  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
447  my = av_clip(my, -16, s->v_edge_pos - height + 15);
448  }
449 
450  /* form component predictions */
451  dest = s->cur_pic->f->data[0] + x + y * linesize;
452  src = pic->f->data[0] + mx + my * linesize;
453 
454  if (emu) {
455  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
456  linesize, linesize,
457  width + 1, height + 1,
458  mx, my, s->h_edge_pos, s->v_edge_pos);
459  src = s->edge_emu_buffer;
460  }
461  if (thirdpel)
462  (avg ? s->tdsp.avg_tpel_pixels_tab
463  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
464  width, height);
465  else
466  (avg ? s->hdsp.avg_pixels_tab
467  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
468  height);
469 
470  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
471  mx = mx + (mx < (int) x) >> 1;
472  my = my + (my < (int) y) >> 1;
473  width = width >> 1;
474  height = height >> 1;
475  blocksize++;
476 
477  for (i = 1; i < 3; i++) {
478  dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
479  src = pic->f->data[i] + mx + my * uvlinesize;
480 
481  if (emu) {
482  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
483  uvlinesize, uvlinesize,
484  width + 1, height + 1,
485  mx, my, (s->h_edge_pos >> 1),
486  s->v_edge_pos >> 1);
487  src = s->edge_emu_buffer;
488  }
489  if (thirdpel)
490  (avg ? s->tdsp.avg_tpel_pixels_tab
491  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
492  uvlinesize,
493  width, height);
494  else
495  (avg ? s->hdsp.avg_pixels_tab
496  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
497  uvlinesize,
498  height);
499  }
500  }
501 }
502 
503 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
504  int dir, int avg)
505 {
506  int i, j, k, mx, my, dx, dy, x, y;
507  // 0->16x16,1->8x16,2->16x8,3->8x8,4->4x8,5->8x4,6->4x4
508  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
509  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
510  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
511  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
512  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
513 
514  for (i = 0; i < 16; i += part_height)
515  for (j = 0; j < 16; j += part_width) {
516  const int b_xy = (4 * s->mb_x + (j >> 2)) +
517  (4 * s->mb_y + (i >> 2)) * s->b_stride;
518  int dxy;
519  x = 16 * s->mb_x + j;
520  y = 16 * s->mb_y + i;
521  k = (j >> 2 & 1) + (i >> 1 & 2) +
522  (j >> 1 & 4) + (i & 8);
523 
524  if (mode != PREDICT_MODE) {
525  svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
526  } else {
527  mx = s->next_pic->motion_val[0][b_xy][0] * 2;
528  my = s->next_pic->motion_val[0][b_xy][1] * 2;
529 
530  if (dir == 0) {
531  mx = mx * s->frame_num_offset /
532  s->prev_frame_num_offset + 1 >> 1;
533  my = my * s->frame_num_offset /
534  s->prev_frame_num_offset + 1 >> 1;
535  } else {
536  mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
537  s->prev_frame_num_offset + 1 >> 1;
538  my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
539  s->prev_frame_num_offset + 1 >> 1;
540  }
541  }
542 
543  /* clip motion vector prediction to frame border */
544  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
545  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
546 
547  /* get (optional) motion vector differential */
548  if (mode == PREDICT_MODE) {
549  dx = dy = 0;
550  } else {
551  dy = get_interleaved_se_golomb(&s->gb_slice);
552  dx = get_interleaved_se_golomb(&s->gb_slice);
553 
554  if (dx != (int16_t)dx || dy != (int16_t)dy) {
555  av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
556  return -1;
557  }
558  }
559 
560  /* compute motion vector */
561  if (mode == THIRDPEL_MODE) {
562  int fx, fy;
563  mx = (mx + 1 >> 1) + dx;
564  my = (my + 1 >> 1) + dy;
565  fx = (unsigned)(mx + 0x30000) / 3 - 0x10000;
566  fy = (unsigned)(my + 0x30000) / 3 - 0x10000;
567  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
568 
569  svq3_mc_dir_part(s, x, y, part_width, part_height,
570  fx, fy, dxy, 1, dir, avg);
571  mx += mx;
572  my += my;
573  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
574  mx = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
575  my = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
576  dxy = (mx & 1) + 2 * (my & 1);
577 
578  svq3_mc_dir_part(s, x, y, part_width, part_height,
579  mx >> 1, my >> 1, dxy, 0, dir, avg);
580  mx *= 3;
581  my *= 3;
582  } else {
583  mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
584  my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;
585 
586  svq3_mc_dir_part(s, x, y, part_width, part_height,
587  mx, my, 0, 0, dir, avg);
588  mx *= 6;
589  my *= 6;
590  }
591 
592  /* update mv_cache */
593  if (mode != PREDICT_MODE) {
594  int32_t mv = pack16to32(mx, my);
595 
596  if (part_height == 8 && i < 8) {
597  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);
598 
599  if (part_width == 8 && j < 8)
600  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
601  }
602  if (part_width == 8 && j < 8)
603  AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
604  if (part_width == 4 || part_height == 4)
605  AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
606  }
607 
608  /* write back motion vectors */
609  fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
610  part_width >> 2, part_height >> 2, s->b_stride,
611  pack16to32(mx, my), 4);
612  }
613 
614  return 0;
615 }
616 
618  int mb_type, const int *block_offset,
619  int linesize, uint8_t *dest_y)
620 {
621  int i;
622  if (!IS_INTRA4x4(mb_type)) {
623  for (i = 0; i < 16; i++)
624  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
625  uint8_t *const ptr = dest_y + block_offset[i];
626  svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
627  s->qscale, IS_INTRA(mb_type) ? 1 : 0);
628  }
629  }
630 }
631 
633  int mb_type,
634  const int *block_offset,
635  int linesize,
636  uint8_t *dest_y)
637 {
638  int i;
639  int qscale = s->qscale;
640 
641  if (IS_INTRA4x4(mb_type)) {
642  for (i = 0; i < 16; i++) {
643  uint8_t *const ptr = dest_y + block_offset[i];
644  const int dir = s->intra4x4_pred_mode_cache[scan8[i]];
645 
646  uint8_t *topright;
647  int nnz;
648  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
649  av_assert2(s->mb_y || linesize <= block_offset[i]);
650  topright = ptr + 4 - linesize;
651  } else
652  topright = NULL;
653 
654  s->hpc.pred4x4[dir](ptr, topright, linesize);
655  nnz = s->non_zero_count_cache[scan8[i]];
656  if (nnz) {
657  svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
658  }
659  }
660  } else {
661  s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
662  svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
663  }
664 }
665 
667 {
668  const int mb_x = s->mb_x;
669  const int mb_y = s->mb_y;
670  const int mb_xy = s->mb_xy;
671  const int mb_type = s->cur_pic->mb_type[mb_xy];
672  uint8_t *dest_y, *dest_cb, *dest_cr;
673  int linesize, uvlinesize;
674  int i, j;
675  const int *block_offset = &s->block_offset[0];
676  const int block_h = 16 >> 1;
677 
678  linesize = s->cur_pic->f->linesize[0];
679  uvlinesize = s->cur_pic->f->linesize[1];
680 
681  dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
682  dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
683  dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
684 
685  s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
686  s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
687 
688  if (IS_INTRA(mb_type)) {
689  s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
690  s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
691 
692  hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
693  }
694 
695  hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
696 
697  if (s->cbp & 0x30) {
698  uint8_t *dest[2] = { dest_cb, dest_cr };
699  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
700  s->dequant4_coeff[4][0]);
701  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
702  s->dequant4_coeff[4][0]);
703  for (j = 1; j < 3; j++) {
704  for (i = j * 16; i < j * 16 + 4; i++)
705  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
706  uint8_t *const ptr = dest[j - 1] + block_offset[i];
707  svq3_add_idct_c(ptr, s->mb + i * 16,
708  uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
709  }
710  }
711  }
712 }
713 
714 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
715 {
716  int i, j, k, m, dir, mode;
717  int cbp = 0;
718  uint32_t vlc;
719  int8_t *top, *left;
720  const int mb_xy = s->mb_xy;
721  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;
722 
723  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
724  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
725 
726  if (mb_type == 0) { /* SKIP */
727  if (s->pict_type == AV_PICTURE_TYPE_P ||
728  s->next_pic->mb_type[mb_xy] == -1) {
729  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
730  0, 0, 0, 0, 0, 0);
731 
732  if (s->pict_type == AV_PICTURE_TYPE_B)
733  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
734  0, 0, 0, 0, 1, 1);
735 
736  mb_type = MB_TYPE_SKIP;
737  } else {
738  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
739  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
740  return -1;
741  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
742  return -1;
743 
744  mb_type = MB_TYPE_16x16;
745  }
746  } else if (mb_type < 8) { /* INTER */
747  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
749  else if (s->halfpel_flag &&
750  s->thirdpel_flag == !get_bits1(&s->gb_slice))
751  mode = HALFPEL_MODE;
752  else
753  mode = FULLPEL_MODE;
754 
755  /* fill caches */
756  /* note ref_cache should contain here:
757  * ????????
758  * ???11111
759  * N??11111
760  * N??11111
761  * N??11111
762  */
763 
764  for (m = 0; m < 2; m++) {
765  if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
766  for (i = 0; i < 4; i++)
767  AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
768  s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
769  } else {
770  for (i = 0; i < 4; i++)
771  AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
772  }
773  if (s->mb_y > 0) {
774  memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
775  s->cur_pic->motion_val[m][b_xy - s->b_stride],
776  4 * 2 * sizeof(int16_t));
777  memset(&s->ref_cache[m][scan8[0] - 1 * 8],
778  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
779 
780  if (s->mb_x < s->mb_width - 1) {
781  AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
782  s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
783  s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
784  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
785  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
786  } else
787  s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
788  if (s->mb_x > 0) {
789  AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
790  s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
791  s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
792  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
793  } else
794  s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
795  } else
796  memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
797  PART_NOT_AVAILABLE, 8);
798 
799  if (s->pict_type != AV_PICTURE_TYPE_B)
800  break;
801  }
802 
803  /* decode motion vector(s) and form prediction(s) */
804  if (s->pict_type == AV_PICTURE_TYPE_P) {
805  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
806  return -1;
807  } else { /* AV_PICTURE_TYPE_B */
808  if (mb_type != 2) {
809  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
810  return -1;
811  } else {
812  for (i = 0; i < 4; i++)
813  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
814  0, 4 * 2 * sizeof(int16_t));
815  }
816  if (mb_type != 1) {
817  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
818  return -1;
819  } else {
820  for (i = 0; i < 4; i++)
821  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
822  0, 4 * 2 * sizeof(int16_t));
823  }
824  }
825 
826  mb_type = MB_TYPE_16x16;
827  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
828  int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
829  int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;
830 
831  memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
832 
833  if (mb_type == 8) {
834  if (s->mb_x > 0) {
835  for (i = 0; i < 4; i++)
836  s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
837  if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
838  s->left_samples_available = 0x5F5F;
839  }
840  if (s->mb_y > 0) {
841  s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
842  s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
843  s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
844  s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];
845 
846  if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
847  s->top_samples_available = 0x33FF;
848  }
849 
850  /* decode prediction codes for luma blocks */
851  for (i = 0; i < 16; i += 2) {
852  vlc = get_interleaved_ue_golomb(&s->gb_slice);
853 
854  if (vlc >= 25U) {
855  av_log(s->avctx, AV_LOG_ERROR,
856  "luma prediction:%"PRIu32"\n", vlc);
857  return -1;
858  }
859 
860  left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
861  top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];
862 
863  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
864  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
865 
866  if (left[1] == -1 || left[2] == -1) {
867  av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
868  return -1;
869  }
870  }
871  } else { /* mb_type == 33, DC_128_PRED block type */
872  for (i = 0; i < 4; i++)
873  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
874  }
875 
876  AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
877  i4x4[4] = i4x4_cache[7 + 8 * 3];
878  i4x4[5] = i4x4_cache[7 + 8 * 2];
879  i4x4[6] = i4x4_cache[7 + 8 * 1];
880 
881  if (mb_type == 8) {
882  ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
883  s->avctx, s->top_samples_available,
884  s->left_samples_available);
885 
886  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
887  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
888  } else {
889  for (i = 0; i < 4; i++)
890  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
891 
892  s->top_samples_available = 0x33FF;
893  s->left_samples_available = 0x5F5F;
894  }
895 
896  mb_type = MB_TYPE_INTRA4x4;
897  } else { /* INTRA16x16 */
898  dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
899  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
900 
901  if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
902  s->left_samples_available, dir, 0)) < 0) {
903  av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
904  return s->intra16x16_pred_mode;
905  }
906 
907  cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
908  mb_type = MB_TYPE_INTRA16x16;
909  }
910 
911  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
912  for (i = 0; i < 4; i++)
913  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
914  0, 4 * 2 * sizeof(int16_t));
915  if (s->pict_type == AV_PICTURE_TYPE_B) {
916  for (i = 0; i < 4; i++)
917  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
918  0, 4 * 2 * sizeof(int16_t));
919  }
920  }
921  if (!IS_INTRA4x4(mb_type)) {
922  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
923  }
924  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
925  memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
926  }
927 
928  if (!IS_INTRA16x16(mb_type) &&
929  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
930  if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
931  av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
932  return -1;
933  }
934 
935  cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
937  }
938  if (IS_INTRA16x16(mb_type) ||
939  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
940  s->qscale += get_interleaved_se_golomb(&s->gb_slice);
941 
942  if (s->qscale > 31u) {
943  av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
944  return -1;
945  }
946  }
947  if (IS_INTRA16x16(mb_type)) {
948  AV_ZERO128(s->mb_luma_dc[0] + 0);
949  AV_ZERO128(s->mb_luma_dc[0] + 8);
950  if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
951  av_log(s->avctx, AV_LOG_ERROR,
952  "error while decoding intra luma dc\n");
953  return -1;
954  }
955  }
956 
957  if (cbp) {
958  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
959  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
960 
961  for (i = 0; i < 4; i++)
962  if ((cbp & (1 << i))) {
963  for (j = 0; j < 4; j++) {
964  k = index ? (1 * (j & 1) + 2 * (i & 1) +
965  2 * (j & 2) + 4 * (i & 2))
966  : (4 * i + j);
967  s->non_zero_count_cache[scan8[k]] = 1;
968 
969  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
970  av_log(s->avctx, AV_LOG_ERROR,
971  "error while decoding block\n");
972  return -1;
973  }
974  }
975  }
976 
977  if ((cbp & 0x30)) {
978  for (i = 1; i < 3; ++i)
979  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
980  av_log(s->avctx, AV_LOG_ERROR,
981  "error while decoding chroma dc block\n");
982  return -1;
983  }
984 
985  if ((cbp & 0x20)) {
986  for (i = 1; i < 3; i++) {
987  for (j = 0; j < 4; j++) {
988  k = 16 * i + j;
989  s->non_zero_count_cache[scan8[k]] = 1;
990 
991  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
992  av_log(s->avctx, AV_LOG_ERROR,
993  "error while decoding chroma ac block\n");
994  return -1;
995  }
996  }
997  }
998  }
999  }
1000  }
1001 
1002  s->cbp = cbp;
1003  s->cur_pic->mb_type[mb_xy] = mb_type;
1004 
1005  if (IS_INTRA(mb_type))
1006  s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
1007  s->left_samples_available, DC_PRED8x8, 1);
1008 
1009  return 0;
1010 }
1011 
1013 {
1014  SVQ3Context *s = avctx->priv_data;
1015  const int mb_xy = s->mb_xy;
1016  int i, header;
1017  unsigned slice_id;
1018 
1019  header = get_bits(&s->gb, 8);
1020 
1021  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1022  /* TODO: what? */
1023  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1024  return -1;
1025  } else {
1026  int slice_bits, slice_bytes, slice_length;
1027  int length = header >> 5 & 3;
1028 
1029  slice_length = show_bits(&s->gb, 8 * length);
1030  slice_bits = slice_length * 8;
1031  slice_bytes = slice_length + length - 1;
1032 
1033  skip_bits(&s->gb, 8);
1034 
1035  av_fast_padded_malloc(&s->slice_buf, &s->slice_buf_size, slice_bytes);
1036  if (!s->slice_buf)
1037  return AVERROR(ENOMEM);
1038 
1039  if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
1040  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1041  return AVERROR_INVALIDDATA;
1042  }
1043  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1044 
1045  if (length > 0) {
1046  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1047  }
1048 
1049  if (s->watermark_key) {
1050  uint32_t header = AV_RL32(&s->slice_buf[1]);
1051  AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
1052  }
1053  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1054 
1055  skip_bits_long(&s->gb, slice_bytes * 8);
1056  }
1057 
1058  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1059  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1060  return -1;
1061  }
1062 
1063  s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1064 
1065  if ((header & 0x9F) == 2) {
1066  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1067  get_bits(&s->gb_slice, i);
1068  } else if (get_bits1(&s->gb_slice)) {
1069  avpriv_report_missing_feature(s->avctx, "Media key encryption");
1070  return AVERROR_PATCHWELCOME;
1071  }
1072 
1073  s->slice_num = get_bits(&s->gb_slice, 8);
1074  s->qscale = get_bits(&s->gb_slice, 5);
1075  s->adaptive_quant = get_bits1(&s->gb_slice);
1076 
1077  /* unknown fields */
1078  skip_bits1(&s->gb_slice);
1079 
1080  if (s->has_watermark)
1081  skip_bits1(&s->gb_slice);
1082 
1083  skip_bits1(&s->gb_slice);
1084  skip_bits(&s->gb_slice, 2);
1085 
1086  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1087  return AVERROR_INVALIDDATA;
1088 
1089  /* reset intra predictors and invalidate motion vector references */
1090  if (s->mb_x > 0) {
1091  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1092  -1, 4 * sizeof(int8_t));
1093  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1094  -1, 8 * sizeof(int8_t) * s->mb_x);
1095  }
1096  if (s->mb_y > 0) {
1097  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1098  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1099 
1100  if (s->mb_x > 0)
1101  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1102  }
1103 
1104  return 0;
1105 }
1106 
1108 {
1109  int q, x;
1110  const int max_qp = 51;
1111 
1112  for (q = 0; q < max_qp + 1; q++) {
1113  int shift = ff_h264_quant_div6[q] + 2;
1114  int idx = ff_h264_quant_rem6[q];
1115  for (x = 0; x < 16; x++)
1116  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1117  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1118  }
1119 }
1120 
1122  int seqh_offset)
1123 {
1124  const uint8_t *extradata = avctx->extradata + seqh_offset;
1125  unsigned int size = AV_RB32(extradata + 4);
1126  GetBitContext gb;
1127  int ret;
1128 
1129  if (size > avctx->extradata_size - seqh_offset - 8)
1130  return AVERROR_INVALIDDATA;
1131  extradata += 8;
1132  init_get_bits(&gb, extradata, size * 8);
1133 
1134  /* 'frame size code' and optional 'width, height' */
1135  int frame_size_code = get_bits(&gb, 3);
1136  int w, h;
1137  switch (frame_size_code) {
1138  case 0:
1139  w = 160;
1140  h = 120;
1141  break;
1142  case 1:
1143  w = 128;
1144  h = 96;
1145  break;
1146  case 2:
1147  w = 176;
1148  h = 144;
1149  break;
1150  case 3:
1151  w = 352;
1152  h = 288;
1153  break;
1154  case 4:
1155  w = 704;
1156  h = 576;
1157  break;
1158  case 5:
1159  w = 240;
1160  h = 180;
1161  break;
1162  case 6:
1163  w = 320;
1164  h = 240;
1165  break;
1166  case 7:
1167  w = get_bits(&gb, 12);
1168  h = get_bits(&gb, 12);
1169  break;
1170  }
1171  ret = ff_set_dimensions(avctx, w, h);
1172  if (ret < 0)
1173  return ret;
1174 
1175  s->halfpel_flag = get_bits1(&gb);
1176  s->thirdpel_flag = get_bits1(&gb);
1177 
1178  /* unknown fields */
1179  int unk0 = get_bits1(&gb);
1180  int unk1 = get_bits1(&gb);
1181  int unk2 = get_bits1(&gb);
1182  int unk3 = get_bits1(&gb);
1183 
1184  s->low_delay = get_bits1(&gb);
1185  avctx->has_b_frames = !s->low_delay;
1186 
1187  /* unknown field */
1188  int unk4 = get_bits1(&gb);
1189 
1190  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1191  unk0, unk1, unk2, unk3, unk4);
1192 
1193  if (skip_1stop_8data_bits(&gb) < 0)
1194  return AVERROR_INVALIDDATA;
1195 
1196  s->has_watermark = get_bits1(&gb);
1197 
1198  if (!s->has_watermark)
1199  return 0;
1200 
1201 #if CONFIG_ZLIB
1202  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1203  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1204  int u1 = get_interleaved_ue_golomb(&gb);
1205  int u2 = get_bits(&gb, 8);
1206  int u3 = get_bits(&gb, 2);
1207  int u4 = get_interleaved_ue_golomb(&gb);
1208  unsigned long buf_len = watermark_width *
1209  watermark_height * 4;
1210  int offset = get_bits_count(&gb) + 7 >> 3;
1211 
1212  if (watermark_height <= 0 ||
1213  get_bits_left(&gb) <= 0 ||
1214  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1215  return AVERROR_INVALIDDATA;
1216 
1217  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1218  watermark_width, watermark_height);
1219  av_log(avctx, AV_LOG_DEBUG,
1220  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1221  u1, u2, u3, u4, offset);
1222 
1223  uint8_t *buf = av_malloc(buf_len);
1224  if (!buf)
1225  return AVERROR(ENOMEM);
1226 
1227  if (uncompress(buf, &buf_len, extradata + offset,
1228  size - offset) != Z_OK) {
1229  av_log(avctx, AV_LOG_ERROR,
1230  "could not uncompress watermark logo\n");
1231  av_free(buf);
1232  return AVERROR_EXTERNAL;
1233  }
1234  s->watermark_key = av_bswap16(av_crc(av_crc_get_table(AV_CRC_16_CCITT), 0, buf, buf_len));
1235 
1236  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1237  av_log(avctx, AV_LOG_DEBUG,
1238  "watermark key %#"PRIx32"\n", s->watermark_key);
1239  av_free(buf);
1240 
1241  return 0;
1242 #else
1243  av_log(avctx, AV_LOG_ERROR,
1244  "this svq3 file contains watermark which need zlib support compiled in\n");
1245  return AVERROR(ENOSYS);
1246 #endif
1247 }
1248 
1250 {
1251  SVQ3Context *s = avctx->priv_data;
1252  int m, x, y;
1253  unsigned char *extradata;
1254  int ret;
1255 
1256  s->cur_pic = &s->frames[0];
1257  s->last_pic = &s->frames[1];
1258  s->next_pic = &s->frames[2];
1259 
1260  s->cur_pic->f = av_frame_alloc();
1261  s->last_pic->f = av_frame_alloc();
1262  s->next_pic->f = av_frame_alloc();
1263  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1264  return AVERROR(ENOMEM);
1265 
1266  ff_h264dsp_init(&s->h264dsp, 8, 1);
1267  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1268  ff_videodsp_init(&s->vdsp, 8);
1269 
1270 
1271  avctx->bits_per_raw_sample = 8;
1272 
1273  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1274  ff_tpeldsp_init(&s->tdsp);
1275 
1276  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1277  avctx->color_range = AVCOL_RANGE_JPEG;
1278 
1279  s->avctx = avctx;
1280  s->halfpel_flag = 1;
1281  s->thirdpel_flag = 1;
1282  s->has_watermark = 0;
1283 
1284  /* prowl for the "SEQH" marker in the extradata */
1285  extradata = (unsigned char *)avctx->extradata;
1286  if (extradata) {
1287  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1288  if (!memcmp(extradata, "SEQH", 4)) {
1289  /* if a match was found, parse the extra data */
1290  ret = svq3_decode_extradata(avctx, s, m);
1291  if (ret < 0)
1292  return ret;
1293  break;
1294  }
1295  extradata++;
1296  }
1297  }
1298 
1299  s->mb_width = (avctx->width + 15) / 16;
1300  s->mb_height = (avctx->height + 15) / 16;
1301  s->mb_stride = s->mb_width + 1;
1302  s->mb_num = s->mb_width * s->mb_height;
1303  s->b_stride = 4 * s->mb_width;
1304  s->h_edge_pos = s->mb_width * 16;
1305  s->v_edge_pos = s->mb_height * 16;
1306 
1307  const unsigned big_mb_num = s->mb_stride * (s->mb_height + 2) + 1;
1308 
1309  s->mb_type_buf = av_calloc(big_mb_num, NUM_PICS * sizeof(*s->mb_type_buf));
1310  if (!s->mb_type_buf)
1311  return AVERROR(ENOMEM);
1312  uint32_t *mb_type_buf = s->mb_type_buf + 2 * s->mb_stride + 1;
1313 
1314  const unsigned b4_stride = s->mb_width * 4 + 1;
1315  const unsigned b4_array_size = b4_stride * s->mb_height * 4;
1316  const unsigned motion_val_buf_size = b4_array_size + 4;
1317 
1318  s->motion_val_buf = av_calloc(motion_val_buf_size,
1319  NUM_PICS * 2 * sizeof(*s->motion_val_buf));
1320  if (!s->motion_val_buf)
1321  return AVERROR(ENOMEM);
1322  int16_t (*motion_val_buf)[2] = s->motion_val_buf + 4;
1323 
1324  for (size_t i = 0; i < NUM_PICS; ++i) {
1325  SVQ3Frame *const pic = &s->frames[i];
1326 
1327  pic->mb_type = mb_type_buf;
1328  mb_type_buf += big_mb_num;
1329  for (size_t j = 0; j < FF_ARRAY_ELEMS(pic->motion_val); ++j) {
1330  pic->motion_val[j] = motion_val_buf;
1331  motion_val_buf += motion_val_buf_size;
1332  }
1333  }
1334 
1335  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1336  if (!s->intra4x4_pred_mode)
1337  return AVERROR(ENOMEM);
1338 
1339  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1340  sizeof(*s->mb2br_xy));
1341  if (!s->mb2br_xy)
1342  return AVERROR(ENOMEM);
1343 
1344  for (y = 0; y < s->mb_height; y++)
1345  for (x = 0; x < s->mb_width; x++) {
1346  const int mb_xy = x + y * s->mb_stride;
1347 
1348  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1349  }
1350 
1352 
1353  return 0;
1354 }
1355 
1356 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1357 {
1358  SVQ3Context *s = avctx->priv_data;
1359  int ret = ff_get_buffer(avctx, pic->f,
1360  (s->pict_type != AV_PICTURE_TYPE_B) ?
1362  if (ret < 0)
1363  return ret;
1364 
1365  if (!s->edge_emu_buffer) {
1366  s->edge_emu_buffer = av_calloc(pic->f->linesize[0], 17);
1367  if (!s->edge_emu_buffer)
1368  return AVERROR(ENOMEM);
1369  }
1370 
1371  return 0;
1372 }
1373 
1375 {
1376  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1377  av_frame_unref(pic->f);
1378  int ret = get_buffer(avctx, pic);
1379  if (ret < 0)
1380  return ret;
1381 
1382  memset(pic->f->data[0], 0, avctx->height * pic->f->linesize[0]);
1383  memset(pic->f->data[1], 0x80, (avctx->height / 2) *
1384  pic->f->linesize[1]);
1385  memset(pic->f->data[2], 0x80, (avctx->height / 2) *
1386  pic->f->linesize[2]);
1387 
1388  return 0;
1389 }
1390 
/**
 * Decode one SVQ3 packet (FFCodec decode callback).
 *
 * @param avctx     codec context; priv_data is the SVQ3Context
 * @param rframe    output frame, filled on success when a picture is ready
 * @param got_frame set to 1 when rframe holds a valid picture
 * @param avpkt     input packet; a zero-size packet flushes the delayed frame
 * @return buf_size on success, 0 on flush/skip, negative AVERROR on failure
 */
static int svq3_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
                             int *got_frame, AVPacket *avpkt)
{
    SVQ3Context *s = avctx->priv_data;
    int buf_size = avpkt->size;
    int left;
    int ret, m, i;

    /* special case for last picture: a zero-size packet drains the one
     * frame of reordering delay (next_pic) when B-frames are in use */
    if (buf_size == 0) {
        if (s->next_pic->f->data[0] && !s->low_delay) {
            av_frame_move_ref(rframe, s->next_pic->f);
            *got_frame = 1;
        }
        return 0;
    }

    /* reset macroblock cursor for the new picture */
    s->mb_x = s->mb_y = s->mb_xy = 0;

    ret = init_get_bits8(&s->gb, avpkt->data, avpkt->size);
    if (ret < 0)
        return ret;

    ret = svq3_decode_slice_header(avctx);
    if (ret < 0)
        return ret;

    /* sanity check: a valid picture needs at least ~1 bit per macroblock */
    if (avpkt->size < s->mb_width * s->mb_height / 8)
        return AVERROR_INVALIDDATA;

    s->pict_type = s->slice_type;

    /* for non-B pictures the just-decoded "next" picture becomes the
     * backward reference; B-pictures keep both references as-is */
    if (s->pict_type != AV_PICTURE_TYPE_B)
        FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);

    av_frame_unref(s->cur_pic->f);

    /* for skipping the frame */
    s->cur_pic->f->pict_type = s->pict_type;
    if (s->pict_type == AV_PICTURE_TYPE_I)
        s->cur_pic->f->flags |= AV_FRAME_FLAG_KEY;
    else
        s->cur_pic->f->flags &= ~AV_FRAME_FLAG_KEY;

    ret = get_buffer(avctx, s->cur_pic);
    if (ret < 0)
        return ret;

    /* precompute per-4x4-block byte offsets into the picture planes;
     * entries [i] use the luma linesize, [16+i]/[32+i] the chroma one,
     * and the [48+...] set doubles the row stride (presumably for the
     * field/alternate layout — NOTE(review): confirm against hl_decode_mb) */
    for (i = 0; i < 16; i++) {
        s->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
        s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
    }
    for (i = 0; i < 16; i++) {
        s->block_offset[16 + i]      =
        s->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
        s->block_offset[48 + 16 + i] =
        s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
    }

    /* inter pictures need reference frames; after a seek the references
     * may be missing, so substitute grey dummy frames */
    if (s->pict_type != AV_PICTURE_TYPE_I) {
        if (!s->last_pic->f->data[0]) {
            ret = alloc_dummy_frame(avctx, s->last_pic);
            if (ret < 0)
                return ret;
        }

        if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
            ret = alloc_dummy_frame(avctx, s->next_pic);
            if (ret < 0)
                return ret;
        }
    }

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG,
               "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_picture_type_char(s->pict_type),
               s->halfpel_flag, s->thirdpel_flag,
               s->adaptive_quant, s->qscale, s->slice_num);

    /* honor the user's skip_frame setting; note no frame is output here */
    if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
        avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    /* frame-number bookkeeping: slice_num acts as an 8-bit picture id,
     * so offsets are wrapped modulo 256 */
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        s->frame_num_offset = s->slice_num - s->prev_frame_num;

        if (s->frame_num_offset < 0)
            s->frame_num_offset += 256;
        if (s->frame_num_offset == 0 ||
            s->frame_num_offset >= s->prev_frame_num_offset) {
            /* a B-frame must lie strictly between its two references */
            av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        s->prev_frame_num = s->frame_num;
        s->frame_num = s->slice_num;
        s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;

        if (s->prev_frame_num_offset < 0)
            s->prev_frame_num_offset += 256;
    }

    /* initialize the reference cache: mark all in-picture positions
     * available (1) and the column right of each row's last entry as
     * unavailable (j == 4 after the inner loop) */
    for (m = 0; m < 2; m++) {
        int i;
        for (i = 0; i < 4; i++) {
            int j;
            for (j = -1; j < 4; j++)
                s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
            if (i < 3)
                s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
        }
    }

    /* main macroblock decode loop, raster order */
    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            unsigned mb_type;
            s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;

            /* near the end of the slice data, check whether only padding
             * remains and, if so, start the next slice */
            if ((get_bits_left(&s->gb_slice)) <= 7) {
                if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
                    show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {

                    ret = svq3_decode_slice_header(avctx);
                    if (ret < 0)
                        return ret;
                }
                if (s->slice_type != s->pict_type) {
                    avpriv_request_sample(avctx, "non constant slice type");
                }
                /* TODO: support s->mb_skip_run */
            }

            mb_type = get_interleaved_ue_golomb(&s->gb_slice);

            /* remap the coded mb_type into the decoder's unified range:
             * I-pictures only code intra types, B-pictures shift inter types */
            if (s->pict_type == AV_PICTURE_TYPE_I)
                mb_type += 8;
            else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
                mb_type += 4;
            if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            /* mb_type 0 is skip; render only when there is something to draw */
            if (mb_type != 0 || s->cbp)
                hl_decode_mb(s);

            if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
                s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
                    (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
        }

        /* notify the user of each completed 16-pixel band */
        ff_draw_horiz_band(avctx, s->cur_pic->f,
                           s->last_pic->f->data[0] ? s->last_pic->f : NULL,
                           16 * s->mb_y, 16, PICT_FRAME, 0,
                           s->low_delay);
    }

    left = buf_size*8 - get_bits_count(&s->gb_slice);

    if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
        av_log(avctx, AV_LOG_INFO, "frame num %"PRId64" incomplete pic x %d y %d left %d\n", avctx->frame_num, s->mb_y, s->mb_x, left);
        //av_hex_dump(stderr, buf+buf_size-8, 8);
    }

    /* negative 'left' means we read past the packet end: overconsumption */
    if (left < 0) {
        av_log(avctx, AV_LOG_ERROR, "frame num %"PRId64" left %d\n", avctx->frame_num, left);
        return -1;
    }

    /* choose the frame to output: current picture directly in low-delay or
     * B-frame mode, otherwise the (reordered) previous reference */
    if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
        ret = av_frame_ref(rframe, s->cur_pic->f);
    else if (s->last_pic->f->data[0])
        ret = av_frame_ref(rframe, s->last_pic->f);
    if (ret < 0)
        return ret;

    /* Do not output the last pic after seeking. */
    if (s->last_pic->f->data[0] || s->low_delay)
        *got_frame = 1;

    /* non-B pictures become the new forward reference; B pictures are
     * never referenced, so release them immediately */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
    } else {
        av_frame_unref(s->cur_pic->f);
    }

    return buf_size;
}
1582 
1584 {
1585  SVQ3Context *s = avctx->priv_data;
1586 
1587  for (int i = 0; i < NUM_PICS; i++)
1588  av_frame_free(&s->frames[i].f);
1589  av_freep(&s->motion_val_buf);
1590  av_freep(&s->mb_type_buf);
1591  av_freep(&s->slice_buf);
1592  av_freep(&s->intra4x4_pred_mode);
1593  av_freep(&s->edge_emu_buffer);
1594  av_freep(&s->mb2br_xy);
1595 
1596  return 0;
1597 }
1598 
1600  .p.name = "svq3",
1601  CODEC_LONG_NAME("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1602  .p.type = AVMEDIA_TYPE_VIDEO,
1603  .p.id = AV_CODEC_ID_SVQ3,
1604  .priv_data_size = sizeof(SVQ3Context),
1608  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1611  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1612 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
SVQ3Context::frame_num
int frame_num
Definition: svq3.c:110
SVQ3Context::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: svq3.c:136
IS_INTRA4x4
#define IS_INTRA4x4(a)
Definition: mpegutils.h:69
A
#define A(x)
Definition: vpx_arith.h:28
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:54
svq3_dequant_coeff
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:216
SVQ3Context::next_pic
SVQ3Frame * next_pic
Definition: svq3.c:94
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:276
SVQ3Context::slice_type
enum AVPictureType slice_type
Definition: svq3.c:116
SVQ3Context::gb_slice
GetBitContext gb_slice
Definition: svq3.c:97
SVQ3Context::vdsp
VideoDSPContext vdsp
Definition: svq3.c:91
SVQ3Context::slice_num
int slice_num
Definition: svq3.c:107
level
uint8_t level
Definition: svq3.c:208
av_clip
#define av_clip
Definition: common.h:100
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:689
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
svq3_decode_slice_header
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1012
SVQ3Context::frames
SVQ3Frame frames[NUM_PICS]
Definition: svq3.c:145
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:422
mem_internal.h
SVQ3Context::avctx
AVCodecContext * avctx
Definition: svq3.c:85
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
SVQ3Context::mb_num
int mb_num
Definition: svq3.c:122
SVQ3Context::v_edge_pos
int v_edge_pos
Definition: svq3.c:106
AVPictureType
AVPictureType
Definition: avutil.h:276
ff_h264_chroma_qp
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
mv
static const int8_t mv[256][2]
Definition: 4xm.c:81
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
SVQ3Context::left_samples_available
unsigned int left_samples_available
Definition: svq3.c:134
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
get_interleaved_ue_golomb
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:143
ff_h264_golomb_to_inter_cbp
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
h264_parse.h
mode
Definition: swscale.c:56
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
SVQ3Context::h_edge_pos
int h_edge_pos
Definition: svq3.c:105
w
uint8_t w
Definition: llviddspenc.c:38
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:767
IMbInfo::cbp
uint8_t cbp
Definition: h264data.h:37
AVPacket::data
uint8_t * data
Definition: packet.h:558
DC_PRED
@ DC_PRED
Definition: vp9.h:48
MB_TYPE_INTRA4x4
#define MB_TYPE_INTRA4x4
Definition: mpegutils.h:38
SVQ3Context::slice_buf
uint8_t * slice_buf
Definition: svq3.c:98
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:41
SVQ3Context::mb
int16_t mb[16 *48 *2]
Definition: svq3.c:140
PREDICT_MODE
#define PREDICT_MODE
Definition: svq3.c:154
FFCodec
Definition: codec_internal.h:127
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:534
ff_h264_golomb_to_intra4x4_cbp
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
SVQ3Context::frame_num_offset
int frame_num_offset
Definition: svq3.c:111
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:39
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:512
SVQ3Context::slice_buf_size
unsigned slice_buf_size
Definition: svq3.c:99
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1375
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
get_buffer
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1356
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
NUM_PICS
#define NUM_PICS
Definition: svq3.c:74
crc.h
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:379
golomb.h
exp golomb vlc stuff
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
SVQ3Context::last_pic
SVQ3Frame * last_pic
Definition: svq3.c:95
SVQ3Context::qscale
int qscale
Definition: svq3.c:108
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1662
GetBitContext
Definition: get_bits.h:109
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
SVQ3Context::tdsp
TpelDSPContext tdsp
Definition: svq3.c:90
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
SVQ3Context::thirdpel_flag
int thirdpel_flag
Definition: svq3.c:101
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
alloc_dummy_frame
static av_cold int alloc_dummy_frame(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1374
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
SVQ3Context::intra4x4_pred_mode_cache
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:130
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
SVQ3Context::gb
GetBitContext gb
Definition: svq3.c:96
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
SVQ3Context::cbp
int cbp
Definition: svq3.c:109
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
FULLPEL_MODE
#define FULLPEL_MODE
Definition: svq3.c:151
SVQ3Context::mb_y
int mb_y
Definition: svq3.c:119
SVQ3Context::mb_x
int mb_x
Definition: svq3.c:119
SVQ3Context::adaptive_quant
int adaptive_quant
Definition: svq3.c:104
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:346
s
#define s(width, name)
Definition: cbs_vp9.c:198
TpelDSPContext
thirdpel DSP context
Definition: tpeldsp.h:42
SVQ3Context::pict_type
enum AVPictureType pict_type
Definition: svq3.c:115
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:662
svq3_mc_dir
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:503
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_tpeldsp_init
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
QP_MAX_NUM
#define QP_MAX_NUM
Definition: h264.h:27
h264data.h
B
#define B
Definition: huffyuv.h:42
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
svq3_pred_motion
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:379
decode.h
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:75
SVQ3Context::top_samples_available
unsigned int top_samples_available
Definition: svq3.c:133
AV_CODEC_ID_SVQ3
@ AV_CODEC_ID_SVQ3
Definition: codec_id.h:75
SVQ3Context::b_stride
int b_stride
Definition: svq3.c:123
SVQ3Context::prev_frame_num_offset
int prev_frame_num_offset
Definition: svq3.c:112
SVQ3Context::h264dsp
H264DSPContext h264dsp
Definition: svq3.c:87
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:331
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
IMbInfo::pred_mode
uint8_t pred_mode
Definition: h264data.h:36
if
if(ret)
Definition: filter_design.txt:179
SVQ3Frame::motion_val
int16_t(*[2] motion_val)[2]
Definition: svq3.c:79
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:669
SVQ3Context::mb_width
int mb_width
Definition: svq3.c:121
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
SVQ3Context::mb2br_xy
uint32_t * mb2br_xy
Definition: svq3.c:125
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:386
h264dsp.h
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
ff_h264_chroma_dc_scan
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
SVQ3Context
Definition: svq3.c:84
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:670
SVQ3Context::mb_luma_dc
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:141
tpeldsp.h
index
int index
Definition: gxfenc.c:90
hl_decode_mb_idct_luma
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:617
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:46
H264DSPContext
Context for storing H.264 DSP functions.
Definition: h264dsp.h:42
SVQ3Context::intra16x16_pred_mode
int intra16x16_pred_mode
Definition: svq3.c:128
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:231
SVQ3Context::hpc
H264PredContext hpc
Definition: svq3.c:88
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1720
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
init_dequant4_coeff_table
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1107
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:148
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
svq3_fetch_diagonal_mv
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:358
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:559
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
rectangle.h
hl_decode_mb
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:666
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
size
int size
Definition: twinvq_data.h:10344
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:61
avg
#define avg(a, b, c, d)
Definition: colorspacedsp_template.c:28
header
static const uint8_t header[24]
Definition: sdr2.c:68
av_crc_get_table
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
ff_h264_quant_rem6
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:411
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:70
hl_decode_mb_predict_luma
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:632
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
SVQ3Context::prev_frame_num
int prev_frame_num
Definition: svq3.c:113
svq3_add_idct_c
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:258
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
svq3_luma_dc_dequant_idct_c
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:223
stride
#define stride
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AV_CRC_16_CCITT
@ AV_CRC_16_CCITT
Definition: crc.h:51
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:369
SVQ3Frame
Definition: svq3.c:76
THIRDPEL_MODE
#define THIRDPEL_MODE
Definition: svq3.c:153
SVQ3Context::mv_cache
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:138
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
av_always_inline
#define av_always_inline
Definition: attributes.h:63
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:634
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
svq3_decode_frame
static int svq3_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1391
svq3_decode_extradata
static av_cold int svq3_decode_extradata(AVCodecContext *avctx, SVQ3Context *s, int seqh_offset)
Definition: svq3.c:1121
SVQ3Context::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:142
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
svq3_decode_mb
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:714
svq3_scan
static const uint8_t svq3_scan[16]
Definition: svq3.c:165
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
SVQ3Context::halfpel_flag
int halfpel_flag
Definition: svq3.c:100
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1878
mid_pred
#define mid_pred
Definition: mathops.h:97
svq3_pred_1
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:191
ret
ret
Definition: filter_design.txt:187
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
SVQ3Frame::mb_type
uint32_t * mb_type
Definition: svq3.c:81
SVQ3Context::mb_height
int mb_height
Definition: svq3.c:121
SVQ3Context::hdsp
HpelDSPContext hdsp
Definition: svq3.c:89
SVQ3Context::low_delay
int low_delay
Definition: svq3.c:117
h264pred.h
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
svq3_decode_block
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:298
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:694
AVCodecContext
main external API structure.
Definition: avcodec.h:431
ff_h264_dequant4_coeff_init
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
SVQ3Frame::f
AVFrame * f
Definition: svq3.c:77
SVQ3Context::mb_type_buf
uint32_t * mb_type_buf
Definition: svq3.c:147
SVQ3Context::block_offset
int block_offset[2 *(16 *3)]
Definition: svq3.c:144
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
SVQ3Context::motion_val_buf
int16_t(* motion_val_buf)[2]
Definition: svq3.c:148
av_crc
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
mode
mode
Definition: ebur128.h:83
ff_h264_check_intra4x4_pred_mode
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:134
ff_h264_i_mb_type_info
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:825
SVQ3Context::chroma_pred_mode
int chroma_pred_mode
Definition: svq3.c:127
SVQ3Context::watermark_key
uint32_t watermark_key
Definition: svq3.c:103
SVQ3Context::mb_xy
int mb_xy
Definition: svq3.c:120
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:271
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
luma_dc_zigzag_scan
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:172
PART_NOT_AVAILABLE
#define PART_NOT_AVAILABLE
Definition: h264pred.h:89
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
ff_h264_quant_div6
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
VideoDSPContext
Definition: videodsp.h:40
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1374
H264PredContext
Context for storing H.264 prediction functions.
Definition: h264pred.h:94
svq3_mc_dir_part
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:428
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:73
mem.h
svq3_decode_end
static av_cold int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1583
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
svq3_dct_tables
static const struct @252 svq3_dct_tables[2][16]
SVQ3Context::dequant4_coeff
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:143
SVQ3Context::ref_cache
int8_t ref_cache[2][5 *8]
Definition: svq3.c:139
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVPacket
This structure stores compressed data.
Definition: packet.h:535
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
SVQ3Context::mb_stride
int mb_stride
Definition: svq3.c:122
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
int32_t
int32_t
Definition: audioconvert.c:56
hpeldsp.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
svq3_decode_init
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1249
h
h
Definition: vp9dsp_template.c:2070
width
#define width
Definition: dsp.h:89
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:228
av_bswap16
#define av_bswap16
Definition: bswap.h:28
ff_svq3_decoder
const FFCodec ff_svq3_decoder
Definition: svq3.c:1599
SVQ3Context::cur_pic
SVQ3Frame * cur_pic
Definition: svq3.c:93
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
SVQ3Context::has_watermark
int has_watermark
Definition: svq3.c:102
SVQ3Context::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: svq3.c:131
src
#define src
Definition: vp8dsp.c:248
svq3_pred_0
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:179
HALFPEL_MODE
#define HALFPEL_MODE
Definition: svq3.c:152
ff_h264_check_intra_pred_mode
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:182
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:337