FFmpeg
vp3.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2003-2004 The FFmpeg project
3  * Copyright (C) 2019 Peter Ross
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * On2 VP3/VP4 Video Decoder
25  *
26  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
27  * For more information about the VP3 coding process, visit:
28  * http://wiki.multimedia.cx/index.php?title=On2_VP3
29  *
30  * Theora decoder by Alex Beregszaszi
31  */
32 
33 #include "config_components.h"
34 
35 #include <stddef.h>
36 #include <string.h>
37 
38 #include "libavutil/attributes.h"
39 #include "libavutil/emms.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/mem.h"
42 #include "libavutil/mem_internal.h"
43 #include "libavutil/thread.h"
44 
45 #include "avcodec.h"
46 #include "codec_internal.h"
47 #include "decode.h"
48 #include "get_bits.h"
49 #include "hpeldsp.h"
50 #include "jpegquanttables.h"
51 #include "mathops.h"
52 #include "progressframe.h"
53 #include "libavutil/refstruct.h"
54 #include "thread.h"
55 #include "videodsp.h"
56 #include "vp3data.h"
57 #include "vp4data.h"
58 #include "vp3dsp.h"
59 #include "xiph.h"
60 
61 #define VP3_MV_VLC_BITS 6
62 #define VP4_MV_VLC_BITS 6
63 #define SUPERBLOCK_VLC_BITS 6
64 
65 #define FRAGMENT_PIXELS 8
66 
67 // FIXME split things out into their own arrays
68 typedef struct Vp3Fragment {
69  int16_t dc;
70  uint8_t coding_method;
71  uint8_t qpi;
72 } Vp3Fragment;
73 
74 #define SB_NOT_CODED 0
75 #define SB_PARTIALLY_CODED 1
76 #define SB_FULLY_CODED 2
77 
78 // This is the maximum length of a single long bit run that can be encoded
79 // for superblock coding or block qps. Theora special-cases this to read a
80 // bit instead of flipping the current bit to allow for runs longer than 4129.
81 #define MAXIMUM_LONG_BIT_RUN 4129
82 
83 #define MODE_INTER_NO_MV 0
84 #define MODE_INTRA 1
85 #define MODE_INTER_PLUS_MV 2
86 #define MODE_INTER_LAST_MV 3
87 #define MODE_INTER_PRIOR_LAST 4
88 #define MODE_USING_GOLDEN 5
89 #define MODE_GOLDEN_MV 6
90 #define MODE_INTER_FOURMV 7
91 #define CODING_MODE_COUNT 8
92 
93 /* special internal mode */
94 #define MODE_COPY 8
95 
96 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
97 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
98 
99 
100 /* There are 6 preset schemes, plus a free-form scheme */
101 static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
102  /* scheme 1: Last motion vector dominates */
107 
108  /* scheme 2 */
113 
114  /* scheme 3 */
119 
120  /* scheme 4 */
125 
126  /* scheme 5: No motion vector dominates */
131 
132  /* scheme 6 */
137 };
138 
139 static const uint8_t hilbert_offset[16][2] = {
140  { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
141  { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
142  { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
143  { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
144 };
145 
146 enum {
152 };
153 
154 static const uint8_t vp4_pred_block_type_map[8] = {
163 };
164 
165 static VLCElem superblock_run_length_vlc[88]; /* version < 2 */
166 static VLCElem fragment_run_length_vlc[56]; /* version < 2 */
167 static VLCElem motion_vector_vlc[112]; /* version < 2 */
168 
169 // The VP4 tables reuse this vlc.
170 static VLCElem mode_code_vlc[24 + 2108 * CONFIG_VP4_DECODER];
171 
172 #if CONFIG_VP4_DECODER
173 static const VLCElem *vp4_mv_vlc_table[2][7]; /* version >= 2 */
174 static const VLCElem *block_pattern_vlc[2]; /* version >= 2 */
175 #endif
176 
/* Per-fragment DC prediction state used by the VP4 decoder
 * (see dc_pred_row in Vp3DecodeContext). */
typedef struct {
    int dc;    // predicted/accumulated DC value
    int type;  // predictor type — presumably a frame-reference class; TODO confirm against the prediction code
} VP4Predictor;
181 
182 #define MIN_DEQUANT_VAL 2
183 
184 typedef struct HuffEntry {
185  uint8_t len, sym;
186 } HuffEntry;
187 
188 typedef struct HuffTable {
190  uint8_t nb_entries;
191 } HuffTable;
192 
193 typedef struct CoeffVLCs {
194  const VLCElem *vlc_tabs[80];
195  VLC vlcs[80];
196 } CoeffVLCs;
197 
198 typedef struct Vp3DecodeContext {
201  int version;
202  int width, height;
207  int keyframe;
208  uint8_t idct_permutation[64];
209  uint8_t idct_scantable[64];
213  DECLARE_ALIGNED(16, int16_t, block)[64];
217 
218  int qps[3];
219  int nqps;
220 
230  unsigned char *superblock_coding;
231 
232  int macroblock_count; /* y macroblock count */
238  int yuv_macroblock_count; /* y+u+v macroblock count */
239 
243 
246  int data_offset[3];
247  uint8_t offset_x;
248  uint8_t offset_y;
250 
251  int8_t (*motion_val[2])[2];
252 
253  /* tables */
254  uint16_t coded_dc_scale_factor[2][64];
255  uint32_t coded_ac_scale_factor[64];
256  uint8_t base_matrix[384][64];
257  uint8_t qr_count[2][3];
258  uint8_t qr_size[2][3][64];
259  uint16_t qr_base[2][3][64];
260 
261  /**
262  * This is a list of all tokens in bitstream order. Reordering takes place
263  * by pulling from each level during IDCT. As a consequence, IDCT must be
264  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
265  * otherwise. The 32 different tokens with up to 12 bits of extradata are
266  * collapsed into 3 types, packed as follows:
267  * (from the low to high bits)
268  *
269  * 2 bits: type (0,1,2)
270  * 0: EOB run, 14 bits for run length (12 needed)
271  * 1: zero run, 7 bits for run length
272  * 7 bits for the next coefficient (3 needed)
273  * 2: coefficient, 14 bits (11 needed)
274  *
275  * Coefficients are signed, so are packed in the highest bits for automatic
276  * sign extension.
277  */
278  int16_t *dct_tokens[3][64];
279  int16_t *dct_tokens_base;
280 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
281 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
282 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
283 
284  /**
285  * number of blocks that contain DCT coefficients at
286  * the given level or higher
287  */
288  int num_coded_frags[3][64];
290 
291  /* this is a list of indexes into the all_fragments array indicating
292  * which of the fragments are coded */
294 
298 
299  /**
300  * The first 16 of the following VLCs are for the dc coefficients;
301  * the others are four groups of 16 VLCs each for ac coefficients.
302  * This is a RefStruct reference to share these VLCs between threads.
303  */
305 
306  /* these arrays need to be on 16-byte boundaries since SSE2 operations
307  * index into them */
308  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
309 
310  /* This table contains superblock_count * 16 entries. Each set of 16
311  * numbers corresponds to the fragment indexes 0..15 of the superblock.
312  * An entry will be -1 to indicate that no entry corresponds to that
313  * index. */
315 
316  /* This is an array that indicates how a particular macroblock
317  * is coded. */
318  unsigned char *macroblock_coding;
319 
320  uint8_t *edge_emu_buffer;
321 
322  /* Huffman decode */
324 
325  uint8_t filter_limit_values[64];
327 
328  VP4Predictor * dc_pred_row; /* dc_pred_row[y_superblock_width * 4] */
330 
331 /************************************************************************
332  * VP3 specific functions
333  ************************************************************************/
334 
/* Free all geometry-dependent decode tables (superblock/macroblock coding
 * state, fragment lists, DCT token storage, DC prediction row and motion
 * vectors). av_freep() also resets each pointer to NULL, so this is safe
 * to call on a partially initialized context. */
static av_cold void free_tables(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;

    av_freep(&s->superblock_coding);
    av_freep(&s->all_fragments);
    av_freep(&s->nkf_coded_fragment_list);
    av_freep(&s->kf_coded_fragment_list);
    av_freep(&s->dct_tokens_base);
    av_freep(&s->superblock_fragments);
    av_freep(&s->macroblock_coding);
    av_freep(&s->dc_pred_row);
    av_freep(&s->motion_val[0]);
    av_freep(&s->motion_val[1]);
}
350 
352 {
353  Vp3DecodeContext *s = avctx->priv_data;
354 
355  ff_progress_frame_unref(&s->golden_frame);
356  ff_progress_frame_unref(&s->last_frame);
357  ff_progress_frame_unref(&s->current_frame);
358 }
359 
361 {
362  Vp3DecodeContext *s = avctx->priv_data;
363 
364  free_tables(avctx);
365  av_freep(&s->edge_emu_buffer);
366 
367  s->theora_tables = 0;
368 
369  /* release all frames */
370  vp3_decode_flush(avctx);
371 
372  av_refstruct_unref(&s->coeff_vlc);
373 
374  return 0;
375 }
376 
377 /**
378  * This function sets up all of the various blocks mappings:
379  * superblocks <-> fragments, macroblocks <-> fragments,
380  * superblocks <-> macroblocks
381  *
 * @return 0 if successful; returns 1 if *anything* went wrong.
383  */
385 {
386  int j = 0;
387 
388  for (int plane = 0; plane < 3; plane++) {
389  int sb_width = plane ? s->c_superblock_width
390  : s->y_superblock_width;
391  int sb_height = plane ? s->c_superblock_height
392  : s->y_superblock_height;
393  int frag_width = s->fragment_width[!!plane];
394  int frag_height = s->fragment_height[!!plane];
395 
396  for (int sb_y = 0; sb_y < sb_height; sb_y++)
397  for (int sb_x = 0; sb_x < sb_width; sb_x++)
398  for (int i = 0; i < 16; i++) {
399  int x = 4 * sb_x + hilbert_offset[i][0];
400  int y = 4 * sb_y + hilbert_offset[i][1];
401 
402  if (x < frag_width && y < frag_height)
403  s->superblock_fragments[j++] = s->fragment_start[plane] +
404  y * frag_width + x;
405  else
406  s->superblock_fragments[j++] = -1;
407  }
408  }
409 
410  return 0; /* successful path out */
411 }
412 
413 /*
414  * This function sets up the dequantization tables used for a particular
415  * frame.
416  */
417 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
418 {
419  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
420 
421  for (int inter = 0; inter < 2; inter++) {
422  for (int plane = 0; plane < 3; plane++) {
423  int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
424  int sum = 0, bmi, bmj, qistart, qri;
425  for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
426  sum += s->qr_size[inter][plane][qri];
427  if (s->qps[qpi] <= sum)
428  break;
429  }
430  qistart = sum - s->qr_size[inter][plane][qri];
431  bmi = s->qr_base[inter][plane][qri];
432  bmj = s->qr_base[inter][plane][qri + 1];
433  for (int i = 0; i < 64; i++) {
434  int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
435  2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
436  s->qr_size[inter][plane][qri]) /
437  (2 * s->qr_size[inter][plane][qri]);
438 
439  int qmin = 8 << (inter + !i);
440  int qscale = i ? ac_scale_factor : dc_scale_factor;
441  int qbias = (1 + inter) * 3;
442  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
443  (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
444  : (qscale * (coeff - qbias) / 100 + qbias) * 4;
445  }
446  /* all DC coefficients use the same quant so as not to interfere
447  * with DC prediction */
448  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
449  }
450  }
451 }
452 
453 /*
454  * This function initializes the loop filter boundary limits if the frame's
455  * quality index is different from the previous frame's.
456  *
457  * The filter_limit_values may not be larger than 127.
458  */
460 {
461  ff_vp3dsp_set_bounding_values(s->bounding_values_array, s->filter_limit_values[s->qps[0]]);
462 }
463 
464 /*
465  * This function unpacks all of the superblock/macroblock/fragment coding
466  * information from the bitstream.
467  */
469 {
470  const int superblock_starts[3] = {
471  0, s->u_superblock_start, s->v_superblock_start
472  };
473  int bit = 0;
474  int current_superblock = 0;
475  int current_run = 0;
476  int num_partial_superblocks = 0;
477 
478  int current_fragment;
479  int plane0_num_coded_frags = 0;
480 
481  if (s->keyframe) {
482  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
483  } else {
484  /* unpack the list of partially-coded superblocks */
485  bit = get_bits1(gb) ^ 1;
486  current_run = 0;
487 
488  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
489  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
490  bit = get_bits1(gb);
491  else
492  bit ^= 1;
493 
494  current_run = get_vlc2(gb, superblock_run_length_vlc,
496  if (current_run == 34)
497  current_run += get_bits(gb, 12);
498 
499  if (current_run > s->superblock_count - current_superblock) {
500  av_log(s->avctx, AV_LOG_ERROR,
501  "Invalid partially coded superblock run length\n");
502  return -1;
503  }
504 
505  memset(s->superblock_coding + current_superblock, bit, current_run);
506 
507  current_superblock += current_run;
508  if (bit)
509  num_partial_superblocks += current_run;
510  }
511 
512  /* unpack the list of fully coded superblocks if any of the blocks were
513  * not marked as partially coded in the previous step */
514  if (num_partial_superblocks < s->superblock_count) {
515  int superblocks_decoded = 0;
516 
517  current_superblock = 0;
518  bit = get_bits1(gb) ^ 1;
519  current_run = 0;
520 
521  while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
522  get_bits_left(gb) > 0) {
523  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
524  bit = get_bits1(gb);
525  else
526  bit ^= 1;
527 
528  current_run = get_vlc2(gb, superblock_run_length_vlc,
530  if (current_run == 34)
531  current_run += get_bits(gb, 12);
532 
533  for (int j = 0; j < current_run; current_superblock++) {
534  if (current_superblock >= s->superblock_count) {
535  av_log(s->avctx, AV_LOG_ERROR,
536  "Invalid fully coded superblock run length\n");
537  return -1;
538  }
539 
540  /* skip any superblocks already marked as partially coded */
541  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
542  s->superblock_coding[current_superblock] = 2 * bit;
543  j++;
544  }
545  }
546  superblocks_decoded += current_run;
547  }
548  }
549 
550  /* if there were partial blocks, initialize bitstream for
551  * unpacking fragment codings */
552  if (num_partial_superblocks) {
553  current_run = 0;
554  bit = get_bits1(gb);
555  /* toggle the bit because as soon as the first run length is
556  * fetched the bit will be toggled again */
557  bit ^= 1;
558  }
559  }
560 
561  /* figure out which fragments are coded; iterate through each
562  * superblock (all planes) */
563  s->total_num_coded_frags = 0;
564  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
565 
566  s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list
567  : s->nkf_coded_fragment_list;
568 
569  for (int plane = 0; plane < 3; plane++) {
570  int sb_start = superblock_starts[plane];
571  int sb_end = sb_start + (plane ? s->c_superblock_count
572  : s->y_superblock_count);
573  int num_coded_frags = 0;
574 
575  if (s->keyframe) {
576  if (s->num_kf_coded_fragment[plane] == -1) {
577  for (int i = sb_start; i < sb_end; i++) {
578  /* iterate through all 16 fragments in a superblock */
579  for (int j = 0; j < 16; j++) {
580  /* if the fragment is in bounds, check its coding status */
581  current_fragment = s->superblock_fragments[i * 16 + j];
582  if (current_fragment != -1) {
583  s->coded_fragment_list[plane][num_coded_frags++] =
584  current_fragment;
585  }
586  }
587  }
588  s->num_kf_coded_fragment[plane] = num_coded_frags;
589  } else
590  num_coded_frags = s->num_kf_coded_fragment[plane];
591  } else {
592  for (int i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
593  if (get_bits_left(gb) < plane0_num_coded_frags >> 2) {
594  return AVERROR_INVALIDDATA;
595  }
596  /* iterate through all 16 fragments in a superblock */
597  for (int j = 0; j < 16; j++) {
598  /* if the fragment is in bounds, check its coding status */
599  current_fragment = s->superblock_fragments[i * 16 + j];
600  if (current_fragment != -1) {
601  int coded = s->superblock_coding[i];
602 
603  if (coded == SB_PARTIALLY_CODED) {
604  /* fragment may or may not be coded; this is the case
605  * that cares about the fragment coding runs */
606  if (current_run-- == 0) {
607  bit ^= 1;
608  current_run = get_vlc2(gb, fragment_run_length_vlc, 5, 2);
609  }
610  coded = bit;
611  }
612 
613  if (coded) {
614  /* default mode; actual mode will be decoded in
615  * the next phase */
616  s->all_fragments[current_fragment].coding_method =
618  s->coded_fragment_list[plane][num_coded_frags++] =
619  current_fragment;
620  } else {
621  /* not coded; copy this fragment from the prior frame */
622  s->all_fragments[current_fragment].coding_method =
623  MODE_COPY;
624  }
625  }
626  }
627  }
628  }
629  if (!plane)
630  plane0_num_coded_frags = num_coded_frags;
631  s->total_num_coded_frags += num_coded_frags;
632  for (int i = 0; i < 64; i++)
633  s->num_coded_frags[plane][i] = num_coded_frags;
634  if (plane < 2)
635  s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
636  num_coded_frags;
637  }
638  return 0;
639 }
640 
641 #define BLOCK_X (2 * mb_x + (k & 1))
642 #define BLOCK_Y (2 * mb_y + (k >> 1))
643 
644 #if CONFIG_VP4_DECODER
645 /**
646  * @return number of blocks, or > yuv_macroblock_count on error.
647  * return value is always >= 1.
648  */
static int vp4_get_mb_count(Vp3DecodeContext *s, GetBitContext *gb)
{
    int v = 1;
    int bits;
    /* Each all-ones 9-bit prefix stands for a full run of 256 blocks;
     * consume those first and abort once the count exceeds the frame
     * (the caller checks for v > yuv_macroblock_count). */
    while ((bits = show_bits(gb, 9)) == 0x1ff) {
        skip_bits(gb, 9);
        v += 256;
        if (v > s->yuv_macroblock_count) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid run length\n");
            return v;
        }
    }
    /* Remaining code: a variable-length prefix selects how many literal
     * bits follow; body(n) skips the (2+n)-bit prefix and adds the
     * n-bit remainder offset by 2^n. thresh(n) is the 9-bit show_bits
     * value below which the n-bit class applies. */
#define body(n) { \
    skip_bits(gb, 2 + n); \
    v += (1 << n) + get_bits(gb, n); }
#define thresh(n) (0x200 - (0x80 >> n))
#define else_if(n) else if (bits < thresh(n)) body(n)
    if (bits < 0x100) {
        /* leading 0: run of 1 */
        skip_bits(gb, 1);
    } else if (bits < thresh(0)) {
        /* prefix 10: run of 2, no extra bits */
        skip_bits(gb, 2);
        v += 1;
    }
    else_if(1)
    else_if(2)
    else_if(3)
    else_if(4)
    else_if(5)
    else_if(6)
    else body(7)
#undef body
#undef thresh
#undef else_if
    return v;
}
684 
685 static int vp4_get_block_pattern(GetBitContext *gb, int *next_block_pattern_table)
686 {
687  int v = get_vlc2(gb, block_pattern_vlc[*next_block_pattern_table], 5, 1);
688  *next_block_pattern_table = vp4_block_pattern_table_selector[v];
689  return v + 1;
690 }
691 
/* Unpack the VP4 macroblock coding information: which macroblocks are
 * fully/partially/not coded, and, for partially coded ones, which of the
 * four luma blocks are coded. Returns 0 on success, negative on error.
 *
 * NOTE: despite its name, s->superblock_coding is reused here as a
 * per-*macroblock* status array (indexed 0..yuv_macroblock_count-1). */
static int vp4_unpack_macroblocks(Vp3DecodeContext *s, GetBitContext *gb)
{
    int fragment;
    int next_block_pattern_table;
    int bit, current_run, has_partial;

    memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);

    if (s->keyframe)
        return 0;

    /* Phase 1: alternating runs of fully-coded (SB_FULLY_CODED) and
     * not-yet-classified (0) macroblocks. */
    has_partial = 0;
    bit = get_bits1(gb);
    for (int i = 0; i < s->yuv_macroblock_count; i += current_run) {
        if (get_bits_left(gb) <= 0)
            return AVERROR_INVALIDDATA;
        current_run = vp4_get_mb_count(s, gb);
        if (current_run > s->yuv_macroblock_count - i)
            return -1;
        memset(s->superblock_coding + i, 2 * bit, current_run);
        bit ^= 1;
        has_partial |= bit;
    }

    /* Phase 2: for the macroblocks left unclassified above, alternating
     * runs decide between SB_PARTIALLY_CODED (1) and SB_NOT_CODED (0). */
    if (has_partial) {
        if (get_bits_left(gb) <= 0)
            return AVERROR_INVALIDDATA;
        bit = get_bits1(gb);
        current_run = vp4_get_mb_count(s, gb);
        for (int i = 0; i < s->yuv_macroblock_count; i++) {
            if (!s->superblock_coding[i]) {
                if (!current_run) {
                    bit ^= 1;
                    current_run = vp4_get_mb_count(s, gb);
                }
                s->superblock_coding[i] = bit;
                current_run--;
            }
        }
        if (current_run) /* handle situation when vp4_get_mb_count() fails */
            return -1;
    }

    /* Phase 3: walk macroblocks in coding order; partially coded ones
     * carry a block pattern telling which of the 4 fragments are coded. */
    next_block_pattern_table = 0;
    for (int plane = 0, i = 0; plane < 3; plane++) {
        int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
        int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
        int mb_width = plane ? s->c_macroblock_width : s->macroblock_width;
        int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
        int fragment_width = s->fragment_width[!!plane];
        int fragment_height = s->fragment_height[!!plane];

        for (int sb_y = 0; sb_y < sb_height; sb_y++) {
            for (int sb_x = 0; sb_x < sb_width; sb_x++) {
                for (int j = 0; j < 4; j++) {
                    int mb_x = 2 * sb_x + (j >> 1);
                    /* '+' binds tighter than '^': the XOR flips the row
                     * parity to visit macroblocks in coding order */
                    int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
                    int mb_coded, pattern, coded;

                    if (mb_x >= mb_width || mb_y >= mb_height)
                        continue;

                    mb_coded = s->superblock_coding[i++];

                    if (mb_coded == SB_FULLY_CODED)
                        pattern = 0xF;
                    else if (mb_coded == SB_PARTIALLY_CODED)
                        pattern = vp4_get_block_pattern(gb, &next_block_pattern_table);
                    else
                        pattern = 0;

                    for (int k = 0; k < 4; k++) {
                        if (BLOCK_X >= fragment_width || BLOCK_Y >= fragment_height)
                            continue;
                        fragment = s->fragment_start[plane] + BLOCK_Y * fragment_width + BLOCK_X;
                        coded = pattern & (8 >> k);
                        /* MODE_INTER_NO_MV is the default for coded fragments.
                           the actual method is decoded in the next phase. */
                        s->all_fragments[fragment].coding_method = coded ? MODE_INTER_NO_MV : MODE_COPY;
                    }
                }
            }
        }
    }
    return 0;
}
778 #endif
779 
780 /*
781  * This function unpacks all the coding mode data for individual macroblocks
782  * from the bitstream.
783  */
785 {
786  int scheme;
787  int current_macroblock;
788  int current_fragment;
789  int coding_mode;
790  int custom_mode_alphabet[CODING_MODE_COUNT];
791  const int *alphabet;
792  Vp3Fragment *frag;
793 
794  if (s->keyframe) {
795  for (int i = 0; i < s->fragment_count; i++)
796  s->all_fragments[i].coding_method = MODE_INTRA;
797  } else {
798  /* fetch the mode coding scheme for this frame */
799  scheme = get_bits(gb, 3);
800 
801  /* is it a custom coding scheme? */
802  if (scheme == 0) {
803  for (int i = 0; i < 8; i++)
804  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
805  for (int i = 0; i < 8; i++)
806  custom_mode_alphabet[get_bits(gb, 3)] = i;
807  alphabet = custom_mode_alphabet;
808  } else
809  alphabet = ModeAlphabet[scheme - 1];
810 
811  /* iterate through all of the macroblocks that contain 1 or more
812  * coded fragments */
813  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
814  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
815  if (get_bits_left(gb) <= 0)
816  return -1;
817 
818  for (int j = 0; j < 4; j++) {
819  int k;
820  int mb_x = 2 * sb_x + (j >> 1);
821  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
822  current_macroblock = mb_y * s->macroblock_width + mb_x;
823 
824  if (mb_x >= s->macroblock_width ||
825  mb_y >= s->macroblock_height)
826  continue;
827 
828  /* coding modes are only stored if the macroblock has
829  * at least one luma block coded, otherwise it must be
830  * INTER_NO_MV */
831  for (k = 0; k < 4; k++) {
832  current_fragment = BLOCK_Y *
833  s->fragment_width[0] + BLOCK_X;
834  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
835  break;
836  }
837  if (k == 4) {
838  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
839  continue;
840  }
841 
842  /* mode 7 means get 3 bits for each coding mode */
843  if (scheme == 7)
844  coding_mode = get_bits(gb, 3);
845  else
846  coding_mode = alphabet[get_vlc2(gb, mode_code_vlc, 4, 2)];
847 
848  s->macroblock_coding[current_macroblock] = coding_mode;
849  for (k = 0; k < 4; k++) {
850  frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X;
851  if (frag->coding_method != MODE_COPY)
852  frag->coding_method = coding_mode;
853  }
854 
855 #define SET_CHROMA_MODES \
856  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
857  frag[s->fragment_start[1]].coding_method = coding_mode; \
858  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
859  frag[s->fragment_start[2]].coding_method = coding_mode;
860 
861  if (s->chroma_y_shift) {
862  frag = s->all_fragments + mb_y *
863  s->fragment_width[1] + mb_x;
865  } else if (s->chroma_x_shift) {
866  frag = s->all_fragments +
867  2 * mb_y * s->fragment_width[1] + mb_x;
868  for (k = 0; k < 2; k++) {
870  frag += s->fragment_width[1];
871  }
872  } else {
873  for (k = 0; k < 4; k++) {
874  frag = s->all_fragments +
875  BLOCK_Y * s->fragment_width[1] + BLOCK_X;
877  }
878  }
879  }
880  }
881  }
882  }
883 
884  return 0;
885 }
886 
887 static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
888 {
889 #if CONFIG_VP4_DECODER
890  int v = get_vlc2(gb, vp4_mv_vlc_table[axis][vp4_mv_table_selector[FFABS(last_motion)]],
891  VP4_MV_VLC_BITS, 2);
892  return last_motion < 0 ? -v : v;
893 #else
894  return 0;
895 #endif
896 }
897 
898 /*
899  * This function unpacks all the motion vectors for the individual
900  * macroblocks from the bitstream.
901  */
903 {
904  int coding_mode;
905  int motion_x[4];
906  int motion_y[4];
907  int last_motion_x = 0;
908  int last_motion_y = 0;
909  int prior_last_motion_x = 0;
910  int prior_last_motion_y = 0;
911  int last_gold_motion_x = 0;
912  int last_gold_motion_y = 0;
913  int current_macroblock;
914  int current_fragment;
915  int frag;
916 
917  if (s->keyframe)
918  return 0;
919 
920  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme; 2 is VP4 code scheme */
921  coding_mode = s->version < 2 ? get_bits1(gb) : 2;
922 
923  /* iterate through all of the macroblocks that contain 1 or more
924  * coded fragments */
925  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
926  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
927  if (get_bits_left(gb) <= 0)
928  return -1;
929 
930  for (int j = 0; j < 4; j++) {
931  int mb_x = 2 * sb_x + (j >> 1);
932  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
933  current_macroblock = mb_y * s->macroblock_width + mb_x;
934 
935  if (mb_x >= s->macroblock_width ||
936  mb_y >= s->macroblock_height ||
937  s->macroblock_coding[current_macroblock] == MODE_COPY)
938  continue;
939 
940  switch (s->macroblock_coding[current_macroblock]) {
941  case MODE_GOLDEN_MV:
942  if (coding_mode == 2) { /* VP4 */
943  last_gold_motion_x = motion_x[0] = vp4_get_mv(gb, 0, last_gold_motion_x);
944  last_gold_motion_y = motion_y[0] = vp4_get_mv(gb, 1, last_gold_motion_y);
945  break;
946  }
948  case MODE_INTER_PLUS_MV:
949  /* all 6 fragments use the same motion vector */
950  if (coding_mode == 0) {
951  motion_x[0] = get_vlc2(gb, motion_vector_vlc,
952  VP3_MV_VLC_BITS, 2);
953  motion_y[0] = get_vlc2(gb, motion_vector_vlc,
954  VP3_MV_VLC_BITS, 2);
955  } else if (coding_mode == 1) {
956  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
957  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
958  } else { /* VP4 */
959  motion_x[0] = vp4_get_mv(gb, 0, last_motion_x);
960  motion_y[0] = vp4_get_mv(gb, 1, last_motion_y);
961  }
962 
963  /* vector maintenance, only on MODE_INTER_PLUS_MV */
964  if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
965  prior_last_motion_x = last_motion_x;
966  prior_last_motion_y = last_motion_y;
967  last_motion_x = motion_x[0];
968  last_motion_y = motion_y[0];
969  }
970  break;
971 
972  case MODE_INTER_FOURMV:
973  /* vector maintenance */
974  prior_last_motion_x = last_motion_x;
975  prior_last_motion_y = last_motion_y;
976 
977  /* fetch 4 vectors from the bitstream, one for each
978  * Y fragment, then average for the C fragment vectors */
979  for (int k = 0; k < 4; k++) {
980  current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X;
981  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
982  if (coding_mode == 0) {
983  motion_x[k] = get_vlc2(gb, motion_vector_vlc,
984  VP3_MV_VLC_BITS, 2);
985  motion_y[k] = get_vlc2(gb, motion_vector_vlc,
986  VP3_MV_VLC_BITS, 2);
987  } else if (coding_mode == 1) {
988  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
989  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
990  } else { /* VP4 */
991  motion_x[k] = vp4_get_mv(gb, 0, prior_last_motion_x);
992  motion_y[k] = vp4_get_mv(gb, 1, prior_last_motion_y);
993  }
994  last_motion_x = motion_x[k];
995  last_motion_y = motion_y[k];
996  } else {
997  motion_x[k] = 0;
998  motion_y[k] = 0;
999  }
1000  }
1001  break;
1002 
1003  case MODE_INTER_LAST_MV:
1004  /* all 6 fragments use the last motion vector */
1005  motion_x[0] = last_motion_x;
1006  motion_y[0] = last_motion_y;
1007 
1008  /* no vector maintenance (last vector remains the
1009  * last vector) */
1010  break;
1011 
1012  case MODE_INTER_PRIOR_LAST:
1013  /* all 6 fragments use the motion vector prior to the
1014  * last motion vector */
1015  motion_x[0] = prior_last_motion_x;
1016  motion_y[0] = prior_last_motion_y;
1017 
1018  /* vector maintenance */
1019  prior_last_motion_x = last_motion_x;
1020  prior_last_motion_y = last_motion_y;
1021  last_motion_x = motion_x[0];
1022  last_motion_y = motion_y[0];
1023  break;
1024 
1025  default:
1026  /* covers intra, inter without MV, golden without MV */
1027  motion_x[0] = 0;
1028  motion_y[0] = 0;
1029 
1030  /* no vector maintenance */
1031  break;
1032  }
1033 
1034  /* assign the motion vectors to the correct fragments */
1035  for (int k = 0; k < 4; k++) {
1036  current_fragment =
1037  BLOCK_Y * s->fragment_width[0] + BLOCK_X;
1038  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1039  s->motion_val[0][current_fragment][0] = motion_x[k];
1040  s->motion_val[0][current_fragment][1] = motion_y[k];
1041  } else {
1042  s->motion_val[0][current_fragment][0] = motion_x[0];
1043  s->motion_val[0][current_fragment][1] = motion_y[0];
1044  }
1045  }
1046 
1047  if (s->chroma_y_shift) {
1048  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1049  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1050  motion_x[2] + motion_x[3], 2);
1051  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1052  motion_y[2] + motion_y[3], 2);
1053  }
1054  if (s->version <= 2) {
1055  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1056  motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1057  }
1058  frag = mb_y * s->fragment_width[1] + mb_x;
1059  s->motion_val[1][frag][0] = motion_x[0];
1060  s->motion_val[1][frag][1] = motion_y[0];
1061  } else if (s->chroma_x_shift) {
1062  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1063  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1064  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1065  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1066  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1067  } else {
1068  motion_x[1] = motion_x[0];
1069  motion_y[1] = motion_y[0];
1070  }
1071  if (s->version <= 2) {
1072  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1073  motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1074  }
1075  frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1076  for (int k = 0; k < 2; k++) {
1077  s->motion_val[1][frag][0] = motion_x[k];
1078  s->motion_val[1][frag][1] = motion_y[k];
1079  frag += s->fragment_width[1];
1080  }
1081  } else {
1082  for (int k = 0; k < 4; k++) {
1083  frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X;
1084  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1085  s->motion_val[1][frag][0] = motion_x[k];
1086  s->motion_val[1][frag][1] = motion_y[k];
1087  } else {
1088  s->motion_val[1][frag][0] = motion_x[0];
1089  s->motion_val[1][frag][1] = motion_y[0];
1090  }
1091  }
1092  }
1093  }
1094  }
1095  }
1096 
1097  return 0;
1098 }
1099 
{
    int num_blocks = s->total_num_coded_frags;

    /* For each quality index but the last, read bit runs that partition the
     * still-unassigned coded fragments between this qpi and higher ones. */
    for (int qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
        int i = 0, blocks_decoded = 0, num_blocks_at_qpi = 0;
        int bit, run_length;

        bit = get_bits1(gb) ^ 1;
        run_length = 0;

        do {
            /* after a maximum-length run the next bit is coded explicitly;
             * otherwise runs alternate, so the bit simply flips */
            if (run_length == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;

            run_length = get_vlc2(gb, superblock_run_length_vlc,
                                  SUPERBLOCK_VLC_BITS, 2);
            if (run_length == 34)
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            /* bit == 0 means the fragments in this run stay at this qpi */
            if (!bit)
                num_blocks_at_qpi += run_length;

            /* apply the run to the next run_length fragments that are still
             * at the current qpi; fragments already bumped are passed over */
            for (int j = 0; j < run_length; i++) {
                if (i >= s->total_num_coded_frags)
                    return -1;

                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
                    j++;
                }
            }
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}
1142 
1143 static inline int get_eob_run(GetBitContext *gb, int token)
1144 {
1145  int v = eob_run_table[token].base;
1146  if (eob_run_table[token].bits)
1147  v += get_bits(gb, eob_run_table[token].bits);
1148  return v;
1149 }
1150 
1151 static inline int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
1152 {
1153  int bits_to_get, zero_run;
1154 
1155  bits_to_get = coeff_get_bits[token];
1156  if (bits_to_get)
1157  bits_to_get = get_bits(gb, bits_to_get);
1158  *coeff = coeff_tables[token][bits_to_get];
1159 
1160  zero_run = zero_run_base[token];
1161  if (zero_run_get_bits[token])
1162  zero_run += get_bits(gb, zero_run_get_bits[token]);
1163 
1164  return zero_run;
1165 }
1166 
1167 /*
1168  * This function is called by unpack_dct_coeffs() to extract the VLCs from
1169  * the bitstream. The VLCs encode tokens which are used to unpack DCT
1170  * data. This function unpacks all the VLCs for either the Y plane or both
1171  * C planes, and is called for DC coefficients or different AC coefficient
1172  * levels (since different coefficient types require different VLC tables).
1173  *
1174  * This function returns a residual eob run. E.g, if a particular token gave
1175  * instructions to EOB the next 5 fragments and there were only 2 fragments
1176  * left in the current fragment range, 3 would be returned so that it could
1177  * be passed into the next call to this same function.
1178  */
                       const VLCElem *vlc_table, int coeff_index,
                       int plane,
                       int eob_run)
{
    int j = 0;
    int token;
    int zero_run  = 0;
    int16_t coeff = 0;
    int blocks_ended;
    int coeff_i = 0;
    int num_coeffs = s->num_coded_frags[plane][coeff_index];
    int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];

    /* local references to structure members to avoid repeated dereferences */
    const int *coded_fragment_list = s->coded_fragment_list[plane];
    Vp3Fragment *all_fragments = s->all_fragments;

    if (num_coeffs < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid number of coefficients at level %d\n", coeff_index);
        return AVERROR_INVALIDDATA;
    }

    /* consume the residual EOB run carried over from the previous call;
     * it may cover some (or all) of this plane's coded fragments */
    if (eob_run > num_coeffs) {
        coeff_i      =
        blocks_ended = num_coeffs;
        eob_run     -= num_coeffs;
    } else {
        coeff_i      =
        blocks_ended = eob_run;
        eob_run      = 0;
    }

    // insert fake EOB token to cover the split between planes or zzi
    if (blocks_ended)
        dct_tokens[j++] = blocks_ended << 2;

    while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
        /* decode a VLC into a token */
        token = get_vlc2(gb, vlc_table, 11, 3);
        /* use the token to get a zero run, a coefficient, and an eob run */
        if ((unsigned) token <= 6U) {
            /* tokens 0-6 are EOB runs; a decoded run of 0 means
             * "end all remaining blocks" */
            eob_run = get_eob_run(gb, token);
            if (!eob_run)
                eob_run = INT_MAX;

            // record only the number of blocks ended in this plane,
            // any spill will be recorded in the next plane.
            if (eob_run > num_coeffs - coeff_i) {
                dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
                blocks_ended   += num_coeffs - coeff_i;
                eob_run        -= num_coeffs - coeff_i;
                coeff_i         = num_coeffs;
            } else {
                dct_tokens[j++] = TOKEN_EOB(eob_run);
                blocks_ended   += eob_run;
                coeff_i        += eob_run;
                eob_run         = 0;
            }
        } else if (token >= 0) {
            zero_run = get_coeff(gb, token, &coeff);

            if (zero_run) {
                dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
            } else {
                // Save DC into the fragment structure. DC prediction is
                // done in raster order, so the actual DC can't be in with
                // other tokens. We still need the token in dct_tokens[]
                // however, or else the structure collapses on itself.
                if (!coeff_index)
                    all_fragments[coded_fragment_list[coeff_i]].dc = coeff;

                dct_tokens[j++] = TOKEN_COEFF(coeff);
            }

            /* clamp zero runs that would step past the last coefficient */
            if (coeff_index + zero_run > 64) {
                av_log(s->avctx, AV_LOG_DEBUG,
                       "Invalid zero run of %d with %d coeffs left\n",
                       zero_run, 64 - coeff_index);
                zero_run = 64 - coeff_index;
            }

            // zero runs code multiple coefficients,
            // so don't try to decode coeffs for those higher levels
            for (int i = coeff_index + 1; i <= coeff_index + zero_run; i++)
                s->num_coded_frags[plane][i]--;
            coeff_i++;
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
            return -1;
        }
    }

    if (blocks_ended > s->num_coded_frags[plane][coeff_index])
        av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");

    // decrement the number of blocks that have higher coefficients for each
    // EOB run at this level
    if (blocks_ended)
        for (int i = coeff_index + 1; i < 64; i++)
            s->num_coded_frags[plane][i] -= blocks_ended;

    // setup the next buffer
    if (plane < 2)
        s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
    else if (coeff_index < 63)
        s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;

    return eob_run;
}
1290 
1292  int first_fragment,
1293  int fragment_width,
1294  int fragment_height);
1295 /*
1296  * This function unpacks all of the DCT coefficient data from the
1297  * bitstream.
1298  */
{
    const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    int residual_eob_run = 0;
    const VLCElem *y_tables[64], *c_tables[64];

    s->dct_tokens[0][0] = s->dct_tokens_base;

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_y_table], 0,
                                   0, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;

    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);

    /* unpack the C plane DC coefficients; the residual EOB run is carried
     * from one unpack_vlcs() call into the next */
    residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
                                   1, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
                                   2, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;

    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        reverse_dc_prediction(s, s->fragment_start[1],
                              s->fragment_width[1], s->fragment_height[1]);
        reverse_dc_prediction(s, s->fragment_start[2],
                              s->fragment_width[1], s->fragment_height[1]);
    }

    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;
    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of AC VLC tables: the table used depends on which
     * coefficient-index group the level falls into */
    for (int i = 1; i <= 5; i++) {
        /* AC VLC table group 1 */
        y_tables[i] = coeff_vlc[ac_y_table + 16];
        c_tables[i] = coeff_vlc[ac_c_table + 16];
    }
    for (int i = 6; i <= 14; i++) {
        /* AC VLC table group 2 */
        y_tables[i] = coeff_vlc[ac_y_table + 32];
        c_tables[i] = coeff_vlc[ac_c_table + 32];
    }
    for (int i = 15; i <= 27; i++) {
        /* AC VLC table group 3 */
        y_tables[i] = coeff_vlc[ac_y_table + 48];
        c_tables[i] = coeff_vlc[ac_c_table + 48];
    }
    for (int i = 28; i <= 63; i++) {
        /* AC VLC table group 4 */
        y_tables[i] = coeff_vlc[ac_y_table + 64];
        c_tables[i] = coeff_vlc[ac_c_table + 64];
    }

    /* decode all AC coefficients, level by level, Y plane then C planes */
    for (int i = 1; i <= 63; i++) {
        residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
                                       0, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;

        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       1, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       2, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
    }

    return 0;
}
1394 
1395 #if CONFIG_VP4_DECODER
1396 /**
1397  * eob_tracker[] is instead of TOKEN_EOB(value)
1398  * a dummy TOKEN_EOB(0) value is used to make vp3_dequant work
1399  *
1400  * @return < 0 on error
1401  */
static int vp4_unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                           const VLCElem *const vlc_tables[64],
                           int plane, int eob_tracker[64], int fragment)
{
    int token;
    int zero_run  = 0;
    int16_t coeff = 0;
    int coeff_i = 0;
    int eob_run;

    /* walk the coefficient levels of this fragment until an EOB applies */
    while (!eob_tracker[coeff_i]) {
        if (get_bits_left(gb) < 1)
            return AVERROR_INVALIDDATA;

        token = get_vlc2(gb, vlc_tables[coeff_i], 11, 3);

        /* use the token to get a zero run, a coefficient, and an eob run */
        if ((unsigned) token <= 6U) {
            /* an EOB run ends this fragment at the current level and
             * pre-terminates the next (eob_run - 1) fragments there too */
            eob_run = get_eob_run(gb, token);
            *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
            eob_tracker[coeff_i] = eob_run - 1;
            return 0;
        } else if (token >= 0) {
            zero_run = get_coeff(gb, token, &coeff);

            if (zero_run) {
                /* clamp runs that would step past the last coefficient */
                if (coeff_i + zero_run > 64) {
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "Invalid zero run of %d with %d coeffs left\n",
                           zero_run, 64 - coeff_i);
                    zero_run = 64 - coeff_i;
                }
                *s->dct_tokens[plane][coeff_i]++ = TOKEN_ZERO_RUN(coeff, zero_run);
                coeff_i += zero_run;
            } else {
                /* the raw DC is stored in the fragment structure; the
                 * caller applies DC prediction afterwards */
                if (!coeff_i)
                    s->all_fragments[fragment].dc = coeff;

                *s->dct_tokens[plane][coeff_i]++ = TOKEN_COEFF(coeff);
            }
            coeff_i++;
            if (coeff_i >= 64) /* > 64 occurs when there is a zero_run overflow */
                return 0; /* stop */
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
            return -1;
        }
    }
    /* this fragment is covered by a pending EOB run at level coeff_i */
    *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
    eob_tracker[coeff_i]--;
    return 0;
}
1454 
1455 static void vp4_dc_predictor_reset(VP4Predictor *p)
1456 {
1457  p->dc = 0;
1458  p->type = VP4_DC_UNDEFINED;
1459 }
1460 
1461 static void vp4_dc_pred_before(const Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1462 {
1463  for (int i = 0; i < 4; i++)
1464  dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1465 
1466  for (int j = 1; j < 5; j++)
1467  for (int i = 0; i < 4; i++)
1468  vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1469 }
1470 
1471 static void vp4_dc_pred_after(Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1472 {
1473  for (int i = 0; i < 4; i++)
1474  s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1475 
1476  for (int i = 1; i < 5; i++)
1477  dc_pred[i][0] = dc_pred[i][4];
1478 }
1479 
1480 /* note: dc_pred points to the current block */
1481 static int vp4_dc_pred(const Vp3DecodeContext *s, const VP4Predictor * dc_pred, const int * last_dc, int type, int plane)
1482 {
1483  int count = 0;
1484  int dc = 0;
1485 
1486  if (dc_pred[-6].type == type) {
1487  dc += dc_pred[-6].dc;
1488  count++;
1489  }
1490 
1491  if (dc_pred[6].type == type) {
1492  dc += dc_pred[6].dc;
1493  count++;
1494  }
1495 
1496  if (count != 2 && dc_pred[-1].type == type) {
1497  dc += dc_pred[-1].dc;
1498  count++;
1499  }
1500 
1501  if (count != 2 && dc_pred[1].type == type) {
1502  dc += dc_pred[1].dc;
1503  count++;
1504  }
1505 
1506  /* using division instead of shift to correctly handle negative values */
1507  return count == 2 ? dc / 2 : last_dc[type];
1508 }
1509 
1510 static void vp4_set_tokens_base(Vp3DecodeContext *s)
1511 {
1512  int16_t *base = s->dct_tokens_base;
1513  for (int plane = 0; plane < 3; plane++) {
1514  for (int i = 0; i < 64; i++) {
1515  s->dct_tokens[plane][i] = base;
1516  base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1517  }
1518  }
1519 }
1520 
static int vp4_unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    const VLCElem *tables[2][64];
    int eob_tracker[64];
    VP4Predictor dc_pred[6][6];
    int last_dc[NB_VP4_DC_TYPES];

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of DC/AC VLC tables; index 0 is Y, index 1 is chroma */

    /* DC table group */
    tables[0][0] = coeff_vlc[dc_y_table];
    tables[1][0] = coeff_vlc[dc_c_table];
    for (int i = 1; i <= 5; i++) {
        /* AC VLC table group 1 */
        tables[0][i] = coeff_vlc[ac_y_table + 16];
        tables[1][i] = coeff_vlc[ac_c_table + 16];
    }
    for (int i = 6; i <= 14; i++) {
        /* AC VLC table group 2 */
        tables[0][i] = coeff_vlc[ac_y_table + 32];
        tables[1][i] = coeff_vlc[ac_c_table + 32];
    }
    for (int i = 15; i <= 27; i++) {
        /* AC VLC table group 3 */
        tables[0][i] = coeff_vlc[ac_y_table + 48];
        tables[1][i] = coeff_vlc[ac_c_table + 48];
    }
    for (int i = 28; i <= 63; i++) {
        /* AC VLC table group 4 */
        tables[0][i] = coeff_vlc[ac_y_table + 64];
        tables[1][i] = coeff_vlc[ac_c_table + 64];
    }

    vp4_set_tokens_base(s);

    memset(last_dc, 0, sizeof(last_dc));

    for (int plane = 0; plane < ((s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 1 : 3); plane++) {
        memset(eob_tracker, 0, sizeof(eob_tracker));

        /* initialise dc prediction */
        for (int i = 0; i < s->fragment_width[!!plane]; i++)
            vp4_dc_predictor_reset(&s->dc_pred_row[i]);

        for (int j = 0; j < 6; j++)
            for (int i = 0; i < 6; i++)
                vp4_dc_predictor_reset(&dc_pred[j][i]);

        /* fragments are visited superblock by superblock, and within each
         * superblock in hilbert_offset[] order */
        for (int sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
            for (int sb_x = 0; sb_x * 4 < s->fragment_width[!!plane]; sb_x++) {
                vp4_dc_pred_before(s, dc_pred, sb_x);
                for (int j = 0; j < 16; j++) {
                    int hx = hilbert_offset[j][0];
                    int hy = hilbert_offset[j][1];
                    int x  = 4 * sb_x + hx;
                    int y  = 4 * sb_y + hy;
                    VP4Predictor *this_dc_pred = &dc_pred[hy + 1][hx + 1];
                    int fragment, dc_block_type;

                    /* superblocks may overhang the plane edges */
                    if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
                        continue;

                    fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;

                    if (s->all_fragments[fragment].coding_method == MODE_COPY)
                        continue;

                    if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
                        return -1;

                    dc_block_type = vp4_pred_block_type_map[s->all_fragments[fragment].coding_method];

                    /* apply DC prediction, then record this block as a
                     * predictor for its neighbors */
                    s->all_fragments[fragment].dc +=
                        vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);

                    this_dc_pred->type = dc_block_type,
                    this_dc_pred->dc   = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
                }
                vp4_dc_pred_after(s, dc_pred, sb_x);
            }
        }
    }

    /* rewind the token pointers that were advanced while unpacking */
    vp4_set_tokens_base(s);

    return 0;
}
1623 #endif
1624 
1625 /*
1626  * This function reverses the DC prediction for each coded fragment in
1627  * the frame. Much of this function is adapted directly from the original
1628  * VP3 source code.
1629  */
1630 #define COMPATIBLE_FRAME(x) \
1631  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1632 #define DC_COEFF(u) s->all_fragments[u].dc
1633 
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height)
{
#define PUL 8
#define PU 4
#define PUR 2
#define PL 1

    int i = first_fragment;

    int predicted_dc;

    /* DC values for the left, up-left, up, and up-right fragments */
    int vl, vul, vu, vur;

    /* indexes for the left, up-left, up, and up-right fragments */
    int l, ul, u, ur;

    /*
     * The 4 fields mean:
     *   0: up-left multiplier
     *   1: up multiplier
     *   2: up-right multiplier
     *   3: left multiplier
     * Weights are fixed-point with a divisor of 128 (see below).
     */
    static const int predictor_transform[16][4] = {
        {    0,   0,   0,   0 },
        {    0,   0,   0, 128 }, // PL
        {    0,   0, 128,   0 }, // PUR
        {    0,   0,  53,  75 }, // PUR|PL
        {    0, 128,   0,   0 }, // PU
        {    0,  64,   0,  64 }, // PU |PL
        {    0, 128,   0,   0 }, // PU |PUR
        {    0,   0,  53,  75 }, // PU |PUR|PL
        {  128,   0,   0,   0 }, // PUL
        {    0,   0,   0, 128 }, // PUL|PL
        {   64,   0,  64,   0 }, // PUL|PUR
        {    0,   0,  53,  75 }, // PUL|PUR|PL
        {    0, 128,   0,   0 }, // PUL|PU
        { -104, 116,   0, 116 }, // PUL|PU |PL
        {   24,  80,  24,   0 }, // PUL|PU |PUR
        { -104, 116,   0, 116 }  // PUL|PU |PUR|PL
    };

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
     * from other INTRA blocks. There are 2 golden frame coding types;
     * blocks encoded in these modes can only predict from other blocks
     * that were encoded with one of these 2 modes. */
    static const unsigned char compatible_frame[9] = {
        1, /* MODE_INTER_NO_MV */
        0, /* MODE_INTRA */
        1, /* MODE_INTER_PLUS_MV */
        1, /* MODE_INTER_LAST_MV */
        1, /* MODE_INTER_PRIOR_MV */
        2, /* MODE_USING_GOLDEN */
        2, /* MODE_GOLDEN_MV */
        1, /* MODE_INTER_FOUR_MV */
        3  /* MODE_COPY */
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    vul =
    vu  =
    vur =
    vl  = 0;
    last_dc[0] =
    last_dc[1] =
    last_dc[2] = 0;

    /* for each fragment row... */
    for (int y = 0; y < fragment_height; y++) {
        /* for each fragment in a row... */
        for (int x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {
                current_frame_type =
                    compatible_frame[s->all_fragments[i].coding_method];

                /* gather the DCs of those neighbors that exist and were
                 * coded from a compatible reference frame */
                transform = 0;
                if (x) {
                    l  = i - 1;
                    vl = DC_COEFF(l);
                    if (COMPATIBLE_FRAME(l))
                        transform |= PL;
                }
                if (y) {
                    u  = i - fragment_width;
                    vu = DC_COEFF(u);
                    if (COMPATIBLE_FRAME(u))
                        transform |= PU;
                    if (x) {
                        ul  = i - fragment_width - 1;
                        vul = DC_COEFF(ul);
                        if (COMPATIBLE_FRAME(ul))
                            transform |= PUL;
                    }
                    if (x + 1 < fragment_width) {
                        ur  = i - fragment_width + 1;
                        vur = DC_COEFF(ur);
                        if (COMPATIBLE_FRAME(ur))
                            transform |= PUR;
                    }
                }

                if (transform == 0) {
                    /* if there were no fragments to predict from, use last
                     * DC saved */
                    predicted_dc = last_dc[current_frame_type];
                } else {
                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

                    predicted_dc /= 128;

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
                    if ((transform == 15) || (transform == 13)) {
                        if (FFABS(predicted_dc - vu) > 128)
                            predicted_dc = vu;
                        else if (FFABS(predicted_dc - vl) > 128)
                            predicted_dc = vl;
                        else if (FFABS(predicted_dc - vul) > 128)
                            predicted_dc = vul;
                    }
                }

                /* at long last, apply the predictor */
                DC_COEFF(i) += predicted_dc;
                /* save the DC */
                last_dc[current_frame_type] = DC_COEFF(i);
            }
        }
    }
}
1782 
/*
 * Apply the loop filter to the edges of all coded fragments in rows
 * [ystart, yend) of the given plane.
 */
static void apply_loop_filter(Vp3DecodeContext *s, int plane,
                              int ystart, int yend)
{
    int *bounding_values = s->bounding_values_array + 127;

    int width = s->fragment_width[!!plane];
    int height = s->fragment_height[!!plane];
    int fragment = s->fragment_start[plane] + ystart * width;
    ptrdiff_t stride = s->current_frame.f->linesize[plane];
    uint8_t *plane_data = s->current_frame.f->data[plane];
    /* non-flipped images are walked with a negated stride */
    if (!s->flipped_image)
        stride = -stride;
    plane_data += s->data_offset[plane] + 8 * ystart * stride;

    for (int y = ystart; y < yend; y++) {
        for (int x = 0; x < width; x++) {
            /* This code basically just deblocks on the edges of coded blocks.
             * However, it has to be much more complicated because of the
             * brain damaged deblock ordering used in VP3/Theora. Order matters
             * because some pixels get filtered twice. */
            if (s->all_fragments[fragment].coding_method != MODE_COPY) {
                /* do not perform left edge filter for left columns frags */
                if (x > 0) {
                    s->vp3dsp.h_loop_filter(
                        plane_data + 8 * x,
                        stride, bounding_values);
                }

                /* do not perform top edge filter for top row fragments */
                if (y > 0) {
                    s->vp3dsp.v_loop_filter(
                        plane_data + 8 * x,
                        stride, bounding_values);
                }

                /* do not perform right edge filter for right column
                 * fragments or if right fragment neighbor is also coded
                 * in this frame (it will be filtered in next iteration) */
                if ((x < width - 1) &&
                    (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
                    s->vp3dsp.h_loop_filter(
                        plane_data + 8 * x + 8,
                        stride, bounding_values);
                }

                /* do not perform bottom edge filter for bottom row
                 * fragments or if bottom fragment neighbor is also coded
                 * in this frame (it will be filtered in the next row) */
                if ((y < height - 1) &&
                    (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
                    s->vp3dsp.v_loop_filter(
                        plane_data + 8 * x + 8 * stride,
                        stride, bounding_values);
                }
            }

            fragment++;
        }
        plane_data += 8 * stride;
    }
}
1844 
1845 /**
1846  * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1847  * for the next block in coding order
1848  */
static inline int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag,
                              int plane, int inter, int16_t block[64])
{
    const int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
    const uint8_t *perm = s->idct_scantable;
    int i = 0;

    do {
        int token = *s->dct_tokens[plane][i];
        /* the low 2 bits of a token select its type */
        switch (token & 3) {
        case 0: // EOB
            if (--token < 4) // 0-3 are token types so the EOB run must now be 0
                s->dct_tokens[plane][i]++;
            else
                *s->dct_tokens[plane][i] = token & ~3; // store decremented run
            goto end;
        case 1: // zero run
            s->dct_tokens[plane][i]++;
            i += (token >> 2) & 0x7f; // run length lives in bits 2-8
            if (i > 63) {
                av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
                return i;
            }
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
        default: // shouldn't happen
            return i;
        }
    } while (i < 64);
    // return value is expected to be a valid level
    i--;
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}
1890 
1891 /**
1892  * called when all pixels up to row y are complete
1893  */
1895 {
1896  int h, cy;
1898 
1899  if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
1900  int y_flipped = s->flipped_image ? s->height - y : y;
1901 
1902  /* At the end of the frame, report INT_MAX instead of the height of
1903  * the frame. This makes the other threads' ff_thread_await_progress()
1904  * calls cheaper, because they don't have to clip their values. */
1905  ff_progress_frame_report(&s->current_frame,
1906  y_flipped == s->height ? INT_MAX
1907  : y_flipped - 1);
1908  }
1909 
1910  if (!s->avctx->draw_horiz_band)
1911  return;
1912 
1913  h = y - s->last_slice_end;
1914  s->last_slice_end = y;
1915  y -= h;
1916 
1917  if (!s->flipped_image)
1918  y = s->height - y - h;
1919 
1920  cy = y >> s->chroma_y_shift;
1921  offset[0] = s->current_frame.f->linesize[0] * y;
1922  offset[1] = s->current_frame.f->linesize[1] * cy;
1923  offset[2] = s->current_frame.f->linesize[2] * cy;
1924  for (int i = 3; i < AV_NUM_DATA_POINTERS; i++)
1925  offset[i] = 0;
1926 
1927  emms_c();
1928  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1929 }
1930 
1931 /**
1932  * Wait for the reference frame of the current fragment.
1933  * The progress value is in luma pixel rows.
1934  */
1936  int motion_y, int y)
1937 {
1938  const ProgressFrame *ref_frame;
1939  int ref_row;
1940  int border = motion_y & 1;
1941 
1942  if (fragment->coding_method == MODE_USING_GOLDEN ||
1943  fragment->coding_method == MODE_GOLDEN_MV)
1944  ref_frame = &s->golden_frame;
1945  else
1946  ref_frame = &s->last_frame;
1947 
1948  ref_row = y + (motion_y >> 1);
1949  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1950 
1952 }
1953 
1954 #if CONFIG_VP4_DECODER
1955 /**
1956  * @return non-zero if temp (edge_emu_buffer) was populated
1957  */
static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
                              const uint8_t *motion_source, ptrdiff_t stride,
                              int src_x, int src_y, uint8_t *temp)
{
    /* chroma motion vectors carry twice the subpel resolution per axis */
    int motion_shift = plane ? 4 : 2;
    int subpel_mask = plane ? 3 : 1;
    int *bounding_values = s->bounding_values_array + 127;

    int x, y;
    int x2, y2;
    int x_subpel, y_subpel;
    int x_offset, y_offset;

    int block_width = plane ? 8 : 16;
    int plane_width = s->width >> (plane && s->chroma_x_shift);
    int plane_height = s->height >> (plane && s->chroma_y_shift);

#define loop_stride 12
    uint8_t loop[12 * loop_stride];

    /* using division instead of shift to correctly handle negative values */
    x = 8 * bx + motion_x / motion_shift;
    y = 8 * by + motion_y / motion_shift;

    x_subpel = motion_x & subpel_mask;
    y_subpel = motion_y & subpel_mask;

    if (x_subpel || y_subpel) {
        /* subpel interpolation reads one extra row/column; widen the
         * source area towards the motion direction */
        x--;
        y--;

        if (x_subpel)
            x = FFMIN(x, x + FFSIGN(motion_x));

        if (y_subpel)
            y = FFMIN(y, y + FFSIGN(motion_y));

        x2 = x + block_width;
        y2 = y + block_width;

        /* nothing to filter when the block falls off the plane */
        if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
            return 0;

        x_offset = (-(x + 2) & 7) + 2;
        y_offset = (-(y + 2) & 7) + 2;

        av_assert1(!(x_offset > 8 + x_subpel && y_offset > 8 + y_subpel));

        /* copy a 12x12 area around the source block, padding at the edges */
        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

        if (x_offset <= 8 + x_subpel)
            ff_vp3dsp_h_loop_filter_12(loop + x_offset, loop_stride, bounding_values);

        if (y_offset <= 8 + y_subpel)
            ff_vp3dsp_v_loop_filter_12(loop + y_offset * loop_stride, loop_stride, bounding_values);

    } else {

        x_offset = -x & 7;
        y_offset = -y & 7;

        /* the block is 8-pixel aligned: no block boundary is crossed */
        if (!x_offset && !y_offset)
            return 0;

        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

#define safe_loop_filter(name, ptr, stride, bounding_values) \
    if (VP3_LOOP_FILTER_NO_UNALIGNED_SUPPORT && (uintptr_t)(ptr) & 7) \
        s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
    else \
        s->vp3dsp.name(ptr, stride, bounding_values);

        if (x_offset)
            safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);

        if (y_offset)
            safe_loop_filter(v_loop_filter, loop + (y_offset + 1) * loop_stride + 1, loop_stride, bounding_values);
    }

    /* hand the filtered 9x9 area back through the caller's temp buffer */
    for (int i = 0; i < 9; i++)
        memcpy(temp + i * stride, loop + (i + 1) * loop_stride + 1, 9);

    return 1;
}
2050 #endif
2051 
2052 /*
2053  * Perform the final rendering for a particular slice of data.
2054  * The slice number ranges from 0..(c_superblock_height - 1).
2055  */
2056 static void render_slice(Vp3DecodeContext *s, int slice)
2057 {
2058  int16_t *block = s->block;
2059  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2060  /* When decoding keyframes, the earlier frames may not be available,
2061  * so we just use the current frame in this case instead;
2062  * it also avoid using undefined pointer arithmetic. Nothing is
2063  * ever read from these frames in case of a keyframe. */
2064  const AVFrame *last_frame = s->last_frame.f ?
2065  s->last_frame.f : s->current_frame.f;
2066  const AVFrame *golden_frame = s->golden_frame.f ?
2067  s->golden_frame.f : s->current_frame.f;
2068  int motion_halfpel_index;
2069  int first_pixel;
2070 
2071  if (slice >= s->c_superblock_height)
2072  return;
2073 
2074  for (int plane = 0; plane < 3; plane++) {
2075  uint8_t *output_plane = s->current_frame.f->data[plane] +
2076  s->data_offset[plane];
2077  const uint8_t *last_plane = last_frame->data[plane] +
2078  s->data_offset[plane];
2079  const uint8_t *golden_plane = golden_frame->data[plane] +
2080  s->data_offset[plane];
2081  ptrdiff_t stride = s->current_frame.f->linesize[plane];
2082  int plane_width = s->width >> (plane && s->chroma_x_shift);
2083  int plane_height = s->height >> (plane && s->chroma_y_shift);
2084  const int8_t (*motion_val)[2] = s->motion_val[!!plane];
2085 
2086  int sb_y = slice << (!plane && s->chroma_y_shift);
2087  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2088  int slice_width = plane ? s->c_superblock_width
2089  : s->y_superblock_width;
2090 
2091  int fragment_width = s->fragment_width[!!plane];
2092  int fragment_height = s->fragment_height[!!plane];
2093  int fragment_start = s->fragment_start[plane];
2094 
2095  int do_await = !plane && HAVE_THREADS &&
2096  (s->avctx->active_thread_type & FF_THREAD_FRAME);
2097 
2098  if (!s->flipped_image)
2099  stride = -stride;
2100  if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
2101  continue;
2102 
2103  /* for each superblock row in the slice (both of them)... */
2104  for (; sb_y < slice_height; sb_y++) {
2105  /* for each superblock in a row... */
2106  for (int sb_x = 0; sb_x < slice_width; sb_x++) {
2107  /* for each block in a superblock... */
2108  for (int j = 0; j < 16; j++) {
2109  int x = 4 * sb_x + hilbert_offset[j][0];
2110  int y = 4 * sb_y + hilbert_offset[j][1];
2111  int fragment = y * fragment_width + x;
2112 
2113  int i = fragment_start + fragment;
2114 
2115  // bounds check
2116  if (x >= fragment_width || y >= fragment_height)
2117  continue;
2118 
2119  first_pixel = 8 * y * stride + 8 * x;
2120 
2121  if (do_await &&
2122  s->all_fragments[i].coding_method != MODE_INTRA)
2123  await_reference_row(s, &s->all_fragments[i],
2124  motion_val[fragment][1],
2125  (16 * y) >> s->chroma_y_shift);
2126 
2127  /* transform if this block was coded */
2128  if (s->all_fragments[i].coding_method != MODE_COPY) {
2129  const uint8_t *motion_source;
2130  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2131  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2132  motion_source = golden_plane;
2133  else
2134  motion_source = last_plane;
2135 
2136  motion_source += first_pixel;
2137  motion_halfpel_index = 0;
2138 
2139  /* sort out the motion vector if this fragment is coded
2140  * using a motion vector method */
2141  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2142  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2143  int src_x, src_y;
2144  int standard_mc = 1;
2145  motion_x = motion_val[fragment][0];
2146  motion_y = motion_val[fragment][1];
2147 #if CONFIG_VP4_DECODER
2148  if (plane && s->version >= 2) {
2149  motion_x = (motion_x >> 1) | (motion_x & 1);
2150  motion_y = (motion_y >> 1) | (motion_y & 1);
2151  }
2152 #endif
2153 
2154  src_x = (motion_x >> 1) + 8 * x;
2155  src_y = (motion_y >> 1) + 8 * y;
2156 
2157  motion_halfpel_index = motion_x & 0x01;
2158  motion_source += (motion_x >> 1);
2159 
2160  motion_halfpel_index |= (motion_y & 0x01) << 1;
2161  motion_source += ((motion_y >> 1) * stride);
2162 
2163 #if CONFIG_VP4_DECODER
2164  if (s->version >= 2) {
2165  uint8_t *temp = s->edge_emu_buffer;
2166  if (stride < 0)
2167  temp -= 8 * stride;
2168  if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2169  motion_source = temp;
2170  standard_mc = 0;
2171  }
2172  }
2173 #endif
2174 
2175  if (standard_mc && (
2176  src_x < 0 || src_y < 0 ||
2177  src_x + 9 >= plane_width ||
2178  src_y + 9 >= plane_height)) {
2179  uint8_t *temp = s->edge_emu_buffer;
2180  if (stride < 0)
2181  temp -= 8 * stride;
2182 
2183  s->vdsp.emulated_edge_mc(temp, motion_source,
2184  stride, stride,
2185  9, 9, src_x, src_y,
2186  plane_width,
2187  plane_height);
2188  motion_source = temp;
2189  }
2190  }
2191 
2192  /* first, take care of copying a block from either the
2193  * previous or the golden frame */
2194  if (s->all_fragments[i].coding_method != MODE_INTRA) {
2195  /* Note, it is possible to implement all MC cases
2196  * with put_no_rnd_pixels_l2 which would look more
2197  * like the VP3 source but this would be slower as
2198  * put_no_rnd_pixels_tab is better optimized */
2199  if (motion_halfpel_index != 3) {
2200  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2201  output_plane + first_pixel,
2202  motion_source, stride, 8);
2203  } else {
2204  /* d is 0 if motion_x and _y have the same sign,
2205  * else -1 */
2206  int d = (motion_x ^ motion_y) >> 31;
2207  s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
2208  motion_source - d,
2209  motion_source + stride + 1 + d,
2210  stride, 8);
2211  }
2212  }
2213 
2214  /* invert DCT and place (or add) in final output */
2215 
2216  if (s->all_fragments[i].coding_method == MODE_INTRA) {
2217  vp3_dequant(s, s->all_fragments + i,
2218  plane, 0, block);
2219  s->vp3dsp.idct_put(output_plane + first_pixel,
2220  stride,
2221  block);
2222  } else {
2223  if (vp3_dequant(s, s->all_fragments + i,
2224  plane, 1, block)) {
2225  s->vp3dsp.idct_add(output_plane + first_pixel,
2226  stride,
2227  block);
2228  } else {
2229  s->vp3dsp.idct_dc_add(output_plane + first_pixel,
2230  stride, block);
2231  }
2232  }
2233  } else {
2234  /* copy directly from the previous frame */
2235  s->hdsp.put_pixels_tab[1][0](
2236  output_plane + first_pixel,
2237  last_plane + first_pixel,
2238  stride, 8);
2239  }
2240  }
2241  }
2242 
2243  // Filter up to the last row in the superblock row
2244  if (s->version < 2 && !s->skip_loop_filter)
2245  apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
2246  FFMIN(4 * sb_y + 3, fragment_height - 1));
2247  }
2248  }
2249 
2250  /* this looks like a good place for slice dispatch... */
2251  /* algorithm:
2252  * if (slice == s->macroblock_height - 1)
2253  * dispatch (both last slice & 2nd-to-last slice);
2254  * else if (slice > 0)
2255  * dispatch (slice - 1);
2256  */
2257 
2258  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
2259  s->height - 16));
2260 }
2261 
/* One-time initialization (run under ff_thread_once()) of the static VLC
 * tables shared by all decoder instances: superblock run-length codes,
 * fragment run-length codes, VP3 motion-vector codes, coding-mode codes,
 * and (for VP4) motion-vector and block-pattern codes.
 * NOTE(review): the heads of several ff_vlc_init_tables*() calls (and the
 * local VLCInitState declaration they presumably use) were dropped by the
 * documentation extractor; only their argument tails remain below. */
static av_cold void init_tables_once(void)
{

                                    SUPERBLOCK_VLC_BITS, 34,
                                    NULL, 0, 0, 1, 0);

                                    NULL, 0, 0, 0, 0);

                                    &motion_vector_vlc_table[0][1], 2,
                                    &motion_vector_vlc_table[0][0], 2, 1,
                                    -31, 0);

                                    mode_code_vlc_len, 1,
                                    NULL, 0, 0, 0, 0);

#if CONFIG_VP4_DECODER
    /* One MV table per (component, precision) pair; symbols offset by -31. */
    for (int j = 0; j < 2; j++)
        for (int i = 0; i < 7; i++) {
            vp4_mv_vlc_table[j][i] =
                                         &vp4_mv_vlc[j][i][0][1], 2,
                                         &vp4_mv_vlc[j][i][0][0], 2, 1,
                                         -31, 0);
        }

    /* version >= 2 */
    for (int i = 0; i < 2; i++) {
        block_pattern_vlc[i] =
            ff_vlc_init_tables(&state, 5, 14,
                               &vp4_block_pattern_vlc[i][0][1], 2, 1,
                               &vp4_block_pattern_vlc[i][0][0], 2, 1, 0);
    }
#endif
}
2303 
2304 /// Allocate tables for per-frame data in Vp3DecodeContext
2306 {
2307  Vp3DecodeContext *s = avctx->priv_data;
2308  int y_fragment_count, c_fragment_count;
2309 
2310  free_tables(avctx);
2311 
2312  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2313  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2314 
2315  /* superblock_coding is used by unpack_superblocks (VP3/Theora) and vp4_unpack_macroblocks (VP4) */
2316  s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2317  s->all_fragments = av_calloc(s->fragment_count, sizeof(*s->all_fragments));
2318 
2319  s-> kf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2320  s->nkf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2321  memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment));
2322 
2323  s->dct_tokens_base = av_calloc(s->fragment_count,
2324  64 * sizeof(*s->dct_tokens_base));
2325  s->motion_val[0] = av_calloc(y_fragment_count, sizeof(*s->motion_val[0]));
2326  s->motion_val[1] = av_calloc(c_fragment_count, sizeof(*s->motion_val[1]));
2327 
2328  /* work out the block mapping tables */
2329  s->superblock_fragments = av_calloc(s->superblock_count, 16 * sizeof(int));
2330  s->macroblock_coding = av_mallocz(s->macroblock_count + 1);
2331 
2332  s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2333 
2334  if (!s->superblock_coding || !s->all_fragments ||
2335  !s->dct_tokens_base || !s->kf_coded_fragment_list ||
2336  !s->nkf_coded_fragment_list ||
2337  !s->superblock_fragments || !s->macroblock_coding ||
2338  !s->dc_pred_row ||
2339  !s->motion_val[0] || !s->motion_val[1]) {
2340  return -1;
2341  }
2342 
2344 
2345  return 0;
2346 }
2347 
2348 
2349 static av_cold void free_vlc_tables(AVRefStructOpaque unused, void *obj)
2350 {
2351  CoeffVLCs *vlcs = obj;
2352 
2353  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++)
2354  ff_vlc_free(&vlcs->vlcs[i]);
2355 }
2356 
/* Common init for the VP3, VP4 and Theora decoders: detects the stream
 * version, derives superblock/macroblock/fragment geometry from the coded
 * dimensions, loads default quant tables (unless Theora supplied its own),
 * builds the per-stream coefficient VLCs, and allocates per-frame tables.
 * NOTE(review): the signature line
 * (static av_cold int vp3_decode_init(AVCodecContext *avctx), vp3.c:2357 per
 * the doxygen anchor) was dropped by the extractor. */
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    Vp3DecodeContext *s = avctx->priv_data;
    int ret;
    int c_width;
    int c_height;
    int y_fragment_count, c_fragment_count;

    /* Version from the codec tag: VP40 -> 3 (refined later from the first
     * keyframe), VP30 -> 0, anything else (VP31/Theora) -> 1. */
    if (avctx->codec_tag == MKTAG('V', 'P', '4', '0')) {
        s->version = 3;
#if !CONFIG_VP4_DECODER
        av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
        /* NOTE(review): the error-return statement here was dropped by the
         * extractor. */
#endif
    } else if (avctx->codec_tag == MKTAG('V', 'P', '3', '0'))
        s->version = 0;
    else
        s->version = 1;

    s->avctx  = avctx;
    /* Internal geometry is padded to whole 16x16 macroblocks. */
    s->width  = FFALIGN(avctx->coded_width, 16);
    s->height = FFALIGN(avctx->coded_height, 16);
    if (s->width < 18)
        return AVERROR_PATCHWELCOME;
    if (avctx->codec_id != AV_CODEC_ID_THEORA)
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    /* NOTE(review): one statement was dropped by the extractor here. */
    ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
    ff_videodsp_init(&s->vdsp, 8);
    ff_vp3dsp_init(&s->vp3dsp);

    /* VP3's IDCT wants a transposed scan order. */
    for (int i = 0; i < 64; i++) {
#define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
        s->idct_permutation[i] = TRANSPOSE(i);
        s->idct_scantable[i]   = TRANSPOSE(ff_zigzag_direct[i]);
#undef TRANSPOSE
    }

    /* initialize to an impossible value which will force a recalculation
     * in the first frame decode */
    for (int i = 0; i < 3; i++)
        s->qps[i] = -1;

    ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
    if (ret)
        return ret;

    /* Superblocks are 32x32 pixels. */
    s->y_superblock_width  = (s->width  + 31) / 32;
    s->y_superblock_height = (s->height + 31) / 32;
    s->y_superblock_count  = s->y_superblock_width * s->y_superblock_height;

    /* work out the dimensions for the C planes */
    c_width                = s->width >> s->chroma_x_shift;
    c_height               = s->height >> s->chroma_y_shift;
    s->c_superblock_width  = (c_width  + 31) / 32;
    s->c_superblock_height = (c_height + 31) / 32;
    s->c_superblock_count  = s->c_superblock_width * s->c_superblock_height;

    /* Superblocks are laid out Y, then U, then V. */
    s->superblock_count   = s->y_superblock_count + (s->c_superblock_count * 2);
    s->u_superblock_start = s->y_superblock_count;
    s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;

    s->macroblock_width     = (s->width  + 15) / 16;
    s->macroblock_height    = (s->height + 15) / 16;
    s->macroblock_count     = s->macroblock_width * s->macroblock_height;
    s->c_macroblock_width   = (c_width  + 15) / 16;
    s->c_macroblock_height  = (c_height + 15) / 16;
    s->c_macroblock_count   = s->c_macroblock_width * s->c_macroblock_height;
    s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;

    s->fragment_width[0]  = s->width / FRAGMENT_PIXELS;
    s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
    s->fragment_width[1]  = s->fragment_width[0] >> s->chroma_x_shift;
    s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;

    /* fragment count covers all 8x8 blocks for all 3 planes */
    y_fragment_count     = s->fragment_width[0] * s->fragment_height[0];
    c_fragment_count     = s->fragment_width[1] * s->fragment_height[1];
    s->fragment_count    = y_fragment_count + 2 * c_fragment_count;
    s->fragment_start[1] = y_fragment_count;
    s->fragment_start[2] = y_fragment_count + c_fragment_count;

    /* Built-in quant/loop-filter tables, used unless a Theora setup header
     * already provided its own (s->theora_tables). */
    if (!s->theora_tables) {
        for (int i = 0; i < 64; i++) {
            s->coded_dc_scale_factor[0][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_y_dc_scale_factor[i];
            s->coded_dc_scale_factor[1][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_uv_dc_scale_factor[i];
            s->coded_ac_scale_factor[i] = s->version < 2 ? vp31_ac_scale_factor[i] : vp4_ac_scale_factor[i];
            s->base_matrix[0][i]        = s->version < 2 ? vp31_intra_y_dequant[i] : vp4_generic_dequant[i];
            s->base_matrix[1][i]        = s->version < 2 ? ff_mjpeg_std_chrominance_quant_tbl[i] : vp4_generic_dequant[i];
            s->base_matrix[2][i]        = s->version < 2 ? vp31_inter_dequant[i] : vp4_generic_dequant[i];
            s->filter_limit_values[i]   = s->version < 2 ? vp31_filter_limit_values[i] : vp4_filter_limit_values[i];
        }

        /* Trivial quality-range mapping: one range of 63 qi per matrix set. */
        for (int inter = 0; inter < 2; inter++) {
            for (int plane = 0; plane < 3; plane++) {
                s->qr_count[inter][plane]   = 1;
                s->qr_size[inter][plane][0] = 63;
                s->qr_base[inter][plane][0] =
                s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
            }
        }
    }

    /* The coefficient VLCs are refcounted and shared between frame threads;
     * only the first/non-copy context builds them. */
    if (ff_thread_sync_ref(avctx, offsetof(Vp3DecodeContext, coeff_vlc)) != FF_THREAD_IS_COPY) {
        CoeffVLCs *vlcs = av_refstruct_alloc_ext(sizeof(*s->coeff_vlc), 0,
        /* NOTE(review): the remaining av_refstruct_alloc_ext() arguments
         * (presumably NULL, free_vlc_tables) were dropped by the extractor. */
        if (!vlcs)
            return AVERROR(ENOMEM);

        s->coeff_vlc = vlcs;

        if (!s->theora_tables) {
            const uint8_t (*bias_tabs)[32][2];

            /* init VLC tables */
            bias_tabs = CONFIG_VP4_DECODER && s->version >= 2 ? vp4_bias : vp3_bias;
            for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
                ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, 32,
                                               &bias_tabs[i][0][1], 2,
                                               &bias_tabs[i][0][0], 2, 1,
                                               0, 0, avctx);
                if (ret < 0)
                    return ret;
                vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
            }
        } else {
            /* Theora streams carry their own Huffman tables in the setup
             * header (parsed by theora_decode_tables()). */
            for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
                const HuffTable *tab = &s->huffman_table[i];

                ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, tab->nb_entries,
                                               &tab->entries[0].len, sizeof(*tab->entries),
                                               &tab->entries[0].sym, sizeof(*tab->entries), 1,
                                               0, 0, avctx);
                if (ret < 0)
                    return ret;
                vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
            }
        }
    }

    ff_thread_once(&init_static_once, init_tables_once);

    return allocate_tables(avctx);
}
2502 
2503 #if HAVE_THREADS
/* Take new references on the source context's current and golden frames.
 * ff_progress_frame_replace() unrefs any frame dst already holds, so this is
 * safe to call on an already-populated destination. */
static void ref_frames(Vp3DecodeContext *dst, const Vp3DecodeContext *src)
{
    ff_progress_frame_replace(&dst->current_frame, &src->current_frame);
    ff_progress_frame_replace(&dst->golden_frame, &src->golden_frame);
}
2509 
2510 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
2511 {
2512  Vp3DecodeContext *s = dst->priv_data;
2513  const Vp3DecodeContext *s1 = src->priv_data;
2514  int qps_changed = 0;
2515 
2516  // copy previous frame data
2517  ref_frames(s, s1);
2518 
2519  if (s != s1) {
2520  // copy qscale data if necessary
2521  for (int i = 0; i < 3; i++) {
2522  if (s->qps[i] != s1->qps[1]) {
2523  qps_changed = 1;
2524  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2525  }
2526  }
2527 
2528  if (s->qps[0] != s1->qps[0])
2529  memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2530  sizeof(s->bounding_values_array));
2531 
2532  if (qps_changed) {
2533  memcpy(s->qps, s1->qps, sizeof(s->qps));
2534  s->nqps = s1->nqps;
2535  }
2536  }
2537  return 0;
2538 }
2539 #endif
2540 
/* Decode one VP3/VP4/Theora packet into a frame.
 * Handles midstream Theora header packets, reads the frame header (keyframe
 * flag, quantizer indices), unpacks superblock/macroblock coding info,
 * modes, motion vectors, per-block qpis and DCT coefficients, then renders
 * every superblock row and applies the loop filter.
 * NOTE(review): the first line of the signature
 * (static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame,) was
 * dropped by the documentation extractor, as were a few statements marked
 * below. */
                            int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ret;

    if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
        return ret;

#if CONFIG_THEORA_DECODER
    /* A set first bit on a Theora stream marks a header packet, not video. */
    if (s->theora && get_bits1(&gb)) {
        int type = get_bits(&gb, 7);
        skip_bits_long(&gb, 6*8); /* "theora" */

        if (s->avctx->active_thread_type&FF_THREAD_FRAME) {
            av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
            return AVERROR_PATCHWELCOME;
        }
        if (type == 0) {
            /* Identification header: tear down and reinit with new geometry. */
            vp3_decode_end(avctx);
            ret = theora_decode_header(avctx, &gb);

            if (ret >= 0)
                ret = vp3_decode_init(avctx);
            if (ret < 0) {
                vp3_decode_end(avctx);
                return ret;
            }
            return buf_size;
        } else if (type == 2) {
            /* Setup header: reload quant/Huffman tables, then reinit. */
            vp3_decode_end(avctx);
            ret = theora_decode_tables(avctx, &gb);
            if (ret >= 0)
                ret = vp3_decode_init(avctx);
            if (ret < 0) {
                vp3_decode_end(avctx);
                return ret;
            }
            return buf_size;
        }

        av_log(avctx, AV_LOG_ERROR,
               "Header packet passed to frame decoder, skipping\n");
        return -1;
    }
#endif

    s->keyframe = !get_bits1(&gb);
    if (!s->all_fragments) {
        av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
        return -1;
    }
    if (!s->theora)
        skip_bits(&gb, 1);

    /* Remember the previous quantizer indices to detect changes. */
    int last_qps[3];
    for (int i = 0; i < 3; i++)
        last_qps[i] = s->qps[i];

    /* Up to 3 quantizer indices per frame (multiples only since 3.2.0). */
    s->nqps = 0;
    do {
        s->qps[s->nqps++] = get_bits(&gb, 6);
    } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
    for (int i = s->nqps; i < 3; i++)
        s->qps[i] = -1;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%"PRId64": Q index = %d\n",
               s->keyframe ? "key" : "", avctx->frame_num + 1, s->qps[0]);

    s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
                          avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
                                                                  : AVDISCARD_NONKEY);

    if (s->qps[0] != last_qps[0])
        /* NOTE(review): the statement for this condition (presumably
         * init_loop_filter(s);) was dropped by the extractor. */

    for (int i = 0; i < s->nqps; i++)
        // reinit all dequantizers if the first one changed, because
        // the DC of the first quantizer must be used for all matrices
        if (s->qps[i] != last_qps[i] || s->qps[0] != last_qps[0])
            init_dequantizer(s, i);

    if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
        return buf_size;

    /* Get a new buffer into last_frame, then swap it with current_frame, so
     * the just-decoded frame becomes the reference for the next one. */
    ret = ff_progress_frame_get_buffer(avctx, &s->last_frame,
    /* NOTE(review): the trailing argument line (presumably
     * AV_GET_BUFFER_FLAG_REF);) was dropped by the extractor. */
    if (ret < 0) {
        // Don't goto error here, as one can't report progress on or
        // unref a non-existent frame.
        return ret;
    }
    FFSWAP(ProgressFrame, s->last_frame, s->current_frame);
    s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
    /* NOTE(review): the ternary's false branch (presumably
     * : AV_PICTURE_TYPE_P;) was dropped by the extractor. */
    if (s->keyframe)
        s->current_frame.f->flags |= AV_FRAME_FLAG_KEY;
    else
        s->current_frame.f->flags &= ~AV_FRAME_FLAG_KEY;

    /* Scratch line for half-pel MC at picture edges: 9 rows of one stride. */
    if (!s->edge_emu_buffer) {
        s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
        if (!s->edge_emu_buffer) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }

    if (s->keyframe) {
        if (!s->theora) {
            skip_bits(&gb, 4); /* width code */
            skip_bits(&gb, 4); /* height code */
            if (s->version) {
                /* Raw VP3.1+/VP4 streams embed the real version here. */
                int version = get_bits(&gb, 5);
#if !CONFIG_VP4_DECODER
                if (version >= 2) {
                    av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
                    /* NOTE(review): the statement setting ret was dropped by
                     * the extractor. */
                    goto error;
                }
#endif
                s->version = version;
                if (avctx->frame_num == 0)
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "VP version: %d\n", s->version);
            }
        }
        if (s->version || s->theora) {
            if (get_bits1(&gb))
                av_log(s->avctx, AV_LOG_ERROR,
                       "Warning, unsupported keyframe coding type?!\n");
            skip_bits(&gb, 2); /* reserved? */

#if CONFIG_VP4_DECODER
            /* VP4 keyframes repeat the macroblock geometry; we only verify
             * it against what init derived from the coded dimensions. */
            if (s->version >= 2) {
                int mb_height, mb_width;
                int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;

                mb_height = get_bits(&gb, 8);
                mb_width  = get_bits(&gb, 8);
                if (mb_height != s->macroblock_height ||
                    mb_width != s->macroblock_width)
                    avpriv_request_sample(s->avctx, "macroblock dimension mismatch");

                mb_width_mul = get_bits(&gb, 5);
                mb_width_div = get_bits(&gb, 3);
                mb_height_mul = get_bits(&gb, 5);
                mb_height_div = get_bits(&gb, 3);
                if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
                    avpriv_request_sample(s->avctx, "unexpected macroblock dimension multiplier/divider");

                if (get_bits(&gb, 2))
                    avpriv_request_sample(s->avctx, "unknown bits");
            }
#endif
        }
        /* Keyframes also become the new golden (long-term) reference. */
        ff_progress_frame_replace(&s->golden_frame, &s->current_frame);
    } else {
        if (!s->golden_frame.f) {
            av_log(s->avctx, AV_LOG_WARNING,
                   "vp3: first frame not a keyframe\n");

            /* Fabricate a blank golden/last reference so decoding can
             * proceed (output will be garbage until a real keyframe). */
            if ((ret = ff_progress_frame_get_buffer(avctx, &s->golden_frame,
                                                    AV_GET_BUFFER_FLAG_REF)) < 0)
                goto error;
            s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
            ff_progress_frame_replace(&s->last_frame, &s->golden_frame);
            ff_progress_frame_report(&s->golden_frame, INT_MAX);
        }
    }
    ff_thread_finish_setup(avctx);

    memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));

    if (s->version < 2) {
        if ((ret = unpack_superblocks(s, &gb)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
            goto error;
        }
#if CONFIG_VP4_DECODER
    } else {
        if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_macroblocks\n");
            goto error;
        }
#endif
    }
    if ((ret = unpack_modes(s, &gb)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
        goto error;
    }
    if (ret = unpack_vectors(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
        goto error;
    }
    if ((ret = unpack_block_qpis(s, &gb)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
        goto error;
    }

    if (s->version < 2) {
        if ((ret = unpack_dct_coeffs(s, &gb)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
            goto error;
        }
#if CONFIG_VP4_DECODER
    } else {
        if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_dct_coeffs\n");
            goto error;
        }
#endif
    }

    /* Non-flipped output is rendered bottom-up: point each plane at its
     * last row and walk with a negative stride (see render_slice()). */
    for (int i = 0; i < 3; i++) {
        int height = s->height >> (i && s->chroma_y_shift);
        if (s->flipped_image)
            s->data_offset[i] = 0;
        else
            s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
    }

    s->last_slice_end = 0;
    for (int i = 0; i < s->c_superblock_height; i++)
        render_slice(s, i);

    // filter the last row
    if (s->version < 2)
        for (int i = 0; i < 3; i++) {
            int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
            apply_loop_filter(s, i, row, row + 1);
        }
    vp3_draw_horiz_band(s, s->height);

    ff_progress_frame_unref(&s->last_frame);

    /* output frame, offset as needed */
    if ((ret = av_frame_ref(frame, s->current_frame.f)) < 0)
        return ret;

    frame->crop_left   = s->offset_x;
    frame->crop_right  = avctx->coded_width - avctx->width - s->offset_x;
    frame->crop_top    = s->offset_y;
    frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;

    *got_frame = 1;

    return buf_size;

error:
    /* Unblock any consumer thread waiting on this frame before bailing. */
    ff_progress_frame_report(&s->current_frame, INT_MAX);
    ff_progress_frame_unref(&s->last_frame);

    return ret;
}
2800 
2801 static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length,
2802  AVCodecContext *avctx)
2803 {
2804  if (get_bits1(gb)) {
2805  int token;
2806  if (huff->nb_entries >= 32) { /* overflow */
2807  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2808  return -1;
2809  }
2810  token = get_bits(gb, 5);
2811  ff_dlog(avctx, "code length %d, curr entry %d, token %d\n",
2812  length, huff->nb_entries, token);
2813  huff->entries[huff->nb_entries++] = (HuffEntry){ length, token };
2814  } else {
2815  /* The following bound follows from the fact that nb_entries <= 32. */
2816  if (length >= 31) { /* overflow */
2817  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2818  return -1;
2819  }
2820  length++;
2821  if (read_huffman_tree(huff, gb, length, avctx))
2822  return -1;
2823  if (read_huffman_tree(huff, gb, length, avctx))
2824  return -1;
2825  }
2826  return 0;
2827 }
2828 
2829 #if CONFIG_THEORA_DECODER
/* Maps the 2-bit Theora pixel-format field to an AVPixelFormat; index 1 is
 * reserved in the spec and maps to an invalid format checked by the caller.
 * NOTE(review): the four initializer entries were dropped by the
 * documentation extractor. */
static const enum AVPixelFormat theora_pix_fmts[4] = {
};
2833 
/* Parse a Theora identification header: version, coded and visible
 * dimensions, crop offsets, framerate, aspect ratio, pixel format and
 * colorspace. Sets s->theora_header on success so that the setup-header
 * parser can run. */
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int visible_width, visible_height, colorspace;
    uint8_t offset_x = 0, offset_y = 0;
    int ret;
    AVRational fps, aspect;

    /* Minimum size of a well-formed identification header in bits. */
    if (get_bits_left(gb) < 206)
        return AVERROR_INVALIDDATA;

    s->theora_header = 0;
    s->theora = get_bits(gb, 24);
    av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
    if (!s->theora) {
        s->theora = 1;
        avpriv_request_sample(s->avctx, "theora 0");
    }

    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
     * but previous versions have the image flipped relative to vp3 */
    if (s->theora < 0x030200) {
        s->flipped_image = 1;
        av_log(avctx, AV_LOG_DEBUG,
               "Old (<alpha3) Theora bitstream, flipped image\n");
    }

    /* Coded dimensions in units of 16 pixels. */
    visible_width  =
    s->width       = get_bits(gb, 16) << 4;
    visible_height =
    s->height      = get_bits(gb, 16) << 4;

    if (s->theora >= 0x030200) {
        visible_width  = get_bits(gb, 24);
        visible_height = get_bits(gb, 24);

        offset_x = get_bits(gb, 8); /* offset x */
        offset_y = get_bits(gb, 8); /* offset y, from bottom */
    }

    /* sanity check */
    if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
        visible_width  + offset_x > s->width ||
        visible_height + offset_y > s->height ||
        visible_width + 512 < s->width ||
        visible_height + 512 < s->height ||
        visible_width < 18
    ) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
               visible_width, visible_height, offset_x, offset_y,
               s->width, s->height);
        return AVERROR_INVALIDDATA;
    }

    fps.num = get_bits_long(gb, 32);
    fps.den = get_bits_long(gb, 32);
    if (fps.num && fps.den) {
        if (fps.num < 0 || fps.den < 0) {
            av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
            return AVERROR_INVALIDDATA;
        }
        av_reduce(&avctx->framerate.den, &avctx->framerate.num,
                  fps.den, fps.num, 1 << 30);
    }

    aspect.num = get_bits(gb, 24);
    aspect.den = get_bits(gb, 24);
    if (aspect.num && aspect.den) {
        /* NOTE(review): the call head on the next lines (presumably
         * av_reduce(&avctx->sample_aspect_ratio.num,) was dropped by the
         * extractor. */
                  &avctx->sample_aspect_ratio.den,
                  aspect.num, aspect.den, 1 << 30);
        ff_set_sar(avctx, avctx->sample_aspect_ratio);
    }

    if (s->theora < 0x030200)
        skip_bits(gb, 5); /* keyframe frequency force */
    colorspace = get_bits(gb, 8);
    skip_bits(gb, 24); /* bitrate */

    skip_bits(gb, 6); /* quality hint */

    if (s->theora >= 0x030200) {
        skip_bits(gb, 5); /* keyframe frequency force */
        avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
        if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
            av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
            return AVERROR_INVALIDDATA;
        }
        skip_bits(gb, 3); /* reserved */
    } else
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    if (s->width < 18)
        return AVERROR_PATCHWELCOME;
    ret = ff_set_dimensions(avctx, s->width, s->height);
    if (ret < 0)
        return ret;
    if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
        avctx->width  = visible_width;
        avctx->height = visible_height;
        // translate offsets from theora axis ([0,0] lower left)
        // to normal axis ([0,0] upper left)
        s->offset_x = offset_x;
        s->offset_y = s->height - visible_height - offset_y;
    }

    /* NOTE(review): the two color_primaries assignments for these branches
     * were dropped by the extractor. */
    if (colorspace == 1)
    else if (colorspace == 2)

    if (colorspace == 1 || colorspace == 2) {
        avctx->colorspace = AVCOL_SPC_BT470BG;
        avctx->color_trc  = AVCOL_TRC_BT709;
    }

    s->theora_header = 1;
    return 0;
}
2954 
/* Parse a Theora setup header: loop-filter limit values, AC/DC scale
 * factors, the base dequant matrices with their quality-range mappings, and
 * the Huffman tables used for DCT token decoding. Requires a previously
 * parsed identification header; sets s->theora_tables on success so that
 * vp3_decode_init() uses these tables instead of the VP3.1 defaults. */
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int n, matrices, ret;

    if (!s->theora_header)
        return AVERROR_INVALIDDATA;

    if (s->theora >= 0x030200) {
        n = get_bits(gb, 3);
        /* loop filter limit values table */
        if (n)
            for (int i = 0; i < 64; i++)
                s->filter_limit_values[i] = get_bits(gb, n);
    }

    /* Pre-3.2.0 streams use fixed 16-bit fields for the scale tables. */
    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* quality threshold table */
    for (int i = 0; i < 64; i++)
        s->coded_ac_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* dc scale factor table */
    for (int i = 0; i < 64; i++)
        s->coded_dc_scale_factor[0][i] =
        s->coded_dc_scale_factor[1][i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        matrices = get_bits(gb, 9) + 1;
    else
        matrices = 3;

    /* Bounded by the size of s->base_matrix. */
    if (matrices > 384) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
        return -1;
    }

    for (int j = 0; j < matrices; j++)
        for (int i = 0; i < 64; i++)
            s->base_matrix[j][i] = get_bits(gb, 8);

    /* Quality-range mapping for each (inter, plane) matrix set. */
    for (int inter = 0; inter <= 1; inter++) {
        for (int plane = 0; plane <= 2; plane++) {
            int newqr = 1;
            if (inter || plane > 0)
                newqr = get_bits1(gb);
            if (!newqr) {
                /* Copy the mapping from an earlier set: either the same
                 * plane of the intra set, or the previously parsed set. */
                int qtj, plj;
                if (inter && get_bits1(gb)) {
                    qtj = 0;
                    plj = plane;
                } else {
                    qtj = (3 * inter + plane - 1) / 3;
                    plj = (plane + 2) % 3;
                }
                s->qr_count[inter][plane] = s->qr_count[qtj][plj];
                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
                       sizeof(s->qr_size[0][0]));
                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
                       sizeof(s->qr_base[0][0]));
            } else {
                /* Read a fresh list of (matrix index, qi run) pairs that
                 * must cover qi = 0..63 exactly. */
                int qri = 0;
                int qi  = 0;

                for (;;) {
                    int i = get_bits(gb, av_log2(matrices - 1) + 1);
                    if (i >= matrices) {
                        av_log(avctx, AV_LOG_ERROR,
                               "invalid base matrix index\n");
                        return -1;
                    }
                    s->qr_base[inter][plane][qri] = i;
                    if (qi >= 63)
                        break;
                    i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
                    s->qr_size[inter][plane][qri++] = i;
                    qi += i;
                }

                if (qi > 63) {
                    av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
                    return -1;
                }
                s->qr_count[inter][plane] = qri;
            }
        }
    }

    /* Huffman tables */
    for (int i = 0; i < FF_ARRAY_ELEMS(s->huffman_table); i++) {
        s->huffman_table[i].nb_entries = 0;
        if ((ret = read_huffman_tree(&s->huffman_table[i], gb, 0, avctx)) < 0)
            return ret;
    }

    s->theora_tables = 1;

    return 0;
}
3060 
/* Theora decoder init: split the Xiph-lacing extradata into its three
 * header packets (identification 0x80, comment 0x81, setup 0x82), parse
 * them, then run the common VP3 init. */
static av_cold int theora_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ptype;
    const uint8_t *header_start[3];
    int header_len[3];
    int ret;

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    s->theora = 1;

    if (!avctx->extradata_size) {
        av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
        return -1;
    }

    /* NOTE(review): the call head on the next line (presumably
     * if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,)
     * was dropped by the documentation extractor. */
                                    42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return -1;
    }

    for (int i = 0; i < 3; i++) {
        if (header_len[i] <= 0)
            continue;
        ret = init_get_bits8(&gb, header_start[i], header_len[i]);
        if (ret < 0)
            return ret;

        ptype = get_bits(&gb, 8);

        /* Header packets must have the top bit set; warn but keep going. */
        if (!(ptype & 0x80)) {
            av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
//            return -1;
        }

        // FIXME: Check for this as well.
        skip_bits_long(&gb, 6 * 8); /* "theora" */

        switch (ptype) {
        case 0x80:
            if (theora_decode_header(avctx, &gb) < 0)
                return -1;
            break;
        case 0x81:
// FIXME: is this needed? it breaks sometimes
//            theora_decode_comments(avctx, gb);
            break;
        case 0x82:
            if (theora_decode_tables(avctx, &gb))
                return -1;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR,
                   "Unknown Theora config packet: %d\n", ptype & ~0x80);
            break;
        }
        /* Comment packets (0x81) legitimately carry trailing payload. */
        if (ptype != 0x81 && get_bits_left(&gb) >= 8U)
            av_log(avctx, AV_LOG_WARNING,
                   "%d bits left in packet %X\n",
                   get_bits_left(&gb), ptype);
        /* Pre-alpha3 streams bundle everything in a single header packet. */
        if (s->theora < 0x030200)
            break;
    }

    return vp3_decode_init(avctx);
}
3130 
/* Codec registration tables for the Theora, VP3 and VP4 decoders; all three
 * share Vp3DecodeContext and the common decode/flush/thread-update hooks.
 * NOTE(review): several initializer lines (the decode callback entries, the
 * continuations of .p.capabilities and .caps_internal, and the
 * "const FFCodec ff_vp3_decoder = {" line) were dropped by the
 * documentation extractor. */
const FFCodec ff_theora_decoder = {
    .p.name                = "theora",
    CODEC_LONG_NAME("Theora"),
    .p.type                = AVMEDIA_TYPE_VIDEO,
    .p.id                  = AV_CODEC_ID_THEORA,
    .priv_data_size        = sizeof(Vp3DecodeContext),
    .init                  = theora_decode_init,
    .close                 = vp3_decode_end,
    .p.capabilities        = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
    .flush                 = vp3_decode_flush,
    UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
    .caps_internal         = FF_CODEC_CAP_INIT_CLEANUP |
};
#endif

    .p.name                = "vp3",
    CODEC_LONG_NAME("On2 VP3"),
    .p.type                = AVMEDIA_TYPE_VIDEO,
    .p.id                  = AV_CODEC_ID_VP3,
    .priv_data_size        = sizeof(Vp3DecodeContext),
    .init                  = vp3_decode_init,
    .close                 = vp3_decode_end,
    .p.capabilities        = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
    .flush                 = vp3_decode_flush,
    UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
    .caps_internal         = FF_CODEC_CAP_INIT_CLEANUP |
};

#if CONFIG_VP4_DECODER
const FFCodec ff_vp4_decoder = {
    .p.name                = "vp4",
    CODEC_LONG_NAME("On2 VP4"),
    .p.type                = AVMEDIA_TYPE_VIDEO,
    .p.id                  = AV_CODEC_ID_VP4,
    .priv_data_size        = sizeof(Vp3DecodeContext),
    .init                  = vp3_decode_init,
    .close                 = vp3_decode_end,
    .p.capabilities        = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
    .flush                 = vp3_decode_flush,
    UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
    .caps_internal         = FF_CODEC_CAP_INIT_CLEANUP |
};
#endif
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
vp4_ac_scale_factor
static const uint16_t vp4_ac_scale_factor[64]
Definition: vp4data.h:64
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1967
vp4data.h
PUL
#define PUL
allocate_tables
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
Definition: vp3.c:2305
vp3_dequant
static int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
Definition: vp3.c:1849
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
Vp3Fragment::dc
int16_t dc
Definition: vp3.c:69
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_vlc_init_from_lengths
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:306
av_clip
#define av_clip
Definition: common.h:100
Vp3DecodeContext::offset_x
uint8_t offset_x
Definition: vp3.c:247
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
VP3DSPContext
Definition: vp3dsp.h:29
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:688
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
vp4_get_mv
static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
Definition: vp3.c:887
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
mem_internal.h
Vp3DecodeContext::c_macroblock_height
int c_macroblock_height
Definition: vp3.c:237
zero_run_base
static const uint8_t zero_run_base[32]
Definition: vp3data.h:133
MODE_INTER_PRIOR_LAST
#define MODE_INTER_PRIOR_LAST
Definition: vp3.c:87
VP4Predictor
Definition: vp3.c:177
Vp3DecodeContext::idct_scantable
uint8_t idct_scantable[64]
Definition: vp3.c:209
thread.h
HuffEntry::len
uint8_t len
Definition: exr.c:97
AVRefStructOpaque
RefStruct is an API for creating reference-counted objects with minimal overhead.
Definition: refstruct.h:58
VP4Predictor::dc
int dc
Definition: vp3.c:178
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:424
mode_code_vlc_len
static const uint8_t mode_code_vlc_len[8]
Definition: vp3data.h:97
superblock_run_length_vlc
static VLCElem superblock_run_length_vlc[88]
Definition: vp3.c:165
read_huffman_tree
static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length, AVCodecContext *avctx)
Definition: vp3.c:2801
PUR
#define PUR
vp3dsp.h
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:435
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:660
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
AVPacket::data
uint8_t * data
Definition: packet.h:595
ff_vp3_decoder
const FFCodec ff_vp3_decoder
Definition: vp3.c:3150
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
Wrapper around ff_progress_frame_alloc() and ff_thread_get_buffer().
Definition: decode.c:1927
Vp3DecodeContext::all_fragments
Vp3Fragment * all_fragments
Definition: vp3.c:244
mode_code_vlc
static VLCElem mode_code_vlc[24+2108 *CONFIG_VP4_DECODER]
Definition: vp3.c:170
Vp3DecodeContext::filter_limit_values
uint8_t filter_limit_values[64]
Definition: vp3.c:325
FFCodec
Definition: codec_internal.h:127
fragment_run_length_vlc
static VLCElem fragment_run_length_vlc[56]
Definition: vp3.c:166
motion_vector_vlc
static VLCElem motion_vector_vlc[112]
Definition: vp3.c:167
base
uint8_t base
Definition: vp3data.h:128
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Definition: utils.c:91
Vp3Fragment::coding_method
uint8_t coding_method
Definition: vp3.c:70
thread.h
VP4_DC_INTER
@ VP4_DC_INTER
Definition: vp3.c:148
unpack_superblocks
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:468
render_slice
static void render_slice(Vp3DecodeContext *s, int slice)
Definition: vp3.c:2056
CoeffVLCs::vlc_tabs
const VLCElem * vlc_tabs[80]
Definition: vp3.c:194
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1387
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:456
Vp3DecodeContext::height
int height
Definition: vp3.c:202
vlc_tables
static VLCElem vlc_tables[VLC_TABLES_SIZE]
Definition: imc.c:115
AV_CODEC_FLAG2_IGNORE_CROP
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
Definition: avcodec.h:355
fragment
Definition: dashdec.c:38
Vp3DecodeContext::y_superblock_count
int y_superblock_count
Definition: vp3.c:224
xiph.h
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:56
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:559
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
Vp3DecodeContext::superblock_fragments
int * superblock_fragments
Definition: vp3.c:314
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
Vp3DecodeContext::golden_frame
ProgressFrame golden_frame
Definition: vp3.c:204
get_coeff
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
Definition: vp3.c:1151
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
Vp3DecodeContext::qr_count
uint8_t qr_count[2][3]
Definition: vp3.c:257
Vp3DecodeContext::hdsp
HpelDSPContext hdsp
Definition: vp3.c:210
vp4_mv_vlc
static const uint8_t vp4_mv_vlc[2][7][63][2]
Definition: vp4data.h:112
BLOCK_Y
#define BLOCK_Y
Definition: vp3.c:642
Vp3DecodeContext::y_superblock_width
int y_superblock_width
Definition: vp3.c:222
CODING_MODE_COUNT
#define CODING_MODE_COUNT
Definition: vp3.c:91
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
CoeffVLCs
Definition: rv60dec.c:89
GetBitContext
Definition: get_bits.h:109
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
SET_CHROMA_MODES
#define SET_CHROMA_MODES
tables
Writing a table generator This documentation is preliminary Parts of the API are not good and should be changed Basic concepts A table generator consists of two *_tablegen c and *_tablegen h The h file will provide the variable declarations and initialization code for the tables
Definition: tablegen.txt:10
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
perm
perm
Definition: f_perms.c:75
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
MODE_INTER_LAST_MV
#define MODE_INTER_LAST_MV
Definition: vp3.c:86
Vp3DecodeContext::y_superblock_height
int y_superblock_height
Definition: vp3.c:223
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
Vp3DecodeContext::offset_y
uint8_t offset_y
Definition: vp3.c:248
Vp3DecodeContext::theora
int theora
Definition: vp3.c:200
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
loop
static int loop
Definition: ffplay.c:337
TRANSPOSE
#define TRANSPOSE(x)
AVRational::num
int num
Numerator.
Definition: rational.h:59
progressframe.h
refstruct.h
Vp3DecodeContext::num_kf_coded_fragment
int num_kf_coded_fragment[3]
Definition: vp3.c:297
TOKEN_ZERO_RUN
#define TOKEN_ZERO_RUN(coeff, zero_run)
Definition: vp3.c:281
vp4_pred_block_type_map
static const uint8_t vp4_pred_block_type_map[8]
Definition: vp3.c:154
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:69
await_reference_row
static void await_reference_row(Vp3DecodeContext *s, const Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
Definition: vp3.c:1935
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:653
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
motion_vector_vlc_table
static const uint8_t motion_vector_vlc_table[63][2]
Definition: vp3data.h:101
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:119
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
theora_decode_tables
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:650
hilbert_offset
static const uint8_t hilbert_offset[16][2]
Definition: vp3.c:139
eob_run_table
static const struct @316 eob_run_table[7]
VLCInitState
For static VLCs, the number of bits can often be hardcoded at each get_vlc2() callsite.
Definition: vlc.h:220
emms_c
#define emms_c()
Definition: emms.h:89
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
Vp3DecodeContext::fragment_height
int fragment_height[2]
Definition: vp3.c:242
CoeffVLCs::vlcs
VLC vlcs[80]
Definition: vp3.c:195
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
s
#define s(width, name)
Definition: cbs_vp9.c:198
init_loop_filter
static void init_loop_filter(Vp3DecodeContext *s)
Definition: vp3.c:459
NB_VP4_DC_TYPES
@ NB_VP4_DC_TYPES
Definition: vp3.c:150
vp4_mv_table_selector
static const uint8_t vp4_mv_table_selector[32]
Definition: vp4data.h:105
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
vp3_decode_flush
static av_cold void vp3_decode_flush(AVCodecContext *avctx)
Definition: vp3.c:351
transform
static const int8_t transform[32][32]
Definition: dsp.c:27
HuffTable::nb_entries
uint8_t nb_entries
Definition: vp3.c:190
init_block_mapping
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments,...
Definition: vp3.c:384
SB_PARTIALLY_CODED
#define SB_PARTIALLY_CODED
Definition: vp3.c:75
bits
uint8_t bits
Definition: vp3data.h:128
SB_NOT_CODED
#define SB_NOT_CODED
Definition: vp3.c:74
av_refstruct_alloc_ext
static void * av_refstruct_alloc_ext(size_t size, unsigned flags, void *opaque, void(*free_cb)(AVRefStructOpaque opaque, void *obj))
A wrapper around av_refstruct_alloc_ext_c() for the common case of a non-const qualified opaque.
Definition: refstruct.h:94
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
Vp3Fragment::qpi
uint8_t qpi
Definition: vp3.c:71
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1950
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
decode.h
get_bits.h
reverse_dc_prediction
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
Definition: vp3.c:1634
unpack_dct_coeffs
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1299
ModeAlphabet
static const int ModeAlphabet[6][CODING_MODE_COUNT]
Definition: vp3.c:101
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
RSHIFT
#define RSHIFT(a, b)
Definition: common.h:56
av_fallthrough
#define av_fallthrough
Definition: attributes.h:67
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:643
MODE_USING_GOLDEN
#define MODE_USING_GOLDEN
Definition: vp3.c:88
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
Vp3DecodeContext::macroblock_width
int macroblock_width
Definition: vp3.c:233
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
Vp3DecodeContext::idct_permutation
uint8_t idct_permutation[64]
Definition: vp3.c:208
if
if(ret)
Definition: filter_design.txt:179
init_dequantizer
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
Definition: vp3.c:417
MODE_INTER_FOURMV
#define MODE_INTER_FOURMV
Definition: vp3.c:90
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
Vp3DecodeContext::c_superblock_width
int c_superblock_width
Definition: vp3.c:225
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
coeff_tables
static const int16_t *const coeff_tables[32]
Definition: vp3data.h:332
Vp3DecodeContext::offset_x_warned
int offset_x_warned
Definition: vp3.c:249
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
free_vlc_tables
static av_cold void free_vlc_tables(AVRefStructOpaque unused, void *obj)
Definition: vp3.c:2349
HuffTable
Definition: vp3.c:188
PU
#define PU
unpack_modes
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:784
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
Vp3DecodeContext::superblock_count
int superblock_count
Definition: vp3.c:221
ff_vp3dsp_h_loop_filter_12
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
theora_decode_header
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
fragment_run_length_vlc_len
static const uint8_t fragment_run_length_vlc_len[30]
Definition: vp3data.h:92
vp4_bias
static const uint8_t vp4_bias[5 *16][32][2]
Definition: vp4data.h:329
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:106
mathops.h
Vp3DecodeContext::theora_header
int theora_header
Definition: vp3.c:200
TOKEN_COEFF
#define TOKEN_COEFF(coeff)
Definition: vp3.c:282
vp4_y_dc_scale_factor
static const uint8_t vp4_y_dc_scale_factor[64]
Definition: vp4data.h:42
Vp3DecodeContext::skip_loop_filter
int skip_loop_filter
Definition: vp3.c:216
FF_THREAD_IS_COPY
@ FF_THREAD_IS_COPY
Definition: thread.h:61
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:341
Vp3DecodeContext::coeff_vlc
CoeffVLCs * coeff_vlc
The first 16 of the following VLCs are for the dc coefficients; the others are four groups of 16 VLCs...
Definition: vp3.c:304
AV_CODEC_ID_VP4
@ AV_CODEC_ID_VP4
Definition: codec_id.h:300
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:645
jpegquanttables.h
vp31_ac_scale_factor
static const uint16_t vp31_ac_scale_factor[64]
Definition: vp3data.h:63
Vp3DecodeContext::qr_size
uint8_t qr_size[2][3][64]
Definition: vp3.c:258
AVOnce
#define AVOnce
Definition: thread.h:202
DC_COEFF
#define DC_COEFF(u)
Definition: vp3.c:1632
Vp3DecodeContext::vp3dsp
VP3DSPContext vp3dsp
Definition: vp3.c:212
Vp3DecodeContext::flipped_image
int flipped_image
Definition: vp3.c:214
vp31_intra_y_dequant
static const uint8_t vp31_intra_y_dequant[64]
Definition: vp3data.h:29
ff_vp3dsp_v_loop_filter_12
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:46
Vp3DecodeContext::fragment_width
int fragment_width[2]
Definition: vp3.c:241
ff_vp3dsp_set_bounding_values
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
Definition: vp3dsp.c:477
Vp3DecodeContext::total_num_coded_frags
int total_num_coded_frags
Definition: vp3.c:289
SB_FULLY_CODED
#define SB_FULLY_CODED
Definition: vp3.c:76
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:231
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:503
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:551
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:596
fixed_motion_vector_table
static const int8_t fixed_motion_vector_table[64]
Definition: vp3data.h:115
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
unpack_vectors
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:902
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_vp4_decoder
const FFCodec ff_vp4_decoder
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:61
VLCElem
Definition: vlc.h:32
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:436
ref_frame
static int ref_frame(VVCFrame *dst, const VVCFrame *src)
Definition: dec.c:616
Vp3DecodeContext::dct_tokens
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Definition: vp3.c:278
Vp3DecodeContext::coded_dc_scale_factor
uint16_t coded_dc_scale_factor[2][64]
Definition: vp3.c:254
Vp3DecodeContext::qps
int qps[3]
Definition: vp3.c:218
Vp3DecodeContext::block
int16_t block[64]
Definition: vp3.c:213
Vp3DecodeContext::chroma_y_shift
int chroma_y_shift
Definition: vp3.c:203
Vp3DecodeContext::data_offset
int data_offset[3]
Definition: vp3.c:246
state
static struct @583 state
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
Vp3DecodeContext::macroblock_coding
unsigned char * macroblock_coding
Definition: vp3.c:318
version
version
Definition: libkvazaar.c:313
VP4_DC_GOLDEN
@ VP4_DC_GOLDEN
Definition: vp3.c:149
vp3data.h
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:668
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1584
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
Vp3DecodeContext::avctx
AVCodecContext * avctx
Definition: vp3.c:199
AV_CODEC_ID_VP3
@ AV_CODEC_ID_VP3
Definition: codec_id.h:81
emms.h
Vp3DecodeContext::nkf_coded_fragment_list
int * nkf_coded_fragment_list
Definition: vp3.c:296
Vp3DecodeContext::keyframe
int keyframe
Definition: vp3.c:207
MODE_INTRA
#define MODE_INTRA
Definition: vp3.c:84
apply_loop_filter
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1783
Vp3DecodeContext::macroblock_height
int macroblock_height
Definition: vp3.c:234
Vp3DecodeContext::yuv_macroblock_count
int yuv_macroblock_count
Definition: vp3.c:238
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
Vp3DecodeContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: vp3.c:320
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:373
Vp3DecodeContext::c_macroblock_count
int c_macroblock_count
Definition: vp3.c:235
AV_CODEC_ID_THEORA
@ AV_CODEC_ID_THEORA
Definition: codec_id.h:82
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
vp3_decode_frame
static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: vp3.c:2541
superblock_run_length_vlc_lens
static const uint8_t superblock_run_length_vlc_lens[34]
Definition: vp3data.h:85
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
ff_mjpeg_std_chrominance_quant_tbl
const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64]
Definition: jpegquanttables.c:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
Vp3DecodeContext::macroblock_count
int macroblock_count
Definition: vp3.c:232
SUPERBLOCK_VLC_BITS
#define SUPERBLOCK_VLC_BITS
Definition: vp3.c:63
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:684
Vp3DecodeContext::current_frame
ProgressFrame current_frame
Definition: vp3.c:206
Vp3DecodeContext::v_superblock_start
int v_superblock_start
Definition: vp3.c:229
Vp3DecodeContext::c_superblock_height
int c_superblock_height
Definition: vp3.c:226
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
VP4_MV_VLC_BITS
#define VP4_MV_VLC_BITS
Definition: vp3.c:62
Vp3DecodeContext::coded_fragment_list
int * coded_fragment_list[3]
Definition: vp3.c:293
avcodec.h
Vp3DecodeContext::c_superblock_count
int c_superblock_count
Definition: vp3.c:227
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
PL
#define PL
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:641
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
Vp3DecodeContext::theora_tables
int theora_tables
Definition: vp3.c:200
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
free_tables
static av_cold void free_tables(AVCodecContext *avctx)
Definition: vp3.c:335
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
unpack_vlcs
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, const VLCElem *vlc_table, int coeff_index, int plane, int eob_run)
Definition: vp3.c:1179
MODE_INTER_PLUS_MV
#define MODE_INTER_PLUS_MV
Definition: vp3.c:85
Vp3DecodeContext::num_coded_frags
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
Definition: vp3.c:288
ff_thread_sync_ref
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows to synchronize objects whose lifetime is the whole decoding process among all frame threads.
Definition: decode.c:1978
vp4_block_pattern_table_selector
static const uint8_t vp4_block_pattern_table_selector[14]
Definition: vp4data.h:86
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
Vp3DecodeContext::chroma_x_shift
int chroma_x_shift
Definition: vp3.c:203
BLOCK_X
#define BLOCK_X
Definition: vp3.c:641
U
#define U(x)
Definition: vpx_arith.h:37
MODE_COPY
#define MODE_COPY
Definition: vp3.c:94
Vp3DecodeContext
Definition: vp3.c:198
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1957
ff_theora_decoder
const FFCodec ff_theora_decoder
vp4_filter_limit_values
static const uint8_t vp4_filter_limit_values[64]
Definition: vp4data.h:75
MODE_GOLDEN_MV
#define MODE_GOLDEN_MV
Definition: vp3.c:89
coeff_vlc
static const VLCElem * coeff_vlc[2][8][4]
Definition: atrac9dec.c:110
FRAGMENT_PIXELS
#define FRAGMENT_PIXELS
Definition: vp3.c:65
AVCodecContext
main external API structure.
Definition: avcodec.h:439
vp3_draw_horiz_band
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
Definition: vp3.c:1894
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:799
vp4_generic_dequant
static const uint8_t vp4_generic_dequant[64]
Definition: vp4data.h:31
zero_run_get_bits
static const uint8_t zero_run_get_bits[32]
Definition: vp3data.h:140
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
VLC
Definition: vlc.h:50
Vp3DecodeContext::coded_ac_scale_factor
uint32_t coded_ac_scale_factor[64]
Definition: vp3.c:255
Vp3DecodeContext::bounding_values_array
int bounding_values_array[256+4]
Definition: vp3.c:326
output_plane
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
Definition: indeo3.c:1032
HuffEntry
Definition: exr.c:96
vp31_inter_dequant
static const uint8_t vp31_inter_dequant[64]
Definition: vp3data.h:41
temp
else temp
Definition: vf_mcdeint.c:271
body
static void body(uint32_t ABCD[4], const uint8_t *src, size_t nblocks)
Definition: md5.c:103
VLC::table
VLCElem * table
Definition: vlc.h:52
vp4_block_pattern_vlc
static const uint8_t vp4_block_pattern_vlc[2][14][2]
Definition: vp4data.h:90
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
avpriv_split_xiph_headers
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use.
Definition: xiph.c:26
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MODE_INTER_NO_MV
#define MODE_INTER_NO_MV
Definition: vp3.c:83
VideoDSPContext
Definition: videodsp.h:32
ff_vlc_init_tables_from_lengths
const av_cold VLCElem * ff_vlc_init_tables_from_lengths(VLCInitState *state, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags)
Definition: vlc.c:366
HuffEntry::sym
uint8_t sym
Definition: vp3.c:185
Vp3DecodeContext::superblock_coding
unsigned char * superblock_coding
Definition: vp3.c:230
COMPATIBLE_FRAME
#define COMPATIBLE_FRAME(x)
Definition: vp3.c:1630
AVERROR_DECODER_NOT_FOUND
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:54
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
Vp3DecodeContext::fragment_start
int fragment_start[3]
Definition: vp3.c:245
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
ff_vlc_init_tables
static const VLCElem * ff_vlc_init_tables(VLCInitState *state, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, int flags)
Definition: vlc.h:254
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
vp3_bias
static const uint8_t vp3_bias[5 *16][32][2]
Definition: vp3data.h:370
get_eob_run
static int get_eob_run(GetBitContext *gb, int token)
Definition: vp3.c:1143
VP4_DC_UNDEFINED
@ VP4_DC_UNDEFINED
Definition: vp3.c:151
HuffTable::entries
HuffEntry entries[32]
Definition: vp3.c:189
VLC_INIT_STATIC_TABLE_FROM_LENGTHS
#define VLC_INIT_STATIC_TABLE_FROM_LENGTHS(vlc_table, nb_bits, nb_codes, lens, lens_wrap, syms, syms_wrap, syms_size, offset, flags)
Definition: vlc.h:288
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:464
Vp3DecodeContext::huffman_table
HuffTable huffman_table[5 *16]
Definition: vp3.c:323
ProgressFrame
The ProgressFrame structure.
Definition: progressframe.h:73
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
VLC_INIT_STATE
#define VLC_INIT_STATE(_table)
Definition: vlc.h:225
vp31_filter_limit_values
static const uint8_t vp31_filter_limit_values[64]
Definition: vp3data.h:74
AVPacket
This structure stores compressed data.
Definition: packet.h:572
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
VP4Predictor::type
int type
Definition: vp3.c:179
vp3_decode_init
static av_cold int vp3_decode_init(AVCodecContext *avctx)
Definition: vp3.c:2357
Vp3DecodeContext::base_matrix
uint8_t base_matrix[384][64]
Definition: vp3.c:256
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
VP3_MV_VLC_BITS
#define VP3_MV_VLC_BITS
Definition: vp3.c:61
Vp3DecodeContext::fragment_count
int fragment_count
Definition: vp3.c:240
vp31_dc_scale_factor
static const uint8_t vp31_dc_scale_factor[64]
Definition: vp3data.h:52
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
imgutils.h
hpeldsp.h
Vp3DecodeContext::width
int width
Definition: vp3.c:202
Vp3DecodeContext::kf_coded_fragment_list
int * kf_coded_fragment_list
Definition: vp3.c:295
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
unpack_block_qpis
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1100
Vp3DecodeContext::qr_base
uint16_t qr_base[2][3][64]
Definition: vp3.c:259
vp3_decode_end
static av_cold int vp3_decode_end(AVCodecContext *avctx)
Definition: vp3.c:360
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
VP4_DC_INTRA
@ VP4_DC_INTRA
Definition: vp3.c:147
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
vp4_uv_dc_scale_factor
static const uint8_t vp4_uv_dc_scale_factor[64]
Definition: vp4data.h:53
MAXIMUM_LONG_BIT_RUN
#define MAXIMUM_LONG_BIT_RUN
Definition: vp3.c:81
init_tables_once
static av_cold void init_tables_once(void)
Definition: vp3.c:2262
stride
#define stride
Definition: h264pred_template.c:536
Vp3DecodeContext::version
int version
Definition: vp3.c:201
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
Vp3DecodeContext::motion_val
int8_t(*[2] motion_val)[2]
Definition: vp3.c:251
Vp3DecodeContext::last_slice_end
int last_slice_end
Definition: vp3.c:215
ff_vp3dsp_init
av_cold void ff_vp3dsp_init(VP3DSPContext *c)
Definition: vp3dsp.c:448
Vp3DecodeContext::dc_pred_row
VP4Predictor * dc_pred_row
Definition: vp3.c:328
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
Vp3DecodeContext::u_superblock_start
int u_superblock_start
Definition: vp3.c:228
coeff_get_bits
static const uint8_t coeff_get_bits[32]
Definition: vp3data.h:148
Vp3DecodeContext::dct_tokens_base
int16_t * dct_tokens_base
Definition: vp3.c:279
Vp3Fragment
Definition: vp3.c:68
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:624
src
#define src
Definition: vp8dsp.c:248
Vp3DecodeContext::nqps
int nqps
Definition: vp3.c:219
Vp3DecodeContext::qmat
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
Definition: vp3.c:308
Vp3DecodeContext::vdsp
VideoDSPContext vdsp
Definition: vp3.c:211
TOKEN_EOB
#define TOKEN_EOB(eob_run)
Definition: vp3.c:280
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:337
Vp3DecodeContext::c_macroblock_width
int c_macroblock_width
Definition: vp3.c:236
Vp3DecodeContext::last_frame
ProgressFrame last_frame
Definition: vp3.c:205