FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwaccel_internal.h"
31 #include "hwconfig.h"
32 #include "profiles.h"
33 #include "progressframe.h"
34 #include "libavutil/refstruct.h"
35 #include "thread.h"
36 #include "pthread_internal.h"
37 
38 #include "videodsp.h"
39 #include "vp89_rac.h"
40 #include "vp9.h"
41 #include "vp9data.h"
42 #include "vp9dec.h"
43 #include "vpx_rac.h"
44 #include "libavutil/avassert.h"
45 #include "libavutil/mem.h"
46 #include "libavutil/pixdesc.h"
48 
49 #define VP9_SYNCCODE 0x498342
50 
51 #if HAVE_THREADS
52 DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
53  (offsetof(VP9Context, progress_mutex)),
54  (offsetof(VP9Context, progress_cond)));
55 
56 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
57  VP9Context *s = avctx->priv_data;
58 
59  if (avctx->active_thread_type & FF_THREAD_SLICE) {
60  if (s->entries)
61  av_freep(&s->entries);
62 
63  s->entries = av_malloc_array(n, sizeof(atomic_int));
64  if (!s->entries)
65  return AVERROR(ENOMEM);
66  }
67  return 0;
68 }
69 
/**
 * Publish decoding progress for tile column 'field': add n newly completed
 * rows to its counter and wake any thread blocked in
 * vp9_await_tile_progress().
 */
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    /* The mutex is held around the update + signal so a waiter cannot miss
     * the wakeup between checking the counter and calling cond_wait. */
    pthread_mutex_lock(&s->progress_mutex);
    /* memory_order_release pairs with the acquire load in
     * vp9_await_tile_progress(), making the data written before this call
     * visible to the consumer that observes the new count. */
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}
76 
77 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
78  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
79  return;
80 
81  pthread_mutex_lock(&s->progress_mutex);
82  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
83  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
84  pthread_mutex_unlock(&s->progress_mutex);
85 }
86 #else
/* Without thread support there are no per-tile progress counters to
 * allocate, so this stub is a no-op that always reports success. */
static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
88 #endif
89 
91 {
92  av_freep(&td->b_base);
93  av_freep(&td->block_base);
95 }
96 
97 static void vp9_frame_unref(VP9Frame *f)
98 {
100  av_refstruct_unref(&f->header_ref);
101  av_refstruct_unref(&f->extradata);
102  av_refstruct_unref(&f->hwaccel_picture_private);
103  f->segmentation_map = NULL;
104 }
105 
107 {
108  VP9Context *s = avctx->priv_data;
109  int ret, sz;
110 
112  if (ret < 0)
113  return ret;
114 
115  sz = 64 * s->sb_cols * s->sb_rows;
116  if (sz != s->frame_extradata_pool_size) {
117  av_refstruct_pool_uninit(&s->frame_extradata_pool);
118  s->frame_extradata_pool = av_refstruct_pool_alloc(sz * (1 + sizeof(VP9mvrefPair)),
120  if (!s->frame_extradata_pool) {
121  s->frame_extradata_pool_size = 0;
122  ret = AVERROR(ENOMEM);
123  goto fail;
124  }
125  s->frame_extradata_pool_size = sz;
126  }
127  f->extradata = av_refstruct_pool_get(s->frame_extradata_pool);
128  if (!f->extradata) {
129  ret = AVERROR(ENOMEM);
130  goto fail;
131  }
132 
133  f->segmentation_map = f->extradata;
134  f->mv = (VP9mvrefPair *) ((char*)f->extradata + sz);
135 
136  ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private);
137  if (ret < 0)
138  goto fail;
139 
140  return 0;
141 
142 fail:
144  return ret;
145 }
146 
148 {
149  av_refstruct_replace(&dst->header_ref, src->header_ref);
150  dst->frame_header = src->frame_header;
151 
152  ff_progress_frame_replace(&dst->tf, &src->tf);
153 
154  av_refstruct_replace(&dst->extradata, src->extradata);
155 
156  dst->segmentation_map = src->segmentation_map;
157  dst->mv = src->mv;
158  dst->uses_2pass = src->uses_2pass;
159 
160  av_refstruct_replace(&dst->hwaccel_picture_private,
161  src->hwaccel_picture_private);
162 }
163 
164 static int update_size(AVCodecContext *avctx, int w, int h)
165 {
166 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
167  CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
168  CONFIG_VP9_D3D12VA_HWACCEL + \
169  CONFIG_VP9_NVDEC_HWACCEL + \
170  CONFIG_VP9_VAAPI_HWACCEL + \
171  CONFIG_VP9_VDPAU_HWACCEL + \
172  CONFIG_VP9_VIDEOTOOLBOX_HWACCEL + \
173  CONFIG_VP9_VULKAN_HWACCEL)
174  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
175  VP9Context *s = avctx->priv_data;
176  uint8_t *p;
177  int bytesperpixel = s->bytesperpixel, ret, cols, rows;
178  int lflvl_len, i;
179 
180  av_assert0(w > 0 && h > 0);
181 
182  if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
183  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
184  return ret;
185 
186  switch (s->pix_fmt) {
187  case AV_PIX_FMT_YUV420P:
189 #if CONFIG_VP9_DXVA2_HWACCEL
190  *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
191 #endif
192 #if CONFIG_VP9_D3D11VA_HWACCEL
193  *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
194  *fmtp++ = AV_PIX_FMT_D3D11;
195 #endif
196 #if CONFIG_VP9_D3D12VA_HWACCEL
197  *fmtp++ = AV_PIX_FMT_D3D12;
198 #endif
199 #if CONFIG_VP9_NVDEC_HWACCEL
200  *fmtp++ = AV_PIX_FMT_CUDA;
201 #endif
202 #if CONFIG_VP9_VAAPI_HWACCEL
203  *fmtp++ = AV_PIX_FMT_VAAPI;
204 #endif
205 #if CONFIG_VP9_VDPAU_HWACCEL
206  *fmtp++ = AV_PIX_FMT_VDPAU;
207 #endif
208 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
209  *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
210 #endif
211 #if CONFIG_VP9_VULKAN_HWACCEL
212  *fmtp++ = AV_PIX_FMT_VULKAN;
213 #endif
214  break;
216 #if CONFIG_VP9_NVDEC_HWACCEL
217  *fmtp++ = AV_PIX_FMT_CUDA;
218 #endif
219 #if CONFIG_VP9_VAAPI_HWACCEL
220  *fmtp++ = AV_PIX_FMT_VAAPI;
221 #endif
222 #if CONFIG_VP9_VDPAU_HWACCEL
223  *fmtp++ = AV_PIX_FMT_VDPAU;
224 #endif
225 #if CONFIG_VP9_VULKAN_HWACCEL
226  *fmtp++ = AV_PIX_FMT_VULKAN;
227 #endif
228  break;
229  case AV_PIX_FMT_YUV444P:
232 #if CONFIG_VP9_VAAPI_HWACCEL
233  *fmtp++ = AV_PIX_FMT_VAAPI;
234 #endif
235 #if CONFIG_VP9_VULKAN_HWACCEL
236  *fmtp++ = AV_PIX_FMT_VULKAN;
237 #endif
238  break;
239  case AV_PIX_FMT_GBRP:
240  case AV_PIX_FMT_GBRP10:
241  case AV_PIX_FMT_GBRP12:
242 #if CONFIG_VP9_VAAPI_HWACCEL
243  *fmtp++ = AV_PIX_FMT_VAAPI;
244 #endif
245 #if CONFIG_VP9_VULKAN_HWACCEL
246  *fmtp++ = AV_PIX_FMT_VULKAN;
247 #endif
248  break;
249  }
250 
251  *fmtp++ = s->pix_fmt;
252  *fmtp = AV_PIX_FMT_NONE;
253 
254  ret = ff_get_format(avctx, pix_fmts);
255  if (ret < 0)
256  return ret;
257 
258  avctx->pix_fmt = ret;
259  s->gf_fmt = s->pix_fmt;
260  s->w = w;
261  s->h = h;
262  }
263 
264  cols = (w + 7) >> 3;
265  rows = (h + 7) >> 3;
266 
267  if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
268  return 0;
269 
270  s->last_fmt = s->pix_fmt;
271  s->sb_cols = (w + 63) >> 6;
272  s->sb_rows = (h + 63) >> 6;
273  s->cols = (w + 7) >> 3;
274  s->rows = (h + 7) >> 3;
275  lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;
276 
277 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
278  av_freep(&s->intra_pred_data[0]);
279  // FIXME we slightly over-allocate here for subsampled chroma, but a little
280  // bit of padding shouldn't affect performance...
281  p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
282  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
283  if (!p)
284  return AVERROR(ENOMEM);
285  assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
286  assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
287  assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
288  assign(s->above_y_nnz_ctx, uint8_t *, 16);
289  assign(s->above_mode_ctx, uint8_t *, 16);
290  assign(s->above_mv_ctx, VP9mv(*)[2], 16);
291  assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
292  assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
293  assign(s->above_partition_ctx, uint8_t *, 8);
294  assign(s->above_skip_ctx, uint8_t *, 8);
295  assign(s->above_txfm_ctx, uint8_t *, 8);
296  assign(s->above_segpred_ctx, uint8_t *, 8);
297  assign(s->above_intra_ctx, uint8_t *, 8);
298  assign(s->above_comp_ctx, uint8_t *, 8);
299  assign(s->above_ref_ctx, uint8_t *, 8);
300  assign(s->above_filter_ctx, uint8_t *, 8);
301  assign(s->lflvl, VP9Filter *, lflvl_len);
302 #undef assign
303 
304  if (s->td) {
305  for (i = 0; i < s->active_tile_cols; i++)
306  vp9_tile_data_free(&s->td[i]);
307  }
308 
309  if (s->s.h.bpp != s->last_bpp) {
310  ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
311  ff_videodsp_init(&s->vdsp, s->s.h.bpp);
312  s->last_bpp = s->s.h.bpp;
313  }
314 
315  return 0;
316 }
317 
319 {
320  int i;
321  VP9Context *s = avctx->priv_data;
322  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
323  VP9TileData *td = &s->td[0];
324 
325  if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
326  return 0;
327 
328  vp9_tile_data_free(td);
329  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
330  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
331  if (s->s.frames[CUR_FRAME].uses_2pass) {
332  int sbs = s->sb_cols * s->sb_rows;
333 
334  td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
335  td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
336  16 * 16 + 2 * chroma_eobs) * sbs);
337  if (!td->b_base || !td->block_base)
338  return AVERROR(ENOMEM);
339  td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
340  td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
341  td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
342  td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
343  td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
344 
346  td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
347  if (!td->block_structure)
348  return AVERROR(ENOMEM);
349  }
350  } else {
351  for (i = 1; i < s->active_tile_cols; i++)
352  vp9_tile_data_free(&s->td[i]);
353 
354  for (i = 0; i < s->active_tile_cols; i++) {
355  s->td[i].b_base = av_malloc(sizeof(VP9Block));
356  s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
357  16 * 16 + 2 * chroma_eobs);
358  if (!s->td[i].b_base || !s->td[i].block_base)
359  return AVERROR(ENOMEM);
360  s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
361  s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
362  s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
363  s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
364  s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
365 
367  s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
368  if (!s->td[i].block_structure)
369  return AVERROR(ENOMEM);
370  }
371  }
372  }
373  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
374 
375  return 0;
376 }
377 
378 // The sign bit is at the end, not the start, of a bit sequence
380 {
381  int v = get_bits(gb, n);
382  return get_bits1(gb) ? -v : v;
383 }
384 
385 static av_always_inline int inv_recenter_nonneg(int v, int m)
386 {
387  if (v > 2 * m)
388  return v;
389  if (v & 1)
390  return m - ((v + 1) >> 1);
391  return m + (v >> 1);
392 }
393 
// differential forward probability updates
/**
 * Decode a differential update to a probability p in [1, 255].
 *
 * A VLC-coded index d is read from the range coder (smaller indices use
 * fewer bits), mapped through inv_map_table[] and re-centered around the
 * current probability with inv_recenter_nonneg(), so new values near p
 * are the cheapest to code.
 *
 * @param c range coder to read the update from
 * @param p current probability value, in [1, 255]
 * @return the updated probability, in [1, 255]
 */
static int update_prob(VPXRangeCoder *c, int p)
{
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */

    if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 0;    // cheapest bucket: d in [0, 15]
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 16;   // d in [16, 31]
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 5) + 32;   // d in [32, 63]
    } else {
        d = vp89_rac_get_uint(c, 7);
        /* Codes >= 65 carry one extra bit of precision. */
        if (d >= 65)
            d = (d << 1) - 65 + vp89_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    /* Re-center around p, mirroring when p > 128 so the result stays
     * inside [1, 255]. */
    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
452 
454 {
455  static const enum AVColorSpace colorspaces[8] = {
458  };
459  VP9Context *s = avctx->priv_data;
460  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
461 
462  s->bpp_index = bits;
463  s->s.h.bpp = 8 + bits * 2;
464  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
465  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
466  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
467  static const enum AVPixelFormat pix_fmt_rgb[3] = {
469  };
470  s->ss_h = s->ss_v = 0;
471  avctx->color_range = AVCOL_RANGE_JPEG;
472  s->pix_fmt = pix_fmt_rgb[bits];
473  if (avctx->profile & 1) {
474  if (get_bits1(&s->gb)) {
475  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
476  return AVERROR_INVALIDDATA;
477  }
478  } else {
479  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
480  avctx->profile);
481  return AVERROR_INVALIDDATA;
482  }
483  } else {
484  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
491  };
493  if (avctx->profile & 1) {
494  s->ss_h = get_bits1(&s->gb);
495  s->ss_v = get_bits1(&s->gb);
496  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
497  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
498  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
499  avctx->profile);
500  return AVERROR_INVALIDDATA;
501  } else if (get_bits1(&s->gb)) {
502  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
503  avctx->profile);
504  return AVERROR_INVALIDDATA;
505  }
506  } else {
507  s->ss_h = s->ss_v = 1;
508  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
509  }
510  }
511 
512  return 0;
513 }
514 
516  const uint8_t *data, int size, int *ref)
517 {
518  VP9Context *s = avctx->priv_data;
519  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
520  int last_invisible;
521  const uint8_t *data2;
522 
523  /* general header */
524  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
525  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
526  return ret;
527  }
528  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
529  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
530  return AVERROR_INVALIDDATA;
531  }
532  avctx->profile = get_bits1(&s->gb);
533  avctx->profile |= get_bits1(&s->gb) << 1;
534  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
535  if (avctx->profile > 3) {
536  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
537  return AVERROR_INVALIDDATA;
538  }
539  s->s.h.profile = avctx->profile;
540  if (get_bits1(&s->gb)) {
541  *ref = get_bits(&s->gb, 3);
542  return 0;
543  }
544 
545  s->last_keyframe = s->s.h.keyframe;
546  s->s.h.keyframe = !get_bits1(&s->gb);
547 
548  last_invisible = s->s.h.invisible;
549  s->s.h.invisible = !get_bits1(&s->gb);
550  s->s.h.errorres = get_bits1(&s->gb);
551  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
552 
553  if (s->s.h.keyframe) {
554  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
555  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
556  return AVERROR_INVALIDDATA;
557  }
558  if ((ret = read_colorspace_details(avctx)) < 0)
559  return ret;
560  // for profile 1, here follows the subsampling bits
561  s->s.h.refreshrefmask = 0xff;
562  w = get_bits(&s->gb, 16) + 1;
563  h = get_bits(&s->gb, 16) + 1;
564  if (get_bits1(&s->gb)) // display size
565  skip_bits(&s->gb, 32);
566  } else {
567  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
568  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
569  if (s->s.h.intraonly) {
570  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
571  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
572  return AVERROR_INVALIDDATA;
573  }
574  if (avctx->profile >= 1) {
575  if ((ret = read_colorspace_details(avctx)) < 0)
576  return ret;
577  } else {
578  s->ss_h = s->ss_v = 1;
579  s->s.h.bpp = 8;
580  s->bpp_index = 0;
581  s->bytesperpixel = 1;
582  s->pix_fmt = AV_PIX_FMT_YUV420P;
583  avctx->colorspace = AVCOL_SPC_BT470BG;
584  avctx->color_range = AVCOL_RANGE_MPEG;
585  }
586  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
587  w = get_bits(&s->gb, 16) + 1;
588  h = get_bits(&s->gb, 16) + 1;
589  if (get_bits1(&s->gb)) // display size
590  skip_bits(&s->gb, 32);
591  } else {
592  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
593  s->s.h.refidx[0] = get_bits(&s->gb, 3);
594  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
595  s->s.h.refidx[1] = get_bits(&s->gb, 3);
596  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
597  s->s.h.refidx[2] = get_bits(&s->gb, 3);
598  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
599  if (!s->s.refs[s->s.h.refidx[0]].f ||
600  !s->s.refs[s->s.h.refidx[1]].f ||
601  !s->s.refs[s->s.h.refidx[2]].f) {
602  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
603  return AVERROR_INVALIDDATA;
604  }
605  if (get_bits1(&s->gb)) {
606  w = s->s.refs[s->s.h.refidx[0]].f->width;
607  h = s->s.refs[s->s.h.refidx[0]].f->height;
608  } else if (get_bits1(&s->gb)) {
609  w = s->s.refs[s->s.h.refidx[1]].f->width;
610  h = s->s.refs[s->s.h.refidx[1]].f->height;
611  } else if (get_bits1(&s->gb)) {
612  w = s->s.refs[s->s.h.refidx[2]].f->width;
613  h = s->s.refs[s->s.h.refidx[2]].f->height;
614  } else {
615  w = get_bits(&s->gb, 16) + 1;
616  h = get_bits(&s->gb, 16) + 1;
617  }
618  // Note that in this code, "CUR_FRAME" is actually before we
619  // have formally allocated a frame, and thus actually represents
620  // the _last_ frame
621  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f &&
622  s->s.frames[CUR_FRAME].tf.f->width == w &&
623  s->s.frames[CUR_FRAME].tf.f->height == h;
624  if (get_bits1(&s->gb)) // display size
625  skip_bits(&s->gb, 32);
626  s->s.h.highprecisionmvs = get_bits1(&s->gb);
627  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
628  get_bits(&s->gb, 2);
629  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
630  s->s.h.signbias[0] != s->s.h.signbias[2];
631  if (s->s.h.allowcompinter) {
632  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
633  s->s.h.fixcompref = 2;
634  s->s.h.varcompref[0] = 0;
635  s->s.h.varcompref[1] = 1;
636  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
637  s->s.h.fixcompref = 1;
638  s->s.h.varcompref[0] = 0;
639  s->s.h.varcompref[1] = 2;
640  } else {
641  s->s.h.fixcompref = 0;
642  s->s.h.varcompref[0] = 1;
643  s->s.h.varcompref[1] = 2;
644  }
645  }
646  }
647  }
648  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
649  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
650  s->s.h.framectxid = c = get_bits(&s->gb, 2);
651  if (s->s.h.keyframe || s->s.h.intraonly)
652  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
653 
654  /* loopfilter header data */
655  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
656  // reset loopfilter defaults
657  s->s.h.lf_delta.ref[0] = 1;
658  s->s.h.lf_delta.ref[1] = 0;
659  s->s.h.lf_delta.ref[2] = -1;
660  s->s.h.lf_delta.ref[3] = -1;
661  s->s.h.lf_delta.mode[0] = 0;
662  s->s.h.lf_delta.mode[1] = 0;
663  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
664  }
665  s->s.h.filter.level = get_bits(&s->gb, 6);
666  sharp = get_bits(&s->gb, 3);
667  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
668  // the old cache values since they are still valid
669  if (s->s.h.filter.sharpness != sharp) {
670  for (i = 1; i <= 63; i++) {
671  int limit = i;
672 
673  if (sharp > 0) {
674  limit >>= (sharp + 3) >> 2;
675  limit = FFMIN(limit, 9 - sharp);
676  }
677  limit = FFMAX(limit, 1);
678 
679  s->filter_lut.lim_lut[i] = limit;
680  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
681  }
682  }
683  s->s.h.filter.sharpness = sharp;
684  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
685  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
686  for (i = 0; i < 4; i++)
687  if (get_bits1(&s->gb))
688  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
689  for (i = 0; i < 2; i++)
690  if (get_bits1(&s->gb))
691  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
692  }
693  }
694 
695  /* quantization header data */
696  s->s.h.yac_qi = get_bits(&s->gb, 8);
697  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
698  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
699  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
700  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
701  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
702 #if FF_API_CODEC_PROPS
704  if (s->s.h.lossless)
707 #endif
708 
709  /* segmentation header info */
710  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
711  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
712  for (i = 0; i < 7; i++)
713  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
714  get_bits(&s->gb, 8) : 255;
715  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
716  for (i = 0; i < 3; i++)
717  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
718  get_bits(&s->gb, 8) : 255;
719  }
720 
721  if (get_bits1(&s->gb)) {
722  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
723  for (i = 0; i < 8; i++) {
724  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
725  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
726  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
727  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
728  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
729  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
730  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
731  }
732  }
733  } else {
734  // Reset fields under segmentation switch if segmentation is disabled.
735  // This is necessary because some hwaccels don't ignore these fields
736  // if segmentation is disabled.
737  s->s.h.segmentation.temporal = 0;
738  s->s.h.segmentation.update_map = 0;
739  }
740 
741  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
742  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
743  int qyac, qydc, quvac, quvdc, lflvl, sh;
744 
745  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
746  if (s->s.h.segmentation.absolute_vals)
747  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
748  else
749  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
750  } else {
751  qyac = s->s.h.yac_qi;
752  }
753  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
754  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
755  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
756  qyac = av_clip_uintp2(qyac, 8);
757 
758  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
759  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
760  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
761  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
762 
763  sh = s->s.h.filter.level >= 32;
764  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
765  if (s->s.h.segmentation.absolute_vals)
766  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
767  else
768  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
769  } else {
770  lflvl = s->s.h.filter.level;
771  }
772  if (s->s.h.lf_delta.enabled) {
773  s->s.h.segmentation.feat[i].lflvl[0][0] =
774  s->s.h.segmentation.feat[i].lflvl[0][1] =
775  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
776  for (j = 1; j < 4; j++) {
777  s->s.h.segmentation.feat[i].lflvl[j][0] =
778  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
779  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
780  s->s.h.segmentation.feat[i].lflvl[j][1] =
781  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
782  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
783  }
784  } else {
785  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
786  sizeof(s->s.h.segmentation.feat[i].lflvl));
787  }
788  }
789 
790  /* tiling info */
791  if ((ret = update_size(avctx, w, h)) < 0) {
792  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
793  w, h, s->pix_fmt);
794  return ret;
795  }
796  for (s->s.h.tiling.log2_tile_cols = 0;
797  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
798  s->s.h.tiling.log2_tile_cols++) ;
799  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
800  max = FFMAX(0, max - 1);
801  while (max > s->s.h.tiling.log2_tile_cols) {
802  if (get_bits1(&s->gb))
803  s->s.h.tiling.log2_tile_cols++;
804  else
805  break;
806  }
807  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
808  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
809  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
810  int n_range_coders;
811  VPXRangeCoder *rc;
812 
813  if (s->td) {
814  for (i = 0; i < s->active_tile_cols; i++)
815  vp9_tile_data_free(&s->td[i]);
816  av_freep(&s->td);
817  }
818 
819  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
820  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
821  s->s.h.tiling.tile_cols : 1;
822  vp9_alloc_entries(avctx, s->sb_rows);
823  if (avctx->active_thread_type == FF_THREAD_SLICE) {
824  n_range_coders = 4; // max_tile_rows
825  } else {
826  n_range_coders = s->s.h.tiling.tile_cols;
827  }
828  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
829  n_range_coders * sizeof(VPXRangeCoder));
830  if (!s->td)
831  return AVERROR(ENOMEM);
832  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
833  for (i = 0; i < s->active_tile_cols; i++) {
834  s->td[i].s = s;
835  s->td[i].c_b = rc;
836  rc += n_range_coders;
837  }
838  }
839 
840  /* check reference frames */
841  if (!s->s.h.keyframe && !s->s.h.intraonly) {
842  int valid_ref_frame = 0;
843  for (i = 0; i < 3; i++) {
844  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
845  int refw = ref->width, refh = ref->height;
846 
847  if (ref->format != avctx->pix_fmt) {
848  av_log(avctx, AV_LOG_ERROR,
849  "Ref pixfmt (%s) did not match current frame (%s)",
850  av_get_pix_fmt_name(ref->format),
851  av_get_pix_fmt_name(avctx->pix_fmt));
852  return AVERROR_INVALIDDATA;
853  } else if (refw == w && refh == h) {
854  s->mvscale[i][0] = s->mvscale[i][1] = 0;
855  } else {
856  /* Check to make sure at least one of frames that */
857  /* this frame references has valid dimensions */
858  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
859  av_log(avctx, AV_LOG_WARNING,
860  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
861  refw, refh, w, h);
862  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
863  continue;
864  }
865  s->mvscale[i][0] = (refw << 14) / w;
866  s->mvscale[i][1] = (refh << 14) / h;
867  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
868  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
869  }
870  valid_ref_frame++;
871  }
872  if (!valid_ref_frame) {
873  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
874  return AVERROR_INVALIDDATA;
875  }
876  }
877 
878  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
879  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
880  s->prob_ctx[3].p = ff_vp9_default_probs;
881  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
882  sizeof(ff_vp9_default_coef_probs));
883  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
884  sizeof(ff_vp9_default_coef_probs));
885  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
886  sizeof(ff_vp9_default_coef_probs));
887  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
888  sizeof(ff_vp9_default_coef_probs));
889  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
890  s->prob_ctx[c].p = ff_vp9_default_probs;
891  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
892  sizeof(ff_vp9_default_coef_probs));
893  }
894 
895  // next 16 bits is size of the rest of the header (arith-coded)
896  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
897  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
898 
899  data2 = align_get_bits(&s->gb);
900  if (size2 > size - (data2 - data)) {
901  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
902  return AVERROR_INVALIDDATA;
903  }
904  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
905  if (ret < 0)
906  return ret;
907 
908  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
909  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
910  return AVERROR_INVALIDDATA;
911  }
912 
913  for (i = 0; i < s->active_tile_cols; i++) {
914  if (s->s.h.keyframe || s->s.h.intraonly) {
915  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
916  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
917  } else {
918  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
919  }
920  s->td[i].nb_block_structure = 0;
921  }
922 
923  /* FIXME is it faster to not copy here, but do it down in the fw updates
924  * as explicit copies if the fw update is missing (and skip the copy upon
925  * fw update)? */
926  s->prob.p = s->prob_ctx[c].p;
927 
928  // txfm updates
929  if (s->s.h.lossless) {
930  s->s.h.txfmmode = TX_4X4;
931  } else {
932  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
933  if (s->s.h.txfmmode == 3)
934  s->s.h.txfmmode += vp89_rac_get(&s->c);
935 
936  if (s->s.h.txfmmode == TX_SWITCHABLE) {
937  for (i = 0; i < 2; i++)
938  if (vpx_rac_get_prob_branchy(&s->c, 252))
939  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
940  for (i = 0; i < 2; i++)
941  for (j = 0; j < 2; j++)
942  if (vpx_rac_get_prob_branchy(&s->c, 252))
943  s->prob.p.tx16p[i][j] =
944  update_prob(&s->c, s->prob.p.tx16p[i][j]);
945  for (i = 0; i < 2; i++)
946  for (j = 0; j < 3; j++)
947  if (vpx_rac_get_prob_branchy(&s->c, 252))
948  s->prob.p.tx32p[i][j] =
949  update_prob(&s->c, s->prob.p.tx32p[i][j]);
950  }
951  }
952 
953  // coef updates
954  for (i = 0; i < 4; i++) {
955  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
956  if (vp89_rac_get(&s->c)) {
957  for (j = 0; j < 2; j++)
958  for (k = 0; k < 2; k++)
959  for (l = 0; l < 6; l++)
960  for (m = 0; m < 6; m++) {
961  uint8_t *p = s->prob.coef[i][j][k][l][m];
962  uint8_t *r = ref[j][k][l][m];
963  if (m >= 3 && l == 0) // dc only has 3 pt
964  break;
965  for (n = 0; n < 3; n++) {
966  if (vpx_rac_get_prob_branchy(&s->c, 252))
967  p[n] = update_prob(&s->c, r[n]);
968  else
969  p[n] = r[n];
970  }
971  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
972  }
973  } else {
974  for (j = 0; j < 2; j++)
975  for (k = 0; k < 2; k++)
976  for (l = 0; l < 6; l++)
977  for (m = 0; m < 6; m++) {
978  uint8_t *p = s->prob.coef[i][j][k][l][m];
979  uint8_t *r = ref[j][k][l][m];
980  if (m > 3 && l == 0) // dc only has 3 pt
981  break;
982  memcpy(p, r, 3);
983  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
984  }
985  }
986  if (s->s.h.txfmmode == i)
987  break;
988  }
989 
990  // mode updates
991  for (i = 0; i < 3; i++)
992  if (vpx_rac_get_prob_branchy(&s->c, 252))
993  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
994  if (!s->s.h.keyframe && !s->s.h.intraonly) {
995  for (i = 0; i < 7; i++)
996  for (j = 0; j < 3; j++)
997  if (vpx_rac_get_prob_branchy(&s->c, 252))
998  s->prob.p.mv_mode[i][j] =
999  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
1000 
1001  if (s->s.h.filtermode == FILTER_SWITCHABLE)
1002  for (i = 0; i < 4; i++)
1003  for (j = 0; j < 2; j++)
1004  if (vpx_rac_get_prob_branchy(&s->c, 252))
1005  s->prob.p.filter[i][j] =
1006  update_prob(&s->c, s->prob.p.filter[i][j]);
1007 
1008  for (i = 0; i < 4; i++)
1009  if (vpx_rac_get_prob_branchy(&s->c, 252))
1010  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
1011 
1012  if (s->s.h.allowcompinter) {
1013  s->s.h.comppredmode = vp89_rac_get(&s->c);
1014  if (s->s.h.comppredmode)
1015  s->s.h.comppredmode += vp89_rac_get(&s->c);
1016  if (s->s.h.comppredmode == PRED_SWITCHABLE)
1017  for (i = 0; i < 5; i++)
1018  if (vpx_rac_get_prob_branchy(&s->c, 252))
1019  s->prob.p.comp[i] =
1020  update_prob(&s->c, s->prob.p.comp[i]);
1021  } else {
1022  s->s.h.comppredmode = PRED_SINGLEREF;
1023  }
1024 
1025  if (s->s.h.comppredmode != PRED_COMPREF) {
1026  for (i = 0; i < 5; i++) {
1027  if (vpx_rac_get_prob_branchy(&s->c, 252))
1028  s->prob.p.single_ref[i][0] =
1029  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1030  if (vpx_rac_get_prob_branchy(&s->c, 252))
1031  s->prob.p.single_ref[i][1] =
1032  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1033  }
1034  }
1035 
1036  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1037  for (i = 0; i < 5; i++)
1038  if (vpx_rac_get_prob_branchy(&s->c, 252))
1039  s->prob.p.comp_ref[i] =
1040  update_prob(&s->c, s->prob.p.comp_ref[i]);
1041  }
1042 
1043  for (i = 0; i < 4; i++)
1044  for (j = 0; j < 9; j++)
1045  if (vpx_rac_get_prob_branchy(&s->c, 252))
1046  s->prob.p.y_mode[i][j] =
1047  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1048 
1049  for (i = 0; i < 4; i++)
1050  for (j = 0; j < 4; j++)
1051  for (k = 0; k < 3; k++)
1052  if (vpx_rac_get_prob_branchy(&s->c, 252))
1053  s->prob.p.partition[3 - i][j][k] =
1054  update_prob(&s->c,
1055  s->prob.p.partition[3 - i][j][k]);
1056 
1057  // mv fields don't use the update_prob subexp model for some reason
1058  for (i = 0; i < 3; i++)
1059  if (vpx_rac_get_prob_branchy(&s->c, 252))
1060  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1061 
1062  for (i = 0; i < 2; i++) {
1063  if (vpx_rac_get_prob_branchy(&s->c, 252))
1064  s->prob.p.mv_comp[i].sign =
1065  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1066 
1067  for (j = 0; j < 10; j++)
1068  if (vpx_rac_get_prob_branchy(&s->c, 252))
1069  s->prob.p.mv_comp[i].classes[j] =
1070  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1071 
1072  if (vpx_rac_get_prob_branchy(&s->c, 252))
1073  s->prob.p.mv_comp[i].class0 =
1074  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1075 
1076  for (j = 0; j < 10; j++)
1077  if (vpx_rac_get_prob_branchy(&s->c, 252))
1078  s->prob.p.mv_comp[i].bits[j] =
1079  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1080  }
1081 
1082  for (i = 0; i < 2; i++) {
1083  for (j = 0; j < 2; j++)
1084  for (k = 0; k < 3; k++)
1085  if (vpx_rac_get_prob_branchy(&s->c, 252))
1086  s->prob.p.mv_comp[i].class0_fp[j][k] =
1087  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1088 
1089  for (j = 0; j < 3; j++)
1090  if (vpx_rac_get_prob_branchy(&s->c, 252))
1091  s->prob.p.mv_comp[i].fp[j] =
1092  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1093  }
1094 
1095  if (s->s.h.highprecisionmvs) {
1096  for (i = 0; i < 2; i++) {
1097  if (vpx_rac_get_prob_branchy(&s->c, 252))
1098  s->prob.p.mv_comp[i].class0_hp =
1099  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1100 
1101  if (vpx_rac_get_prob_branchy(&s->c, 252))
1102  s->prob.p.mv_comp[i].hp =
1103  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1104  }
1105  }
1106  }
1107 
1108  return (data2 - data) + size2;
1109 }
1110 
1111 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1112  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1113 {
1114  const VP9Context *s = td->s;
1115  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1116  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1117  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1118  s->prob.p.partition[bl][c];
1119  enum BlockPartition bp;
1120  ptrdiff_t hbs = 4 >> bl;
1121  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1122  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1123  int bytesperpixel = s->bytesperpixel;
1124 
1125  if (bl == BL_8X8) {
1127  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1128  } else if (col + hbs < s->cols) { // FIXME why not <=?
1129  if (row + hbs < s->rows) { // FIXME why not <=?
1131  switch (bp) {
1132  case PARTITION_NONE:
1133  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1134  break;
1135  case PARTITION_H:
1136  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1137  yoff += hbs * 8 * y_stride;
1138  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1139  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1140  break;
1141  case PARTITION_V:
1142  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1143  yoff += hbs * 8 * bytesperpixel;
1144  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1145  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1146  break;
1147  case PARTITION_SPLIT:
1148  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1149  decode_sb(td, row, col + hbs, lflvl,
1150  yoff + 8 * hbs * bytesperpixel,
1151  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1152  yoff += hbs * 8 * y_stride;
1153  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1154  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1155  decode_sb(td, row + hbs, col + hbs, lflvl,
1156  yoff + 8 * hbs * bytesperpixel,
1157  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1158  break;
1159  default:
1160  av_unreachable("ff_vp9_partition_tree only has "
1161  "the four PARTITION_* terminal codes");
1162  }
1163  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1164  bp = PARTITION_SPLIT;
1165  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1166  decode_sb(td, row, col + hbs, lflvl,
1167  yoff + 8 * hbs * bytesperpixel,
1168  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1169  } else {
1170  bp = PARTITION_H;
1171  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1172  }
1173  } else if (row + hbs < s->rows) { // FIXME why not <=?
1174  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1175  bp = PARTITION_SPLIT;
1176  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1177  yoff += hbs * 8 * y_stride;
1178  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1179  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1180  } else {
1181  bp = PARTITION_V;
1182  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1183  }
1184  } else {
1185  bp = PARTITION_SPLIT;
1186  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1187  }
1188  td->counts.partition[bl][c][bp]++;
1189 }
1190 
1191 static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1192  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1193 {
1194  const VP9Context *s = td->s;
1195  VP9Block *b = td->b;
1196  ptrdiff_t hbs = 4 >> bl;
1197  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1198  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1199  int bytesperpixel = s->bytesperpixel;
1200 
1201  if (bl == BL_8X8) {
1202  av_assert2(b->bl == BL_8X8);
1203  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1204  } else if (td->b->bl == bl) {
1205  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1206  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1207  yoff += hbs * 8 * y_stride;
1208  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1209  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1210  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1211  yoff += hbs * 8 * bytesperpixel;
1212  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1213  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1214  }
1215  } else {
1216  decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
1217  if (col + hbs < s->cols) { // FIXME why not <=?
1218  if (row + hbs < s->rows) {
1219  decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1220  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1221  yoff += hbs * 8 * y_stride;
1222  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1223  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1224  decode_sb_mem(td, row + hbs, col + hbs, lflvl,
1225  yoff + 8 * hbs * bytesperpixel,
1226  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1227  } else {
1228  yoff += hbs * 8 * bytesperpixel;
1229  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1230  decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1231  }
1232  } else if (row + hbs < s->rows) {
1233  yoff += hbs * 8 * y_stride;
1234  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1235  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1236  }
1237  }
1238 }
1239 
/**
 * Compute the [start, end) extent of tile index `idx` in 8x8-mi units.
 *
 * The frame is divided into 2^log2_n tiles over `n` superblocks; each
 * boundary is (idx * n) >> log2_n superblocks, clamped to n, then scaled
 * by 8 (one sb64 spans 8 mi units).
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int first_sb = ( idx      * n) >> log2_n;
    int last_sb  = ((idx + 1) * n) >> log2_n;

    if (first_sb > n)
        first_sb = n;
    if (last_sb > n)
        last_sb = n;

    *start = first_sb << 3;
    *end   = last_sb  << 3;
}
1247 
1249 {
1250  int i;
1251 
1252  av_freep(&s->intra_pred_data[0]);
1253  for (i = 0; i < s->active_tile_cols; i++)
1254  vp9_tile_data_free(&s->td[i]);
1255 }
1256 
1258 {
1259  VP9Context *s = avctx->priv_data;
1260  int i;
1261 
1262  for (int i = 0; i < 3; i++)
1263  vp9_frame_unref(&s->s.frames[i]);
1264  av_refstruct_pool_uninit(&s->frame_extradata_pool);
1265  for (i = 0; i < 8; i++) {
1266  ff_progress_frame_unref(&s->s.refs[i]);
1267  ff_progress_frame_unref(&s->next_refs[i]);
1268  vp9_frame_unref(&s->s.ref_frames[i]);
1269  }
1270 
1271  free_buffers(s);
1272 #if HAVE_THREADS
1273  av_freep(&s->entries);
1274  ff_pthread_free(s, vp9_context_offsets);
1275 #endif
1276 
1277  av_refstruct_unref(&s->header_ref);
1278  ff_cbs_fragment_free(&s->current_frag);
1279  ff_cbs_close(&s->cbc);
1280 
1281  av_freep(&s->td);
1282  return 0;
1283 }
1284 
/**
 * Decode all tiles of the current frame on a single thread.
 *
 * Also used by the two-pass frame-threading path: s->pass == 1 parses only
 * (no reconstruction/loopfilter), s->pass == 2 replays recorded partitions
 * via decode_sb_mem() without touching the tile bitstreams again.
 *
 * @param data start of the tile data (after the compressed header)
 * @param size remaining byte count
 * @return 0 on success, a negative AVERROR on bitstream errors
 */
static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // set up one range decoder per tile column of this tile row
        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                // the final tile has no length prefix; it takes all remaining data
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size)
                return AVERROR_INVALIDDATA;
            ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
                return AVERROR_INVALIDDATA;
            data += tile_size;
            size -= tile_size;
        }

        // walk superblock rows; yoff/uvoff track the plane offset of the row
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    // reset left-edge prediction contexts at each tile boundary
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        // running out of bitstream here means corrupt input
                        if (vpx_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            // parse-only pass: skip reconstruction-side work below
            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, row >> 3);
        }
    }
    return 0;
}
1413 
1414 #if HAVE_THREADS
/**
 * Slice-threading worker: decode tile column `jobnr` across all tile rows.
 *
 * The range decoders in td->c_b[] were initialized by the caller
 * (vp9_decode_frame). Row completion is published via
 * vp9_report_tile_progress() so loopfilter_proc() can follow behind.
 *
 * @param jobnr    tile column index, doubles as index into s->td[]
 * @param threadnr worker thread number (unused here)
 */
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];

    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start = tile_col_start;
    // plane offsets of this tile column's left edge
    uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
    yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // one pre-initialized range decoder per tile row of this column
        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);

            // reset left-edge prediction contexts at the tile boundary
            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            // let the loopfilter thread know this sb row of the column is done
            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}
1491 
1492 static av_always_inline
1493 int loopfilter_proc(AVCodecContext *avctx)
1494 {
1495  VP9Context *s = avctx->priv_data;
1496  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1497  VP9Filter *lflvl_ptr;
1498  int bytesperpixel = s->bytesperpixel, col, i;
1499  AVFrame *f;
1500 
1501  f = s->s.frames[CUR_FRAME].tf.f;
1502  ls_y = f->linesize[0];
1503  ls_uv =f->linesize[1];
1504 
1505  for (i = 0; i < s->sb_rows; i++) {
1506  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1507 
1508  if (s->s.h.filter.level) {
1509  yoff = (ls_y * 64)*i;
1510  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1511  lflvl_ptr = s->lflvl+s->sb_cols*i;
1512  for (col = 0; col < s->cols;
1513  col += 8, yoff += 64 * bytesperpixel,
1514  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1515  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1516  yoff, uvoff);
1517  }
1518  }
1519  }
1520  return 0;
1521 }
1522 #endif
1523 
1525 {
1526  AVVideoEncParams *par;
1527  unsigned int tile, nb_blocks = 0;
1528 
1529  if (s->s.h.segmentation.enabled) {
1530  for (tile = 0; tile < s->active_tile_cols; tile++)
1531  nb_blocks += s->td[tile].nb_block_structure;
1532  }
1533 
1535  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1536  if (!par)
1537  return AVERROR(ENOMEM);
1538 
1539  par->qp = s->s.h.yac_qi;
1540  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1541  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1542  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1543  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1544  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1545 
1546  if (nb_blocks) {
1547  unsigned int block = 0;
1548  unsigned int tile, block_tile;
1549 
1550  for (tile = 0; tile < s->active_tile_cols; tile++) {
1551  VP9TileData *td = &s->td[tile];
1552 
1553  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1555  unsigned int row = td->block_structure[block_tile].row;
1556  unsigned int col = td->block_structure[block_tile].col;
1557  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1558 
1559  b->src_x = col * 8;
1560  b->src_y = row * 8;
1561  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1562  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1563 
1564  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1565  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1566  if (s->s.h.segmentation.absolute_vals)
1567  b->delta_qp -= par->qp;
1568  }
1569  }
1570  }
1571  }
1572 
1573  return 0;
1574 }
1575 
1577  int *got_frame, AVPacket *pkt)
1578 {
1579  const uint8_t *data = pkt->data;
1580  int size = pkt->size;
1581  VP9Context *s = avctx->priv_data;
1582  int ret, i, j, ref;
1583  CodedBitstreamUnit *unit;
1584  VP9RawFrame *rf;
1585 
1586  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1587  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1588  const VP9Frame *src;
1589  AVFrame *f;
1590 
1591  ret = ff_cbs_read_packet(s->cbc, &s->current_frag, pkt);
1592  if (ret < 0) {
1593  ff_cbs_fragment_reset(&s->current_frag);
1594  av_log(avctx, AV_LOG_ERROR, "Failed to read frame header.\n");
1595  return ret;
1596  }
1597 
1598  unit = &s->current_frag.units[0];
1599  rf = unit->content;
1600 
1601  av_refstruct_replace(&s->header_ref, unit->content_ref);
1602  s->frame_header = &rf->header;
1603 
1604  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1605  return ret;
1606  } else if (ret == 0) {
1607  if (!s->s.refs[ref].f) {
1608  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1609  return AVERROR_INVALIDDATA;
1610  }
1611  for (int i = 0; i < 8; i++)
1612  ff_progress_frame_replace(&s->next_refs[i], &s->s.refs[i]);
1613  ff_thread_finish_setup(avctx);
1614  ff_progress_frame_await(&s->s.refs[ref], INT_MAX);
1615  ff_cbs_fragment_reset(&s->current_frag);
1616 
1617  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1618  return ret;
1619  frame->pts = pkt->pts;
1620  frame->pkt_dts = pkt->dts;
1621  *got_frame = 1;
1622  return pkt->size;
1623  }
1624  data += ret;
1625  size -= ret;
1626 
1627  src = !s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres ?
1628  &s->s.frames[CUR_FRAME] : &s->s.frames[BLANK_FRAME];
1629  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly)
1630  vp9_frame_replace(&s->s.frames[REF_FRAME_SEGMAP], src);
1631  vp9_frame_replace(&s->s.frames[REF_FRAME_MVPAIR], src);
1632  vp9_frame_unref(&s->s.frames[CUR_FRAME]);
1633  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1634  return ret;
1635 
1636  s->s.frames[CUR_FRAME].header_ref = av_refstruct_ref(s->header_ref);
1637  s->s.frames[CUR_FRAME].frame_header = s->frame_header;
1638 
1639  f = s->s.frames[CUR_FRAME].tf.f;
1640  if (s->s.h.keyframe)
1641  f->flags |= AV_FRAME_FLAG_KEY;
1642  else
1643  f->flags &= ~AV_FRAME_FLAG_KEY;
1644  if (s->s.h.lossless)
1645  f->flags |= AV_FRAME_FLAG_LOSSLESS;
1646  else
1647  f->flags &= ~AV_FRAME_FLAG_LOSSLESS;
1648  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1649 
1650  // Non-existent frames have the implicit dimension 0x0 != CUR_FRAME
1651  if (!s->s.frames[REF_FRAME_MVPAIR].tf.f ||
1652  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1653  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1654  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1655  }
1656 
1657  // ref frame setup
1658  for (i = 0; i < 8; i++) {
1659  ff_progress_frame_replace(&s->next_refs[i],
1660  s->s.h.refreshrefmask & (1 << i) ?
1661  &s->s.frames[CUR_FRAME].tf : &s->s.refs[i]);
1662  }
1663 
1664  if (avctx->hwaccel) {
1665  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1666  ret = hwaccel->start_frame(avctx, pkt->buf, pkt->data, pkt->size);
1667  if (ret < 0)
1668  return ret;
1669  ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1670  if (ret < 0)
1671  return ret;
1672  ret = hwaccel->end_frame(avctx);
1673  if (ret < 0)
1674  return ret;
1675 
1676  for (i = 0; i < 8; i++) {
1677  vp9_frame_replace(&s->s.ref_frames[i],
1678  s->s.h.refreshrefmask & (1 << i) ?
1679  &s->s.frames[CUR_FRAME] : &s->s.ref_frames[i]);
1680  }
1681 
1682  goto finish;
1683  }
1684 
1685  // main tile decode loop
1686  memset(s->above_partition_ctx, 0, s->cols);
1687  memset(s->above_skip_ctx, 0, s->cols);
1688  if (s->s.h.keyframe || s->s.h.intraonly) {
1689  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1690  } else {
1691  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1692  }
1693  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1694  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1695  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1696  memset(s->above_segpred_ctx, 0, s->cols);
1697  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1698  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1699  if ((ret = update_block_buffers(avctx)) < 0) {
1700  av_log(avctx, AV_LOG_ERROR,
1701  "Failed to allocate block buffers\n");
1702  return ret;
1703  }
1704  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1705  int j, k, l, m;
1706 
1707  for (i = 0; i < 4; i++) {
1708  for (j = 0; j < 2; j++)
1709  for (k = 0; k < 2; k++)
1710  for (l = 0; l < 6; l++)
1711  for (m = 0; m < 6; m++)
1712  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1713  s->prob.coef[i][j][k][l][m], 3);
1714  if (s->s.h.txfmmode == i)
1715  break;
1716  }
1717  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1718  ff_thread_finish_setup(avctx);
1719  } else if (!s->s.h.refreshctx) {
1720  ff_thread_finish_setup(avctx);
1721  }
1722 
1723 #if HAVE_THREADS
1724  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1725  for (i = 0; i < s->sb_rows; i++)
1726  atomic_init(&s->entries[i], 0);
1727  }
1728 #endif
1729 
1730  do {
1731  for (i = 0; i < s->active_tile_cols; i++) {
1732  s->td[i].b = s->td[i].b_base;
1733  s->td[i].block = s->td[i].block_base;
1734  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1735  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1736  s->td[i].eob = s->td[i].eob_base;
1737  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1738  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1739  s->td[i].error_info = 0;
1740  }
1741 
1742 #if HAVE_THREADS
1743  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1744  int tile_row, tile_col;
1745 
1746  av_assert1(!s->pass);
1747 
1748  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1749  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1750  int64_t tile_size;
1751 
1752  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1753  tile_row == s->s.h.tiling.tile_rows - 1) {
1754  tile_size = size;
1755  } else {
1756  tile_size = AV_RB32(data);
1757  data += 4;
1758  size -= 4;
1759  }
1760  if (tile_size > size)
1761  return AVERROR_INVALIDDATA;
1762  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1763  if (ret < 0)
1764  return ret;
1765  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1766  return AVERROR_INVALIDDATA;
1767  data += tile_size;
1768  size -= tile_size;
1769  }
1770  }
1771 
1772  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1773  } else
1774 #endif
1775  {
1776  ret = decode_tiles(avctx, data, size);
1777  if (ret < 0)
1778  goto fail;
1779  }
1780 
1781  // Sum all counts fields into td[0].counts for tile threading
1782  if (avctx->active_thread_type == FF_THREAD_SLICE)
1783  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1784  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1785  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1786 
1787  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1789  ff_thread_finish_setup(avctx);
1790  }
1791  } while (s->pass++ == 1);
1792 
1793  if (s->td->error_info < 0) {
1794  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1795  s->td->error_info = 0;
1797  goto fail;
1798  }
1800  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1801  if (ret < 0)
1802  goto fail;
1803  }
1804 
1805 finish:
1806  ff_cbs_fragment_reset(&s->current_frag);
1807 
1808  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1809  // ref frame setup
1810  for (int i = 0; i < 8; i++)
1811  ff_progress_frame_replace(&s->s.refs[i], &s->next_refs[i]);
1812 
1813  if (!s->s.h.invisible) {
1814  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1815  return ret;
1816  *got_frame = 1;
1817  }
1818 
1819  return pkt->size;
1820 fail:
1821  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1822  return ret;
1823 }
1824 
1826 {
1827  VP9Context *s = avctx->priv_data;
1828  int i;
1829 
1830  for (i = 0; i < 3; i++)
1831  vp9_frame_unref(&s->s.frames[i]);
1832 
1833  for (i = 0; i < 8; i++) {
1834  ff_progress_frame_unref(&s->s.refs[i]);
1835  vp9_frame_unref(&s->s.ref_frames[i]);
1836  }
1837 
1838  ff_cbs_fragment_reset(&s->current_frag);
1839  ff_cbs_flush(s->cbc);
1840 
1841  if (FF_HW_HAS_CB(avctx, flush))
1842  FF_HW_SIMPLE_CALL(avctx, flush);
1843 }
1844 
1846 {
1847  VP9Context *s = avctx->priv_data;
1848  int ret;
1849 
1850  s->last_bpp = 0;
1851  s->s.h.filter.sharpness = -1;
1852 
1853  ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_VP9, avctx);
1854  if (ret < 0)
1855  return ret;
1856 
1857 #if HAVE_THREADS
1858  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1859  ret = ff_pthread_init(s, vp9_context_offsets);
1860  if (ret < 0)
1861  return ret;
1862  }
1863 #endif
1864 
1865  return 0;
1866 }
1867 
1868 #if HAVE_THREADS
/**
 * Frame-threading state transfer: copy everything a subsequent frame needs
 * from the source (previous) decoder context into the destination.
 *
 * Frames and references are shared by reference counting; header fields,
 * probability contexts and geometry are copied by value.
 */
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    // internal frames and the reference slots (next_refs holds the
    // post-frame state the source thread published)
    for (int i = 0; i < 3; i++)
        vp9_frame_replace(&s->s.frames[i], &ssrc->s.frames[i]);
    for (int i = 0; i < 8; i++)
        ff_progress_frame_replace(&s->s.refs[i], &ssrc->next_refs[i]);
    av_refstruct_replace(&s->frame_extradata_pool, ssrc->frame_extradata_pool);
    s->frame_extradata_pool_size = ssrc->frame_extradata_pool_size;

    // CBS-parsed header and hwaccel reference frames
    av_refstruct_replace(&s->header_ref, ssrc->header_ref);
    for (int i = 0; i < 8; i++)
        vp9_frame_replace(&s->s.ref_frames[i], &ssrc->s.ref_frames[i]);

    s->frame_header = ssrc->frame_header;
    memcpy(s->cbc->priv_data, ssrc->cbc->priv_data, sizeof(CodedBitstreamVP9Context));

    // by-value header/geometry state the next frame's parsing depends on
    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->gf_fmt = ssrc->gf_fmt;
    s->w = ssrc->w;
    s->h = ssrc->h;
    s->s.h.bpp = ssrc->s.h.bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    // adapted probability contexts and per-segment/loopfilter deltas
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
1909 #endif
1910 
1912  .p.name = "vp9",
1913  CODEC_LONG_NAME("Google VP9"),
1914  .p.type = AVMEDIA_TYPE_VIDEO,
1915  .p.id = AV_CODEC_ID_VP9,
1916  .priv_data_size = sizeof(VP9Context),
1917  .init = vp9_decode_init,
1921  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1924  .flush = vp9_decode_flush,
1925  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1926  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1927  .bsfs = "vp9_superframe_split",
1928  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1929 #if CONFIG_VP9_DXVA2_HWACCEL
1930  HWACCEL_DXVA2(vp9),
1931 #endif
1932 #if CONFIG_VP9_D3D11VA_HWACCEL
1933  HWACCEL_D3D11VA(vp9),
1934 #endif
1935 #if CONFIG_VP9_D3D11VA2_HWACCEL
1936  HWACCEL_D3D11VA2(vp9),
1937 #endif
1938 #if CONFIG_VP9_D3D12VA_HWACCEL
1939  HWACCEL_D3D12VA(vp9),
1940 #endif
1941 #if CONFIG_VP9_NVDEC_HWACCEL
1942  HWACCEL_NVDEC(vp9),
1943 #endif
1944 #if CONFIG_VP9_VAAPI_HWACCEL
1945  HWACCEL_VAAPI(vp9),
1946 #endif
1947 #if CONFIG_VP9_VDPAU_HWACCEL
1948  HWACCEL_VDPAU(vp9),
1949 #endif
1950 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1951  HWACCEL_VIDEOTOOLBOX(vp9),
1952 #endif
1953 #if CONFIG_VP9_VULKAN_HWACCEL
1954  HWACCEL_VULKAN(vp9),
1955 #endif
1956  NULL
1957  },
1958 };
VP9TileData::left_y_nnz_ctx
uint8_t left_y_nnz_ctx[16]
Definition: vp9dec.h:216
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1828
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: codec_internal.h:64
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1285
CodedBitstreamUnit::content_ref
void * content_ref
If content is reference counted, a RefStruct reference backing content.
Definition: cbs.h:119
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:106
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:53
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:51
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:659
VP9TileData::uvblock_base
int16_t * uvblock_base[2]
Definition: vp9dec.h:232
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1200
VP9TileData::partition
unsigned partition[4][4][4]
Definition: vp9dec.h:207
VP9Frame
Definition: vp9shared.h:66
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
ff_vp9_decoder
const FFCodec ff_vp9_decoder
Definition: vp9.c:1911
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1111
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:44
CodedBitstreamUnit::content
void * content
Pointer to the decomposed form of this unit.
Definition: cbs.h:114
int64_t
long long int64_t
Definition: coverity.c:34
vp9_decode_flush
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1825
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
VP9TileData::left_skip_ctx
uint8_t left_skip_ctx[8]
Definition: vp9dec.h:221
VP9TileData::row
int row
Definition: vp9dec.h:177
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:52
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:421
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:767
BlockPartition
BlockPartition
Definition: vp9shared.h:36
AVPacket::data
uint8_t * data
Definition: packet.h:552
DC_PRED
@ DC_PRED
Definition: vp9.h:48
pthread_mutex_lock
static av_always_inline int pthread_mutex_lock(pthread_mutex_t *mutex)
Definition: os2threads.h:119
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
b
#define b
Definition: input.c:42
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
Wrapper around ff_progress_frame_alloc() and ff_thread_get_buffer().
Definition: decode.c:1788
data
const char data[16]
Definition: mxf.c:149
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:164
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1191
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:174
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:515
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:539
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:127
VP9TileData::c_b
VPXRangeCoder * c_b
Definition: vp9dec.h:175
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:691
VP9TileData::left_segpred_ctx
uint8_t left_segpred_ctx[8]
Definition: vp9dec.h:223
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:49
VP9Block::bl
enum BlockLevel bl
Definition: vp9dec.h:91
vp89_rac.h
VP9Filter
Definition: vp9dec.h:79
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
VP9TileData::b
VP9Block * b
Definition: vp9dec.h:180
VPXRangeCoder
Definition: vpx_rac.h:35
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:92
AV_PIX_FMT_VULKAN
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Definition: pixfmt.h:379
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
CodedBitstreamUnit
Coded bitstream unit structure.
Definition: cbs.h:77
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
VP9Block
Definition: vp9dec.h:85
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:379
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:696
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:694
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
FFHWAccel
Definition: hwaccel_internal.h:34
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:199
AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
#define AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
If this flag is set, the entries will be zeroed before being returned to the user (after the init or ...
Definition: refstruct.h:221
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:558
GetBitContext
Definition: get_bits.h:109
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:37
vp9_frame_unref
static void vp9_frame_unref(VP9Frame *f)
Definition: vp9.c:97
progressframe.h
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
VP9TileData::col
int col
Definition: vp9dec.h:177
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1257
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:542
avassert.h
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:68
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1638
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:636
BL_8X8
@ BL_8X8
Definition: vp9shared.h:83
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:39
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:341
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2181
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
pthread_mutex_unlock
static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
Definition: os2threads.h:126
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:697
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:222
vp9data.h
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1811
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
decode.h
get_bits.h
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
VP9TileData::block_size_idx_x
unsigned int block_size_idx_x
Definition: vp9dec.h:240
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:88
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1576
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:535
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:669
hwaccel_internal.h
VP9Context
Definition: vp9dec.h:97
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:108
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:173
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:386
vp89_rac_get_uint
static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
Definition: vp89_rac.h:41
profiles.h
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:541
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts.c:370
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
pthread_internal.h
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:335
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
VP9mv
Definition: vp9shared.h:56
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:40
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:179
VP9RawFrame
Definition: cbs_vp9.h:164
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp9_frame_replace
static void vp9_frame_replace(VP9Frame *dst, const VP9Frame *src)
Definition: vp9.c:147
VP9RawFrame::header
VP9RawFrameHeader header
Definition: cbs_vp9.h:165
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
vp9.h
f
f
Definition: af_crystalizer.c:122
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:553
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
codec_internal.h
VP9TileData::eob_base
uint8_t * eob_base
Definition: vp9dec.h:233
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:68
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:43
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:453
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:544
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:87
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
VP9TileData::b_base
VP9Block * b_base
Definition: vp9dec.h:180
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1248
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:546
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1573
AVCodecHWConfigInternal
Definition: hwconfig.h:25
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:318
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:551
av_refstruct_ref
void * av_refstruct_ref(void *obj)
Create a new reference to an object managed via this API, i.e.
Definition: refstruct.c:140
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
CodedBitstreamVP9Context
Definition: cbs_vp9.h:192
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
VP9TileData::block_base
int16_t * block_base
Definition: vp9dec.h:232
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:385
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
vpx_rac_is_end
static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vpx_rac.h:51
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1572
VP9TileData::left_uv_nnz_ctx
uint8_t left_uv_nnz_ctx[2][16]
Definition: vp9dec.h:219
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:104
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:698
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
update_prob
static int update_prob(VPXRangeCoder *c, int p)
Definition: vp9.c:395
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:545
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:701
vpx_rac.h
decode012
static int BS_FUNC() decode012(BSCTX *bc)
Return decoded truncated unary code for the values 0, 1, 2.
Definition: bitstream_template.h:444
VP9TileData::block_size_idx_y
unsigned int block_size_idx_y
Definition: vp9dec.h:241
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:559
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:690
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
vpx_rac_get_prob_branchy
static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
Definition: vpx_rac.h:99
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:379
VP9TileData::left_mode_ctx
uint8_t left_mode_ctx[16]
Definition: vp9dec.h:217
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:693
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:750
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
VP9TileData::c
VPXRangeCoder * c
Definition: vp9dec.h:176
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
vp89_rac_get_tree
static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp89_rac.h:54
VP9TileData::s
const VP9Context * s
Definition: vp9dec.h:174
BL_64X64
@ BL_64X64
Definition: vp9shared.h:80
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1845
tile
static int FUNC() tile(CodedBitstreamContext *ctx, RWContext *rw, APVRawTile *current, int tile_idx, uint32_t tile_size)
Definition: cbs_apv_syntax_template.c:224
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:555
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
ff_vpx_init_range_decoder
int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vpx_rac.c:42
av_refstruct_pool_alloc
AVRefStructPool * av_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to av_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
Definition: refstruct.c:335
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:90
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:61
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:543
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:395
VP9TileData::block_structure
struct VP9TileData::@311 * block_structure
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1818
VP9TileData
Definition: vp9dec.h:173
VP9TileData::uveob_base
uint8_t * uveob_base[2]
Definition: vp9dec.h:233
HWACCEL_VULKAN
#define HWACCEL_VULKAN(codec)
Definition: hwconfig.h:76
vp89_rac_get
static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
Definition: vp89_rac.h:36
VP9TileData::counts
struct VP9TileData::@309 counts
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1580
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:82
av_refstruct_replace
void av_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1618
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1264
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:44
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
BlockLevel
BlockLevel
Definition: vp9shared.h:79
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:1774
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:105
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:172
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(struct AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:179
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1524
AVPacket
This structure stores compressed data.
Definition: packet.h:529
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:38
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
BLANK_FRAME
#define BLANK_FRAME
Definition: vp9shared.h:175
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1637
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:545
h
h
Definition: vp9dsp_template.c:2070
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
VP9TileData::nb_block_structure
unsigned int nb_block_structure
Definition: vp9dec.h:243
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:692
VP9TileData::tile_col_start
unsigned tile_col_start
Definition: vp9dec.h:181
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:657
src
#define src
Definition: vp8dsp.c:248
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:155
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1240
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3361
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540
VP9TileData::left_partition_ctx
uint8_t left_partition_ctx[8]
Definition: vp9dec.h:220