FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/emms.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "h263.h"
36 #include "h264chroma.h"
37 #include "internal.h"
38 #include "mpegutils.h"
39 #include "mpegvideo.h"
40 #include "mpegvideodec.h"
41 #include "mpeg4videodec.h"
42 #include "libavutil/refstruct.h"
43 #include "thread.h"
44 #include "threadprogress.h"
45 #include "wmv2dec.h"
46 
48 {
49  enum ThreadingStatus thread_status;
50 
52 
53  s->avctx = avctx;
54  s->width = avctx->coded_width;
55  s->height = avctx->coded_height;
56  s->codec_id = avctx->codec->id;
57  s->workaround_bugs = avctx->workaround_bugs;
58 
59  /* convert fourcc to upper case */
60  s->codec_tag = ff_toupper4(avctx->codec_tag);
61 
63 
64  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
65 
66  if (s->picture_pool) // VC-1 can call this multiple times
67  return 0;
68 
69  thread_status = ff_thread_sync_ref(avctx, offsetof(MpegEncContext, picture_pool));
70  if (thread_status != FF_THREAD_IS_COPY) {
71  s->picture_pool = ff_mpv_alloc_pic_pool(thread_status != FF_THREAD_NO_FRAME_THREADING);
72  if (!s->picture_pool)
73  return AVERROR(ENOMEM);
74  }
75  return 0;
76 }
77 
79  const AVCodecContext *src)
80 {
81  MpegEncContext *const s1 = src->priv_data;
82  MpegEncContext *const s = dst->priv_data;
83  int ret = 0;
84 
85  if (dst == src)
86  return 0;
87 
88  av_assert0(s != s1);
89 
90  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
91  s->height = s1->height;
92  s->width = s1->width;
94  return ret;
95  ret = 1;
96  }
97 
98  s->quarter_sample = s1->quarter_sample;
99 
100  s->picture_number = s1->picture_number;
101 
102  ff_mpv_replace_picture(&s->cur_pic, &s1->cur_pic);
103  ff_mpv_replace_picture(&s->last_pic, &s1->last_pic);
104  ff_mpv_replace_picture(&s->next_pic, &s1->next_pic);
105 
106  s->linesize = s1->linesize;
107  s->uvlinesize = s1->uvlinesize;
108 
109  // Error/bug resilience
110  s->workaround_bugs = s1->workaround_bugs;
111  s->padding_bug_score = s1->padding_bug_score;
112 
113  // MPEG-4 timing info
114  memcpy(&s->last_time_base, &s1->last_time_base,
115  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
116  (char *) &s1->last_time_base);
117 
118  // B-frame info
119  s->low_delay = s1->low_delay;
120 
121  // MPEG-2/interlacing info
122  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
123  (char *) &s1->first_field + sizeof(s1->first_field) - (char *) &s1->progressive_sequence);
124 
125  return ret;
126 }
127 
129 {
131 
132  av_refstruct_pool_uninit(&s->picture_pool);
134  return 0;
135 }
136 
138 {
139  int err = 0;
140 
141  if (!s->context_initialized)
142  return AVERROR(EINVAL);
143 
145 
146  ff_mpv_unref_picture(&s->last_pic);
147  ff_mpv_unref_picture(&s->next_pic);
148  ff_mpv_unref_picture(&s->cur_pic);
149 
150  if ((s->width || s->height) &&
151  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
152  goto fail;
153 
154  /* set chroma shifts */
155  err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
156  &s->chroma_x_shift,
157  &s->chroma_y_shift);
158  if (err < 0)
159  goto fail;
160 
161  if ((err = ff_mpv_init_context_frame(s)))
162  goto fail;
163 
164  memset(s->thread_context, 0, sizeof(s->thread_context));
165  s->thread_context[0] = s;
166 
167  if (s->width && s->height) {
169  if (err < 0)
170  goto fail;
171  }
172  s->context_reinit = 0;
173 
174  return 0;
175  fail:
177  s->context_reinit = 1;
178  return err;
179 }
180 
181 static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
182 {
183  AVCodecContext *avctx = s->avctx;
184  MPVPicture *pic = av_refstruct_pool_get(s->picture_pool);
185  int ret;
186 
187  if (!pic)
188  return AVERROR(ENOMEM);
189 
190  dst->ptr = pic;
191 
192  pic->reference = reference;
193 
194  /* WM Image / Screen codecs allocate internal buffers with different
195  * dimensions / colorspaces; ignore user-defined callbacks for these. */
200  reference ? AV_GET_BUFFER_FLAG_REF : 0);
201  } else {
202  pic->f->width = avctx->width;
203  pic->f->height = avctx->height;
204  pic->f->format = avctx->pix_fmt;
206  }
207  if (ret < 0)
208  goto fail;
209 
210  ret = ff_mpv_pic_check_linesize(avctx, pic->f, &s->linesize, &s->uvlinesize);
211  if (ret < 0)
212  goto fail;
213 
215  if (ret < 0)
216  goto fail;
217 
218  av_assert1(s->mb_width == s->buffer_pools.alloc_mb_width);
219  av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height ||
220  FFALIGN(s->mb_height, 2) == s->buffer_pools.alloc_mb_height);
221  av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
222  ret = ff_mpv_alloc_pic_accessories(s->avctx, dst, &s->sc,
223  &s->buffer_pools, s->mb_height);
224  if (ret < 0)
225  goto fail;
226 
227  return 0;
228 fail:
230  return ret;
231 }
232 
234 {
235  MPVPicture *pic;
236  int ret = alloc_picture(s, dst, 1);
237  if (ret < 0)
238  return ret;
239 
240  pic = dst->ptr;
241  pic->dummy = 1;
242 
243  ff_thread_progress_report(&pic->progress, INT_MAX);
244 
245  return 0;
246 }
247 
248 static void color_frame(AVFrame *frame, int luma)
249 {
250  int h_chroma_shift, v_chroma_shift;
251 
252  for (int i = 0; i < frame->height; i++)
253  memset(frame->data[0] + frame->linesize[0] * i, luma, frame->width);
254 
255  if (!frame->data[1])
256  return;
257  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
258  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
259  memset(frame->data[1] + frame->linesize[1] * i,
260  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
261  memset(frame->data[2] + frame->linesize[2] * i,
262  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
263  }
264 }
265 
267 {
268  AVCodecContext *avctx = s->avctx;
269  int ret;
270 
271  av_assert1(!s->last_pic.ptr || s->last_pic.ptr->f->buf[0]);
272  av_assert1(!s->next_pic.ptr || s->next_pic.ptr->f->buf[0]);
273  if (!s->last_pic.ptr && s->pict_type != AV_PICTURE_TYPE_I) {
274  if (s->pict_type == AV_PICTURE_TYPE_B && s->next_pic.ptr)
276  "allocating dummy last picture for B frame\n");
277  else if (s->codec_id != AV_CODEC_ID_H261 /* H.261 has no keyframes */ &&
278  (s->picture_structure == PICT_FRAME || s->first_field))
280  "warning: first frame is no keyframe\n");
281 
282  /* Allocate a dummy frame */
283  ret = alloc_dummy_frame(s, &s->last_pic);
284  if (ret < 0)
285  return ret;
286 
287  if (!avctx->hwaccel) {
288  int luma_val = s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263 ? 16 : 0x80;
289  color_frame(s->last_pic.ptr->f, luma_val);
290  }
291  }
292  if (!s->next_pic.ptr && s->pict_type == AV_PICTURE_TYPE_B) {
293  /* Allocate a dummy frame */
294  ret = alloc_dummy_frame(s, &s->next_pic);
295  if (ret < 0)
296  return ret;
297  }
298 
299  av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_pic.ptr &&
300  s->last_pic.ptr->f->buf[0]));
301 
302  return 0;
303 }
304 
305 /**
306  * generic function called after decoding
307  * the header and before a frame is decoded.
308  */
310 {
311  int ret;
312 
313  s->mb_skipped = 0;
314 
316  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
317  return AVERROR_BUG;
318  }
319 
320  ff_mpv_unref_picture(&s->cur_pic);
321  ret = alloc_picture(s, &s->cur_pic,
322  s->pict_type != AV_PICTURE_TYPE_B && !s->droppable);
323  if (ret < 0)
324  return ret;
325 
326  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
327  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_INTERLACED *
328  (!s->progressive_frame && !s->progressive_sequence);
329  s->cur_pic.ptr->field_picture = s->picture_structure != PICT_FRAME;
330 
331  s->cur_pic.ptr->f->pict_type = s->pict_type;
332  if (s->pict_type == AV_PICTURE_TYPE_I)
333  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
334  else
335  s->cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
336 
337  if (s->pict_type != AV_PICTURE_TYPE_B) {
338  ff_mpv_workpic_from_pic(&s->last_pic, s->next_pic.ptr);
339  if (!s->droppable)
340  ff_mpv_workpic_from_pic(&s->next_pic, s->cur_pic.ptr);
341  }
342  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
343  (void*)s->last_pic.ptr, (void*)s->next_pic.ptr, (void*)s->cur_pic.ptr,
344  s->last_pic.ptr ? s->last_pic.ptr->f->data[0] : NULL,
345  s->next_pic.ptr ? s->next_pic.ptr->f->data[0] : NULL,
346  s->cur_pic.ptr ? s->cur_pic.ptr->f->data[0] : NULL,
347  s->pict_type, s->droppable);
348 
350  if (ret < 0)
351  return ret;
352 
353  if (s->avctx->debug & FF_DEBUG_NOMC)
354  color_frame(s->cur_pic.ptr->f, 0x80);
355 
356  return 0;
357 }
358 
359 /* called after a frame has been decoded. */
361 {
362  emms_c();
363 
364  if (s->cur_pic.reference)
365  ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);
366 }
367 
369 {
370  ff_print_debug_info2(s->avctx, pict, p->mb_type,
371  p->qscale_table, p->motion_val,
372  p->mb_width, p->mb_height, p->mb_stride, s->quarter_sample);
373 }
374 
376  const MPVPicture *p, int qp_type)
377 {
378  AVVideoEncParams *par;
379  int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
380  unsigned int nb_mb = p->mb_height * p->mb_width;
381 
382  if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
383  return 0;
384 
386  if (!par)
387  return AVERROR(ENOMEM);
388 
389  for (unsigned y = 0; y < p->mb_height; y++)
390  for (unsigned x = 0; x < p->mb_width; x++) {
391  const unsigned int block_idx = y * p->mb_width + x;
392  const unsigned int mb_xy = y * p->mb_stride + x;
393  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
394 
395  b->src_x = x * 16;
396  b->src_y = y * 16;
397  b->w = 16;
398  b->h = 16;
399 
400  b->delta_qp = p->qscale_table[mb_xy] * mult;
401  }
402 
403  return 0;
404 }
405 
407 {
408  ff_draw_horiz_band(s->avctx, s->cur_pic.ptr->f,
409  s->last_pic.ptr ? s->last_pic.ptr->f : NULL,
410  y, h, s->picture_structure,
411  s->first_field, s->low_delay);
412 }
413 
415 {
416  MpegEncContext *const s = avctx->priv_data;
417 
418  ff_mpv_unref_picture(&s->cur_pic);
419  ff_mpv_unref_picture(&s->last_pic);
420  ff_mpv_unref_picture(&s->next_pic);
421 
422  s->mb_x = s->mb_y = 0;
423 
424  s->pp_time = 0;
425 }
426 
428  uint8_t *dest, const uint8_t *src,
429  int field_based, int field_select,
430  int src_x, int src_y,
431  int width, int height, ptrdiff_t stride,
432  int h_edge_pos, int v_edge_pos,
433  int w, int h, const h264_chroma_mc_func *pix_op,
434  int motion_x, int motion_y)
435 {
436  const int lowres = s->avctx->lowres;
437  const int op_index = lowres;
438  const int s_mask = (2 << lowres) - 1;
439  int emu = 0;
440  int sx, sy;
441 
442  av_assert2(op_index <= 3);
443 
444  if (s->quarter_sample) {
445  motion_x /= 2;
446  motion_y /= 2;
447  }
448 
449  sx = motion_x & s_mask;
450  sy = motion_y & s_mask;
451  src_x += motion_x >> lowres + 1;
452  src_y += motion_y >> lowres + 1;
453 
454  src += src_y * stride + src_x;
455 
456  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
457  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
458  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
459  s->linesize, s->linesize,
460  w + 1, (h + 1) << field_based,
461  src_x, src_y * (1 << field_based),
463  src = s->sc.edge_emu_buffer;
464  emu = 1;
465  }
466 
467  sx = (sx << 2) >> lowres;
468  sy = (sy << 2) >> lowres;
469  if (field_select)
470  src += s->linesize;
471  pix_op[op_index](dest, src, stride, h, sx, sy);
472  return emu;
473 }
474 
475 /* apply one mpeg motion vector to the three components */
477  uint8_t *dest_y,
478  uint8_t *dest_cb,
479  uint8_t *dest_cr,
480  int field_based,
481  int bottom_field,
482  int field_select,
483  uint8_t *const *ref_picture,
484  const h264_chroma_mc_func *pix_op,
485  int motion_x, int motion_y,
486  int h, int mb_y)
487 {
488  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
489  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
490  ptrdiff_t uvlinesize, linesize;
491  const int lowres = s->avctx->lowres;
492  const int op_index = lowres - 1 + s->chroma_x_shift;
493  const int block_s = 8 >> lowres;
494  const int s_mask = (2 << lowres) - 1;
495  const int h_edge_pos = s->h_edge_pos >> lowres;
496  const int v_edge_pos = s->v_edge_pos >> lowres;
497  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
498 
499  av_assert2(op_index <= 3);
500 
501  linesize = s->cur_pic.linesize[0] << field_based;
502  uvlinesize = s->cur_pic.linesize[1] << field_based;
503 
504  // FIXME obviously not perfect but qpel will not work in lowres anyway
505  if (s->quarter_sample) {
506  motion_x /= 2;
507  motion_y /= 2;
508  }
509 
510  if (field_based) {
511  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
512  }
513 
514  sx = motion_x & s_mask;
515  sy = motion_y & s_mask;
516  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
517  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
518 
519  if (s->out_format == FMT_H263) {
520  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
521  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
522  uvsrc_x = src_x >> 1;
523  uvsrc_y = src_y >> 1;
524  } else if (s->out_format == FMT_H261) {
525  // even chroma mv's are full pel in H261
526  mx = motion_x / 4;
527  my = motion_y / 4;
528  uvsx = (2 * mx) & s_mask;
529  uvsy = (2 * my) & s_mask;
530  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
531  uvsrc_y = mb_y * block_s + (my >> lowres);
532  } else {
533  if (s->chroma_y_shift) {
534  mx = motion_x / 2;
535  my = motion_y / 2;
536  uvsx = mx & s_mask;
537  uvsy = my & s_mask;
538  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
539  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
540  } else {
541  if (s->chroma_x_shift) {
542  //Chroma422
543  mx = motion_x / 2;
544  uvsx = mx & s_mask;
545  uvsy = motion_y & s_mask;
546  uvsrc_y = src_y;
547  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
548  } else {
549  //Chroma444
550  uvsx = motion_x & s_mask;
551  uvsy = motion_y & s_mask;
552  uvsrc_x = src_x;
553  uvsrc_y = src_y;
554  }
555  }
556  }
557 
558  ptr_y = ref_picture[0] + src_y * linesize + src_x;
559  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
560  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
561 
562  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
563  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, hc<<s->chroma_y_shift), 0)) {
564  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
565  linesize >> field_based, linesize >> field_based,
566  17, 17 + field_based,
567  src_x, src_y * (1 << field_based), h_edge_pos,
568  v_edge_pos);
569  ptr_y = s->sc.edge_emu_buffer;
570  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
571  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
572  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
573  if (s->workaround_bugs & FF_BUG_IEDGE)
574  vbuf -= s->uvlinesize;
575  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
576  uvlinesize >> field_based, uvlinesize >> field_based,
577  9, 9 + field_based,
578  uvsrc_x, uvsrc_y * (1 << field_based),
579  h_edge_pos >> 1, v_edge_pos >> 1);
580  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
581  uvlinesize >> field_based,uvlinesize >> field_based,
582  9, 9 + field_based,
583  uvsrc_x, uvsrc_y * (1 << field_based),
584  h_edge_pos >> 1, v_edge_pos >> 1);
585  ptr_cb = ubuf;
586  ptr_cr = vbuf;
587  }
588  }
589 
590  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
591  if (bottom_field) {
592  dest_y += s->linesize;
593  dest_cb += s->uvlinesize;
594  dest_cr += s->uvlinesize;
595  }
596 
597  if (field_select) {
598  ptr_y += s->linesize;
599  ptr_cb += s->uvlinesize;
600  ptr_cr += s->uvlinesize;
601  }
602 
603  sx = (sx << 2) >> lowres;
604  sy = (sy << 2) >> lowres;
605  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
606 
607  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
608  uvsx = (uvsx << 2) >> lowres;
609  uvsy = (uvsy << 2) >> lowres;
610  if (hc) {
611  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
612  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
613  }
614  }
615  // FIXME h261 lowres loop filter
616 }
617 
619  uint8_t *dest_cb, uint8_t *dest_cr,
620  uint8_t *const *ref_picture,
621  const h264_chroma_mc_func * pix_op,
622  int mx, int my)
623 {
624  const int lowres = s->avctx->lowres;
625  const int op_index = lowres;
626  const int block_s = 8 >> lowres;
627  const int s_mask = (2 << lowres) - 1;
628  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
629  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
630  int emu = 0, src_x, src_y, sx, sy;
631  ptrdiff_t offset;
632  const uint8_t *ptr;
633 
634  av_assert2(op_index <= 3);
635 
636  if (s->quarter_sample) {
637  mx /= 2;
638  my /= 2;
639  }
640 
641  /* In case of 8X8, we construct a single chroma motion vector
642  with a special rounding */
645 
646  sx = mx & s_mask;
647  sy = my & s_mask;
648  src_x = s->mb_x * block_s + (mx >> lowres + 1);
649  src_y = s->mb_y * block_s + (my >> lowres + 1);
650 
651  offset = src_y * s->uvlinesize + src_x;
652  ptr = ref_picture[1] + offset;
653  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
654  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
655  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
656  s->uvlinesize, s->uvlinesize,
657  9, 9,
658  src_x, src_y, h_edge_pos, v_edge_pos);
659  ptr = s->sc.edge_emu_buffer;
660  emu = 1;
661  }
662  sx = (sx << 2) >> lowres;
663  sy = (sy << 2) >> lowres;
664  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
665 
666  ptr = ref_picture[2] + offset;
667  if (emu) {
668  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
669  s->uvlinesize, s->uvlinesize,
670  9, 9,
671  src_x, src_y, h_edge_pos, v_edge_pos);
672  ptr = s->sc.edge_emu_buffer;
673  }
674  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
675 }
676 
677 /**
678  * motion compensation of a single macroblock
679  * @param s context
680  * @param dest_y luma destination pointer
681  * @param dest_cb chroma cb/u destination pointer
682  * @param dest_cr chroma cr/v destination pointer
683  * @param dir direction (0->forward, 1->backward)
684  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
685  * @param pix_op halfpel motion compensation function (average or put normally)
686  * the motion vectors are taken from s->mv and the MV type from s->mv_type
687  */
688 static inline void MPV_motion_lowres(MpegEncContext *s,
689  uint8_t *dest_y, uint8_t *dest_cb,
690  uint8_t *dest_cr,
691  int dir, uint8_t *const *ref_picture,
692  const h264_chroma_mc_func *pix_op)
693 {
694  int mx, my;
695  int mb_x, mb_y;
696  const int lowres = s->avctx->lowres;
697  const int block_s = 8 >>lowres;
698 
699  mb_x = s->mb_x;
700  mb_y = s->mb_y;
701 
702  switch (s->mv_type) {
703  case MV_TYPE_16X16:
704  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
705  0, 0, 0,
706  ref_picture, pix_op,
707  s->mv[dir][0][0], s->mv[dir][0][1],
708  2 * block_s, mb_y);
709  break;
710  case MV_TYPE_8X8:
711  mx = 0;
712  my = 0;
713  for (int i = 0; i < 4; i++) {
714  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
715  s->linesize) * block_s,
716  ref_picture[0], 0, 0,
717  (2 * mb_x + (i & 1)) * block_s,
718  (2 * mb_y + (i >> 1)) * block_s,
719  s->width, s->height, s->linesize,
720  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
721  block_s, block_s, pix_op,
722  s->mv[dir][i][0], s->mv[dir][i][1]);
723 
724  mx += s->mv[dir][i][0];
725  my += s->mv[dir][i][1];
726  }
727 
728  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
729  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
730  pix_op, mx, my);
731  break;
732  case MV_TYPE_FIELD:
733  if (s->picture_structure == PICT_FRAME) {
734  /* top field */
735  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
736  1, 0, s->field_select[dir][0],
737  ref_picture, pix_op,
738  s->mv[dir][0][0], s->mv[dir][0][1],
739  block_s, mb_y);
740  /* bottom field */
741  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
742  1, 1, s->field_select[dir][1],
743  ref_picture, pix_op,
744  s->mv[dir][1][0], s->mv[dir][1][1],
745  block_s, mb_y);
746  } else {
747  if (s->picture_structure != s->field_select[dir][0] + 1 &&
748  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
749  ref_picture = s->cur_pic.ptr->f->data;
750  }
751  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
752  0, 0, s->field_select[dir][0],
753  ref_picture, pix_op,
754  s->mv[dir][0][0],
755  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
756  }
757  break;
758  case MV_TYPE_16X8:
759  for (int i = 0; i < 2; i++) {
760  uint8_t *const *ref2picture;
761 
762  if (s->picture_structure == s->field_select[dir][i] + 1 ||
763  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
764  ref2picture = ref_picture;
765  } else {
766  ref2picture = s->cur_pic.ptr->f->data;
767  }
768 
769  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
770  0, 0, s->field_select[dir][i],
771  ref2picture, pix_op,
772  s->mv[dir][i][0], s->mv[dir][i][1] +
773  2 * block_s * i, block_s, mb_y >> 1);
774 
775  dest_y += 2 * block_s * s->linesize;
776  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
777  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
778  }
779  break;
780  case MV_TYPE_DMV:
781  if (s->picture_structure == PICT_FRAME) {
782  for (int i = 0; i < 2; i++) {
783  for (int j = 0; j < 2; j++) {
784  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
785  1, j, j ^ i,
786  ref_picture, pix_op,
787  s->mv[dir][2 * i + j][0],
788  s->mv[dir][2 * i + j][1],
789  block_s, mb_y);
790  }
791  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
792  }
793  } else {
794  for (int i = 0; i < 2; i++) {
795  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
796  0, 0, s->picture_structure != i + 1,
797  ref_picture, pix_op,
798  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
799  2 * block_s, mb_y >> 1);
800 
801  // after put we make avg of the same block
802  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
803 
804  // opposite parity is always in the same
805  // frame if this is second field
806  if (!s->first_field) {
807  ref_picture = s->cur_pic.ptr->f->data;
808  }
809  }
810  }
811  break;
812  default:
813  av_unreachable("No other mpegvideo MV types exist");
814  }
815 }
816 
817 /**
818  * find the lowest MB row referenced in the MVs
819  */
821 {
822  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
823  int off, mvs;
824 
825  if (s->picture_structure != PICT_FRAME || s->mcsel)
826  goto unhandled;
827 
828  switch (s->mv_type) {
829  case MV_TYPE_16X16:
830  mvs = 1;
831  break;
832  case MV_TYPE_16X8:
833  mvs = 2;
834  break;
835  case MV_TYPE_8X8:
836  mvs = 4;
837  break;
838  default:
839  goto unhandled;
840  }
841 
842  for (int i = 0; i < mvs; i++) {
843  int my = s->mv[dir][i][1];
844  my_max = FFMAX(my_max, my);
845  my_min = FFMIN(my_min, my);
846  }
847 
848  off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
849 
850  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
851 unhandled:
852  return s->mb_height - 1;
853 }
854 
855 /* add block[] to dest[] */
856 static inline void add_dct(MpegEncContext *s,
857  int16_t *block, int i, uint8_t *dest, int line_size)
858 {
859  if (s->block_last_index[i] >= 0) {
860  s->idsp.idct_add(dest, line_size, block);
861  }
862 }
863 
864 /* put block[] to dest[] */
865 static inline void put_dct(MpegEncContext *s,
866  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
867 {
868  s->dct_unquantize_intra(s, block, i, qscale);
869  s->idsp.idct_put(dest, line_size, block);
870 }
871 
872 static inline void add_dequant_dct(MpegEncContext *s,
873  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
874 {
875  if (s->block_last_index[i] >= 0) {
876  s->dct_unquantize_inter(s, block, i, qscale);
877 
878  s->idsp.idct_add(dest, line_size, block);
879  }
880 }
881 
882 #define NOT_MPEG12_H261 0
883 #define MAY_BE_MPEG12_H261 1
884 #define DEFINITELY_MPEG12_H261 2
885 
886 /* generic function called after a macroblock has been parsed by the decoder.
887 
888  Important variables used:
889  s->mb_intra : true if intra macroblock
890  s->mv_dir : motion vector direction
891  s->mv_type : motion vector type
892  s->mv : motion vector
893  s->interlaced_dct : true if interlaced dct used (mpeg2)
894  */
895 static av_always_inline
897  int lowres_flag, int is_mpeg12)
898 {
899 #define IS_MPEG12_H261(s) (is_mpeg12 == MAY_BE_MPEG12_H261 ? ((s)->out_format <= FMT_H261) : is_mpeg12)
900  uint8_t *dest_y = s->dest[0], *dest_cb = s->dest[1], *dest_cr = s->dest[2];
901  int dct_linesize, dct_offset;
902  const int linesize = s->cur_pic.linesize[0]; //not s->linesize as this would be wrong for field pics
903  const int uvlinesize = s->cur_pic.linesize[1];
904  const int block_size = lowres_flag ? 8 >> s->avctx->lowres : 8;
905 
906  dct_linesize = linesize << s->interlaced_dct;
907  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
908 
909  if (!s->mb_intra) {
910  /* motion handling */
911  if (HAVE_THREADS && is_mpeg12 != DEFINITELY_MPEG12_H261 &&
912  s->avctx->active_thread_type & FF_THREAD_FRAME) {
913  if (s->mv_dir & MV_DIR_FORWARD) {
914  ff_thread_progress_await(&s->last_pic.ptr->progress,
916  }
917  if (s->mv_dir & MV_DIR_BACKWARD) {
918  ff_thread_progress_await(&s->next_pic.ptr->progress,
920  }
921  }
922 
923  if (lowres_flag) {
924  const h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
925 
926  if (s->mv_dir & MV_DIR_FORWARD) {
927  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.data, op_pix);
928  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
929  }
930  if (s->mv_dir & MV_DIR_BACKWARD) {
931  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix);
932  }
933  } else {
934  const op_pixels_func (*op_pix)[4];
935  const qpel_mc_func (*op_qpix)[16];
936 
937  if ((is_mpeg12 == DEFINITELY_MPEG12_H261 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
938  op_pix = s->hdsp.put_pixels_tab;
939  op_qpix = s->qdsp.put_qpel_pixels_tab;
940  } else {
941  op_pix = s->hdsp.put_no_rnd_pixels_tab;
942  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
943  }
944  if (s->mv_dir & MV_DIR_FORWARD) {
945  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.data, op_pix, op_qpix);
946  op_pix = s->hdsp.avg_pixels_tab;
947  op_qpix = s->qdsp.avg_qpel_pixels_tab;
948  }
949  if (s->mv_dir & MV_DIR_BACKWARD) {
950  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix, op_qpix);
951  }
952  }
953 
954  /* skip dequant / idct if we are really late ;) */
955  if (s->avctx->skip_idct) {
956  if ( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
957  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
958  || s->avctx->skip_idct >= AVDISCARD_ALL)
959  return;
960  }
961 
962  /* add dct residue */
963  if (is_mpeg12 != DEFINITELY_MPEG12_H261 && s->dct_unquantize_inter) {
964  // H.263, H.263+, H.263I, FLV, RV10, RV20 and MPEG-4 with MPEG-2 quantization
965  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
966  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
967  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
968  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
969 
970  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
971  av_assert2(s->chroma_y_shift);
972  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
973  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
974  }
975  } else if (is_mpeg12 == DEFINITELY_MPEG12_H261 || lowres_flag || (s->codec_id != AV_CODEC_ID_WMV2)) {
976  // H.261, MPEG-1, MPEG-2, MPEG-4 with H.263 quantization,
977  // MSMP4V1-3 and WMV1.
978  // Also RV30, RV40 and the VC-1 family when performing error resilience,
979  // but all blocks are skipped in this case.
980  add_dct(s, block[0], 0, dest_y , dct_linesize);
981  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
982  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
983  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
984 
985  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
986  if (s->chroma_y_shift) {//Chroma420
987  add_dct(s, block[4], 4, dest_cb, uvlinesize);
988  add_dct(s, block[5], 5, dest_cr, uvlinesize);
989  } else {
990  //chroma422
991  dct_linesize = uvlinesize << s->interlaced_dct;
992  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
993 
994  add_dct(s, block[4], 4, dest_cb, dct_linesize);
995  add_dct(s, block[5], 5, dest_cr, dct_linesize);
996  add_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize);
997  add_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize);
998  if (!s->chroma_x_shift) {//Chroma444
999  add_dct(s, block[8], 8, dest_cb + block_size, dct_linesize);
1000  add_dct(s, block[9], 9, dest_cr + block_size, dct_linesize);
1001  add_dct(s, block[10], 10, dest_cb + block_size + dct_offset, dct_linesize);
1002  add_dct(s, block[11], 11, dest_cr + block_size + dct_offset, dct_linesize);
1003  }
1004  }
1005  } //fi gray
1006  } else if (CONFIG_WMV2_DECODER) {
1007  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1008  }
1009  } else {
1010  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
1011  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
1012  if (is_mpeg12 != DEFINITELY_MPEG12_H261 && CONFIG_MPEG4_DECODER &&
1013  /* s->codec_id == AV_CODEC_ID_MPEG4 && */
1014  s->avctx->bits_per_raw_sample > 8) {
1015  ff_mpeg4_decode_studio(s, dest_y, dest_cb, dest_cr, block_size,
1016  uvlinesize, dct_linesize, dct_offset);
1017  } else if (!IS_MPEG12_H261(s)) {
1018  /* dct only in intra block */
1019  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1020  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1021  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1022  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1023 
1024  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1025  if (s->chroma_y_shift) {
1026  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1027  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1028  } else {
1029  dct_offset >>= 1;
1030  dct_linesize >>= 1;
1031  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1032  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1033  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1034  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1035  }
1036  }
1037  } else {
1038  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
1039  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
1040  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
1041  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1042 
1043  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1044  if (s->chroma_y_shift) {
1045  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1046  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1047  } else {
1048  dct_linesize = uvlinesize << s->interlaced_dct;
1049  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1050 
1051  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
1052  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
1053  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1054  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1055  if (!s->chroma_x_shift) { //Chroma444
1056  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
1057  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
1058  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
1059  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
1060  }
1061  }
1062  } //gray
1063  }
1064  }
1065 }
1066 
1068 {
1069  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1070  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1071 
1072  s->cur_pic.qscale_table[mb_xy] = s->qscale;
1073 
1074  /* avoid copy if macroblock skipped in last frame too */
1075  if (s->mb_skipped) {
1076  s->mb_skipped = 0;
1077  av_assert2(s->pict_type != AV_PICTURE_TYPE_I);
1078  *mbskip_ptr = 1;
1079  } else if (!s->cur_pic.reference) {
1080  *mbskip_ptr = 1;
1081  } else{
1082  *mbskip_ptr = 0; /* not skipped */
1083  }
1084 
1085  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
1086  /* print DCT coefficients */
1087  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1088  for (int i = 0; i < 6; i++) {
1089  for (int j = 0; j < 64; j++) {
1090  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1091  block[i][s->idsp.idct_permutation[j]]);
1092  }
1093  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1094  }
1095  }
1096 
1097  av_assert2((s->out_format <= FMT_H261) == (s->out_format == FMT_H261 || s->out_format == FMT_MPEG1));
1098  if (!s->avctx->lowres) {
1099 #if !CONFIG_SMALL
1100  if (s->out_format <= FMT_H261)
1102  else
1104 #else
1106 #endif
1107  } else
1109 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:54
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:185
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:285
av_clip
#define av_clip
Definition: common.h:100
ff_thread_progress_report
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of p...
Definition: threadprogress.c:53
AVERROR
#define AVERROR(e) Convert a standard POSIX error code into a negative FFmpeg error code, e.g. AVERROR(EAGAIN) to signal that a call should be retried later. Definition: error.h
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1327
ff_mpv_decode_init
av_cold int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:47
threadprogress.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:821
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:126
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:187
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:1012
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_dec.c:865
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
MpegEncContext::workaround_bugs
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:97
AVFrame::width
int width
Definition: frame.h:482
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
b
#define b
Definition: input.c:42
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:210
mpegvideo.h
ff_wmv2_add_mb
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2dec.c:85
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
MAY_BE_MPEG12_H261
#define MAY_BE_MPEG12_H261
Definition: mpegvideo_dec.c:883
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:55
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:87
mpegutils.h
thread.h
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:182
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:189
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:638
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1067
MPVPicture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:68
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:54
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
fail
#define fail()
Definition: checkasm.h:196
MpegEncContext::padding_bug_score
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:256
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:109
MPVPicture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:65
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3369
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:427
MpegEncContext::width
int width
Definition: mpegvideo.h:87
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
ff_mpv_init_context_frame
av_cold int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:230
MPVPicture::dummy
int dummy
Picture is a dummy and should not be output.
Definition: mpegpicture.h:81
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:90
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
emms_c
#define emms_c()
Definition: emms.h:63
ff_mpeg_flush
av_cold void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:414
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2181
s
#define s(width, name)
Definition: cbs_vp9.c:198
MpegEncContext::last_time_base
int last_time_base
Definition: mpegvideo.h:243
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:107
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1048
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
decode.h
limits.h
AV_CODEC_ID_VC1IMAGE
@ AV_CODEC_ID_VC1IMAGE
Definition: codec_id.h:204
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:139
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:56
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:467
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:255
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:229
MpegEncContext::picture_pool
struct AVRefStructPool * picture_pool
Pool for MPVPictures.
Definition: mpegvideo.h:111
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:196
ff_thread_progress_await
void ff_thread_progress_await(const ThreadProgress *pro_c, int n)
This function is a no-op in no-op mode; otherwise it waits until other threads have reached a certain...
Definition: threadprogress.c:64
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:375
NULL
#define NULL
Definition: coverity.c:32
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:204
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:95
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:133
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:208
ff_mpv_decode_close
av_cold int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:128
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:109
DEFINITELY_MPEG12_H261
#define DEFINITELY_MPEG12_H261
Definition: mpegvideo_dec.c:884
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1342
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
IS_MPEG12_H261
#define IS_MPEG12_H261(s)
lowres
static int lowres
Definition: ffplay.c:330
FF_THREAD_IS_COPY
@ FF_THREAD_IS_COPY
Definition: thread.h:61
alloc_dummy_frame
static int av_cold alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst)
Definition: mpegvideo_dec.c:233
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:186
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:266
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
MpegEncContext::pb_field_time
uint16_t pb_field_time
like above, just for interlaced
Definition: mpegvideo.h:250
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:856
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:228
f
f
Definition: af_crystalizer.c:122
MPVPicture::mb_stride
int mb_stride
mb_stride of the tables
Definition: mpegpicture.h:79
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:155
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:161
height
#define height
Definition: dsp.h:89
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: h263.h:30
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:107
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:406
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
MpegEncContext::quarter_sample
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:252
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:309
offset
Byte offset of the object to be synchronized within the codec's private context, as passed to ff_thread_sync_ref(). Definition: writing_filters.txt:86
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:188
FF_THREAD_NO_FRAME_THREADING
@ FF_THREAD_NO_FRAME_THREADING
Definition: thread.h:63
color_frame
static void color_frame(AVFrame *frame, int luma)
Definition: mpegvideo_dec.c:248
MPVPicture::mb_width
int mb_width
mb_width of the tables
Definition: mpegpicture.h:77
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:820
AV_CODEC_ID_MSS2
@ AV_CODEC_ID_MSS2
Definition: codec_id.h:221
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1572
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
emms.h
MPVPicture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: mpegpicture.h:75
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: get_buffer.c:253
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:368
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo_dec.c:896
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:110
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
MPVPicture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:62
internal.h
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:476
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:204
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:127
MPVPicture::mb_height
int mb_height
mb_height of the tables
Definition: mpegpicture.h:78
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:688
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:633
MpegEncContext::picture_number
int picture_number
Definition: mpegvideo.h:103
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1389
avcodec.h
ff_mpv_workpic_from_pic
void ff_mpv_workpic_from_pic(MPVWorkPicture *wpic, MPVPicture *pic)
Definition: mpegpicture.c:128
stride
#define stride
Definition: h264pred_template.c:536
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:618
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
frame
A single unit of decoded (raw) audio or video data as it flows through the decoding pipeline; stored in an AVFrame structure. Definition: filter_design.txt:264
Definition: filter_design.txt:264
ff_thread_sync_ref
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows to synchronize objects whose lifetime is the whole decoding process among all frame threads.
Definition: decode.c:1839
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:395
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
update_thread_context for mpegvideo-based decoders.
Definition: mpegvideo_dec.c:78
ff_mpeg4_decode_studio
void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int block_size, int uvlinesize, int dct_linesize, int dct_offset)
Definition: mpeg4videodec.c:254
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:482
alloc_picture
static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
Definition: mpegvideo_dec.c:181
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_dec.c:872
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:360
NOT_MPEG12_H261
#define NOT_MPEG12_H261
Definition: mpegvideo_dec.c:882
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:607
MPVWorkPicture
Definition: mpegpicture.h:95
ThreadingStatus
ThreadingStatus
Definition: thread.h:60
MPVPicture::progress
ThreadProgress progress
Definition: mpegpicture.h:92
MpegEncContext::first_field
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:310
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:181
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mpv_free_context_frame
av_cold void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:441
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2070
AV_CODEC_ID_WMV3IMAGE
@ AV_CODEC_ID_WMV3IMAGE
Definition: codec_id.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:225
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
video_enc_params.h
ff_mpv_common_frame_size_change
av_cold int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:137
h263.h