/*
 * FFmpeg — libavcodec/decode.c (generic decoding-related code)
 * NOTE(review): this text was extracted from a Doxygen HTML page; stray
 * per-line numbers remain fused into the content below and a number of
 * source lines were dropped by the extraction — verify any edit against
 * the upstream FFmpeg repository before relying on it.
 */
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <stdbool.h>
23 #include <string.h>
24 
25 #include "config.h"
26 
27 #if CONFIG_ICONV
28 # include <iconv.h>
29 #endif
30 
31 #include "libavutil/avassert.h"
33 #include "libavutil/common.h"
34 #include "libavutil/emms.h"
35 #include "libavutil/frame.h"
36 #include "libavutil/hwcontext.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/internal.h"
40 #include "libavutil/mem.h"
41 #include "libavutil/stereo3d.h"
42 
43 #include "avcodec.h"
44 #include "avcodec_internal.h"
45 #include "bytestream.h"
46 #include "bsf.h"
47 #include "codec_desc.h"
48 #include "codec_internal.h"
49 #include "decode.h"
50 #include "exif.h"
51 #include "hwaccel_internal.h"
52 #include "hwconfig.h"
53 #include "internal.h"
54 #include "lcevcdec.h"
55 #include "packet_internal.h"
56 #include "progressframe.h"
57 #include "libavutil/refstruct.h"
58 #include "thread.h"
59 #include "threadprogress.h"
60 
61 typedef struct DecodeContext {
63 
64  /**
65  * This is set to AV_FRAME_FLAG_KEY for decoders of intra-only formats
66  * (those whose codec descriptor has AV_CODEC_PROP_INTRA_ONLY set)
67  * to set the flag generically.
68  */
70 
71  /**
72  * This is set to AV_PICTURE_TYPE_I for intra only video decoders
73  * and to AV_PICTURE_TYPE_NONE for other decoders. It is used to set
74  * the AVFrame's pict_type before the decoder receives it.
75  */
77 
78  /* to prevent infinite loop on errors when draining */
80 
81  /**
82  * The caller has submitted a NULL packet on input.
83  */
85 
86  int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far
87  int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far
88  int64_t pts_correction_last_pts; /// PTS of the last frame
89  int64_t pts_correction_last_dts; /// DTS of the last frame
90 
91  /**
92  * Bitmask indicating for which side data types we prefer user-supplied
93  * (global or attached to packets) side data over bytestream.
94  */
96 
97 #if CONFIG_LIBLCEVC_DEC
98  struct {
100  int frame;
101  int base_width;
102  int base_height;
103  int width;
104  int height;
105  } lcevc;
106 #endif
107 } DecodeContext;
108 
110 {
111  return (DecodeContext *)avci;
112 }
113 
/**
 * Apply in-band parameter-change side data from a packet to the codec
 * context (sample rate and/or frame dimensions).
 *
 * NOTE(review): several statements are missing from this extraction — the
 * side-data fetch that sets `data`/`size`, the flag tests guarding each
 * branch, and the error-code assignments preceding two `goto fail2`
 * jumps. Confirm against the upstream file before modifying.
 *
 * @return 0 on success or when errors are tolerated; a negative AVERROR
 *         code only when err_recognition includes AV_EF_EXPLODE.
 */
114 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
115 {
116  int ret;
117  size_t size;
118  const uint8_t *data;
119  uint32_t flags;
120  int64_t val;
121 
 /* presumably: data = av_packet_get_side_data(avpkt, ..., &size) — line missing here */
123  if (!data)
124  return 0;
125 
 /* Reject PARAM_CHANGE side data sent to a decoder that cannot honor it. */
126  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
127  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
128  "changes, but PARAM_CHANGE side data was sent to it.\n");
129  ret = AVERROR(EINVAL);
130  goto fail2;
131  }
132 
133  if (size < 4)
134  goto fail;
135 
 /* Little-endian flag word selects which parameters follow. */
136  flags = bytestream_get_le32(&data);
137  size -= 4;
138 
 /* presumably: if (flags & ..._SAMPLE_RATE) { — line missing here */
140  if (size < 4)
141  goto fail;
142  val = bytestream_get_le32(&data);
143  if (val <= 0 || val > INT_MAX) {
144  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
 /* presumably: ret = AVERROR_INVALIDDATA; — line missing here */
146  goto fail2;
147  }
148  avctx->sample_rate = val;
149  size -= 4;
150  }
 /* presumably: if (flags & ..._DIMENSIONS) { — line missing here */
152  if (size < 8)
153  goto fail;
154  avctx->width = bytestream_get_le32(&data);
155  avctx->height = bytestream_get_le32(&data);
156  size -= 8;
157  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
158  if (ret < 0)
159  goto fail2;
160  }
161 
162  return 0;
163 fail:
164  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
 /* presumably: ret = AVERROR_INVALIDDATA; — line missing here */
166 fail2:
 /* Errors are only fatal when the caller opted in via AV_EF_EXPLODE. */
167  if (ret < 0) {
168  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
169  if (avctx->err_recognition & AV_EF_EXPLODE)
170  return ret;
171  }
172  return 0;
173 }
174 
176 {
177  int ret = 0;
178 
180  if (pkt) {
182  }
183  return ret;
184 }
185 
187 {
188  AVCodecInternal *avci = avctx->internal;
189  const FFCodec *const codec = ffcodec(avctx->codec);
190  int ret;
191 
192  if (avci->bsf)
193  return 0;
194 
195  ret = av_bsf_list_parse_str(codec->bsfs, &avci->bsf);
196  if (ret < 0) {
197  av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", codec->bsfs, av_err2str(ret));
198  if (ret != AVERROR(ENOMEM))
199  ret = AVERROR_BUG;
200  goto fail;
201  }
202 
203  /* We do not currently have an API for passing the input timebase into decoders,
204  * but no filters used here should actually need it.
205  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
206  avci->bsf->time_base_in = (AVRational){ 1, 90000 };
208  if (ret < 0)
209  goto fail;
210 
211  ret = av_bsf_init(avci->bsf);
212  if (ret < 0)
213  goto fail;
214 
215  return 0;
216 fail:
217  av_bsf_free(&avci->bsf);
218  return ret;
219 }
220 
221 #if !HAVE_THREADS
222 #define ff_thread_get_packet(avctx, pkt) (AVERROR_BUG)
223 #define ff_thread_receive_frame(avctx, frame, flags) (AVERROR_BUG)
224 #endif
225 
227 {
228  AVCodecInternal *avci = avctx->internal;
229  int ret;
230 
231  ret = av_bsf_receive_packet(avci->bsf, pkt);
232  if (ret < 0)
233  return ret;
234 
237  if (ret < 0)
238  goto finish;
239  }
240 
241  ret = apply_param_change(avctx, pkt);
242  if (ret < 0)
243  goto finish;
244 
245  return 0;
246 finish:
248  return ret;
249 }
250 
252 {
253  AVCodecInternal *avci = avctx->internal;
254  DecodeContext *dc = decode_ctx(avci);
255 
256  if (avci->draining)
257  return AVERROR_EOF;
258 
259  /* If we are a worker thread, get the next packet from the threading
260  * context. Otherwise we are the main (user-facing) context, so we get the
261  * next packet from the input filterchain.
262  */
263  if (avctx->internal->is_frame_mt)
264  return ff_thread_get_packet(avctx, pkt);
265 
266  while (1) {
267  int ret = decode_get_packet(avctx, pkt);
268  if (ret == AVERROR(EAGAIN) &&
269  (!AVPACKET_IS_EMPTY(avci->buffer_pkt) || dc->draining_started)) {
270  ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
271  if (ret >= 0)
272  continue;
273 
275  }
276 
277  if (ret == AVERROR_EOF)
278  avci->draining = 1;
279  return ret;
280  }
281 }
282 
283 /**
284  * Attempt to guess proper monotonic timestamps for decoded video frames
285  * which might have incorrect times. Input timestamps may wrap around, in
286  * which case the output will as well.
287  *
288  * @param pts the pts field of the decoded AVPacket, as passed through
289  * AVFrame.pts
290  * @param dts the dts field of the decoded AVPacket
291  * @return one of the input values, may be AV_NOPTS_VALUE
292  */
294  int64_t reordered_pts, int64_t dts)
295 {
297 
298  if (dts != AV_NOPTS_VALUE) {
299  dc->pts_correction_num_faulty_dts += dts <= dc->pts_correction_last_dts;
300  dc->pts_correction_last_dts = dts;
301  } else if (reordered_pts != AV_NOPTS_VALUE)
302  dc->pts_correction_last_dts = reordered_pts;
303 
304  if (reordered_pts != AV_NOPTS_VALUE) {
305  dc->pts_correction_num_faulty_pts += reordered_pts <= dc->pts_correction_last_pts;
306  dc->pts_correction_last_pts = reordered_pts;
307  } else if(dts != AV_NOPTS_VALUE)
308  dc->pts_correction_last_pts = dts;
309 
310  if ((dc->pts_correction_num_faulty_pts<=dc->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
311  && reordered_pts != AV_NOPTS_VALUE)
312  pts = reordered_pts;
313  else
314  pts = dts;
315 
316  return pts;
317 }
318 
/**
 * Process skip/discard sample information on a decoded audio frame:
 * drop leading samples requested via avci->skip_samples, drop trailing
 * padding, and shift pts/pkt_dts/duration to match.
 *
 * NOTE(review): the av_frame_get_side_data() call that sets `side` and the
 * side-data allocation inside the SKIP_MANUAL branch are missing from this
 * extraction — confirm against the upstream file.
 *
 * @param discarded_samples incremented by the number of samples dropped
 * @return 0 on success, AVERROR(EAGAIN) when the entire frame was consumed
 *         by skipping/discarding and must not be returned to the caller
 */
319 static int discard_samples(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
320 {
321  AVCodecInternal *avci = avctx->internal;
322  AVFrameSideData *side;
323  uint32_t discard_padding = 0;
324  uint8_t skip_reason = 0;
325  uint8_t discard_reason = 0;
326 
 /* presumably: side = av_frame_get_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES) — line missing */
328  if (side && side->size >= 10) {
329  avci->skip_samples = AV_RL32(side->data);
330  avci->skip_samples = FFMAX(0, avci->skip_samples);
331  discard_padding = AV_RL32(side->data + 4);
332  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
333  avci->skip_samples, (int)discard_padding);
334  skip_reason = AV_RL8(side->data + 8);
335  discard_reason = AV_RL8(side->data + 9);
336  }
337 
 /* With SKIP_MANUAL the caller applies skipping itself: re-attach the skip
  * information as frame side data instead of acting on it here. */
338  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
339  if (!side && (avci->skip_samples || discard_padding))
 /* presumably: side = av_frame_new_side_data(frame, ..., 10) — line missing */
341  if (side && (avci->skip_samples || discard_padding)) {
342  AV_WL32(side->data, avci->skip_samples);
343  AV_WL32(side->data + 4, discard_padding);
344  AV_WL8(side->data + 8, skip_reason);
345  AV_WL8(side->data + 9, discard_reason);
346  avci->skip_samples = 0;
347  }
348  return 0;
349  }
351 
 /* Frame flagged for discard: count its samples and drop it entirely. */
352  if ((frame->flags & AV_FRAME_FLAG_DISCARD)) {
353  avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
354  *discarded_samples += frame->nb_samples;
355  return AVERROR(EAGAIN);
356  }
357 
358  if (avci->skip_samples > 0) {
 /* Entire frame falls inside the skip region — drop the whole frame. */
359  if (frame->nb_samples <= avci->skip_samples){
360  *discarded_samples += frame->nb_samples;
361  avci->skip_samples -= frame->nb_samples;
362  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
363  avci->skip_samples);
364  return AVERROR(EAGAIN);
365  } else {
 /* Drop only the first skip_samples samples in place and shift the
  * remaining data's timestamps forward by the skipped duration. */
366  av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
367  frame->nb_samples - avci->skip_samples, avctx->ch_layout.nb_channels, frame->format);
368  if (avctx->pkt_timebase.num && avctx->sample_rate) {
369  int64_t diff_ts = av_rescale_q(avci->skip_samples,
370  (AVRational){1, avctx->sample_rate},
371  avctx->pkt_timebase);
372  if (frame->pts != AV_NOPTS_VALUE)
373  frame->pts += diff_ts;
374  if (frame->pkt_dts != AV_NOPTS_VALUE)
375  frame->pkt_dts += diff_ts;
376  if (frame->duration >= diff_ts)
377  frame->duration -= diff_ts;
378  } else
379  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
380 
381  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
382  avci->skip_samples, frame->nb_samples);
383  *discarded_samples += avci->skip_samples;
384  frame->nb_samples -= avci->skip_samples;
385  avci->skip_samples = 0;
386  }
387  }
388 
 /* Trailing padding: remove discard_padding samples from the frame's end
  * and shrink the duration accordingly. */
389  if (discard_padding > 0 && discard_padding <= frame->nb_samples) {
390  if (discard_padding == frame->nb_samples) {
391  *discarded_samples += frame->nb_samples;
392  return AVERROR(EAGAIN);
393  } else {
394  if (avctx->pkt_timebase.num && avctx->sample_rate) {
395  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
396  (AVRational){1, avctx->sample_rate},
397  avctx->pkt_timebase);
398  frame->duration = diff_ts;
399  } else
400  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
401 
402  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
403  (int)discard_padding, frame->nb_samples);
404  frame->nb_samples -= discard_padding;
405  }
406  }
407 
408  return 0;
409 }
410 
411 /*
412  * The core of the receive_frame_wrapper for the decoders implementing
413  * the simple API. Certain decoders might consume partial packets without
414  * returning any output, so this function needs to be called in a loop until it
415  * returns EAGAIN.
416  **/
417 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
418 {
419  AVCodecInternal *avci = avctx->internal;
420  DecodeContext *dc = decode_ctx(avci);
421  AVPacket *const pkt = avci->in_pkt;
422  const FFCodec *const codec = ffcodec(avctx->codec);
423  int got_frame, consumed;
424  int ret;
425 
426  if (!pkt->data && !avci->draining) {
428  ret = ff_decode_get_packet(avctx, pkt);
429  if (ret < 0 && ret != AVERROR_EOF)
430  return ret;
431  }
432 
433  // Some codecs (at least wma lossless) will crash when feeding drain packets
434  // after EOF was signaled.
435  if (avci->draining_done)
436  return AVERROR_EOF;
437 
438  if (!pkt->data &&
440  return AVERROR_EOF;
441 
442  got_frame = 0;
443 
444  frame->pict_type = dc->initial_pict_type;
445  frame->flags |= dc->intra_only_flag;
446  consumed = codec->cb.decode(avctx, frame, &got_frame, pkt);
447 
449  frame->pkt_dts = pkt->dts;
450  emms_c();
451 
452  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
453  ret = (!got_frame || frame->flags & AV_FRAME_FLAG_DISCARD)
454  ? AVERROR(EAGAIN)
455  : 0;
456  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
457  ret = !got_frame ? AVERROR(EAGAIN)
458  : discard_samples(avctx, frame, discarded_samples);
459  } else
460  av_assert0(0);
461 
462  if (ret == AVERROR(EAGAIN))
464 
465  // FF_CODEC_CB_TYPE_DECODE decoders must not return AVERROR EAGAIN
466  // code later will add AVERROR(EAGAIN) to a pointer
467  av_assert0(consumed != AVERROR(EAGAIN));
468  if (consumed < 0)
469  ret = consumed;
470  if (consumed >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO)
471  consumed = pkt->size;
472 
473  if (!ret)
474  av_assert0(frame->buf[0]);
475  if (ret == AVERROR(EAGAIN))
476  ret = 0;
477 
478  /* do not stop draining when got_frame != 0 or ret < 0 */
479  if (avci->draining && !got_frame) {
480  if (ret < 0) {
481  /* prevent infinite loop if a decoder wrongly always return error on draining */
482  /* reasonable nb_errors_max = maximum b frames + thread count */
483  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
484  avctx->thread_count : 1);
485 
486  if (decode_ctx(avci)->nb_draining_errors++ >= nb_errors_max) {
487  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
488  "Stop draining and force EOF.\n");
489  avci->draining_done = 1;
490  ret = AVERROR_BUG;
491  }
492  } else {
493  avci->draining_done = 1;
494  }
495  }
496 
497  if (consumed >= pkt->size || ret < 0) {
499  } else {
500  pkt->data += consumed;
501  pkt->size -= consumed;
507  }
508  }
509 
510  return ret;
511 }
512 
513 #if CONFIG_LCMS2
514 static int detect_colorspace(AVCodecContext *avctx, AVFrame *frame)
515 {
516  AVCodecInternal *avci = avctx->internal;
518  AVColorPrimariesDesc coeffs;
519  enum AVColorPrimaries prim;
520  cmsHPROFILE profile;
521  AVFrameSideData *sd;
522  int ret;
523  if (!(avctx->flags2 & AV_CODEC_FLAG2_ICC_PROFILES))
524  return 0;
525 
527  if (!sd || !sd->size)
528  return 0;
529 
530  if (!avci->icc.avctx) {
531  ret = ff_icc_context_init(&avci->icc, avctx);
532  if (ret < 0)
533  return ret;
534  }
535 
536  profile = cmsOpenProfileFromMemTHR(avci->icc.ctx, sd->data, sd->size);
537  if (!profile)
538  return AVERROR_INVALIDDATA;
539 
540  ret = ff_icc_profile_sanitize(&avci->icc, profile);
541  if (!ret)
542  ret = ff_icc_profile_read_primaries(&avci->icc, profile, &coeffs);
543  if (!ret)
544  ret = ff_icc_profile_detect_transfer(&avci->icc, profile, &trc);
545  cmsCloseProfile(profile);
546  if (ret < 0)
547  return ret;
548 
549  prim = av_csp_primaries_id_from_desc(&coeffs);
550  if (prim != AVCOL_PRI_UNSPECIFIED)
551  frame->color_primaries = prim;
552  if (trc != AVCOL_TRC_UNSPECIFIED)
553  frame->color_trc = trc;
554  return 0;
555 }
556 #else /* !CONFIG_LCMS2 */
558 {
559  return 0;
560 }
561 #endif
562 
563 static int fill_frame_props(const AVCodecContext *avctx, AVFrame *frame)
564 {
565  int ret;
566 
567  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
568  frame->color_primaries = avctx->color_primaries;
569  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
570  frame->color_trc = avctx->color_trc;
571  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
572  frame->colorspace = avctx->colorspace;
573  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
574  frame->color_range = avctx->color_range;
575  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
576  frame->chroma_location = avctx->chroma_sample_location;
577  if (frame->alpha_mode == AVALPHA_MODE_UNSPECIFIED)
578  frame->alpha_mode = avctx->alpha_mode;
579 
580  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
581  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
582  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
583  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
584  if (frame->format == AV_SAMPLE_FMT_NONE)
585  frame->format = avctx->sample_fmt;
586  if (!frame->ch_layout.nb_channels) {
587  ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
588  if (ret < 0)
589  return ret;
590  }
591  if (!frame->sample_rate)
592  frame->sample_rate = avctx->sample_rate;
593  }
594 
595  return 0;
596 }
597 
599 {
600  int ret;
601  int64_t discarded_samples = 0;
602 
603  while (!frame->buf[0]) {
604  if (discarded_samples > avctx->max_samples)
605  return AVERROR(EAGAIN);
606  ret = decode_simple_internal(avctx, frame, &discarded_samples);
607  if (ret < 0)
608  return ret;
609  }
610 
611  return 0;
612 }
613 
615 {
616  AVCodecInternal *avci = avctx->internal;
617  DecodeContext *dc = decode_ctx(avci);
618  const FFCodec *const codec = ffcodec(avctx->codec);
619  int ret;
620 
621  av_assert0(!frame->buf[0]);
622 
623  if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) {
624  while (1) {
625  frame->pict_type = dc->initial_pict_type;
626  frame->flags |= dc->intra_only_flag;
627  ret = codec->cb.receive_frame(avctx, frame);
628  emms_c();
629  if (!ret) {
630  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
631  int64_t discarded_samples = 0;
632  ret = discard_samples(avctx, frame, &discarded_samples);
633  }
634  if (ret == AVERROR(EAGAIN) || (frame->flags & AV_FRAME_FLAG_DISCARD)) {
636  continue;
637  }
638  }
639  break;
640  }
641  } else
643 
644  if (ret == AVERROR_EOF)
645  avci->draining_done = 1;
646 
647  return ret;
648 }
649 
651  unsigned flags)
652 {
653  AVCodecInternal *avci = avctx->internal;
654  DecodeContext *dc = decode_ctx(avci);
655  int ret, ok;
656 
657  if (avctx->active_thread_type & FF_THREAD_FRAME)
659  else
661 
662  /* preserve ret */
663  ok = detect_colorspace(avctx, frame);
664  if (ok < 0) {
666  return ok;
667  }
668 
669  if (!ret) {
670  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
671  if (!frame->width)
672  frame->width = avctx->width;
673  if (!frame->height)
674  frame->height = avctx->height;
675  }
676 
677  ret = fill_frame_props(avctx, frame);
678  if (ret < 0) {
680  return ret;
681  }
682 
683  frame->best_effort_timestamp = guess_correct_pts(dc,
684  frame->pts,
685  frame->pkt_dts);
686 
687  /* the only case where decode data is not set should be decoders
688  * that do not call ff_get_buffer() */
689  av_assert0(frame->private_ref ||
690  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
691 
692  if (frame->private_ref) {
693  FrameDecodeData *fdd = frame->private_ref;
694 
695  if (fdd->post_process) {
696  ret = fdd->post_process(avctx, frame);
697  if (ret < 0) {
699  return ret;
700  }
701  }
702  }
703  }
704 
705  /* free the per-frame decode data */
706  av_refstruct_unref(&frame->private_ref);
707 
708  return ret;
709 }
710 
712 {
713  AVCodecInternal *avci = avctx->internal;
714  DecodeContext *dc = decode_ctx(avci);
715  int ret;
716 
717  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
718  return AVERROR(EINVAL);
719 
720  if (dc->draining_started)
721  return AVERROR_EOF;
722 
723  if (avpkt && !avpkt->size && avpkt->data)
724  return AVERROR(EINVAL);
725 
726  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
727  if (!AVPACKET_IS_EMPTY(avci->buffer_pkt))
728  return AVERROR(EAGAIN);
729  ret = av_packet_ref(avci->buffer_pkt, avpkt);
730  if (ret < 0)
731  return ret;
732  } else
733  dc->draining_started = 1;
734 
735  if (!avci->buffer_frame->buf[0] && !dc->draining_started) {
737  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
738  return ret;
739  }
740 
741  return 0;
742 }
743 
745 {
746  /* make sure we are noisy about decoders returning invalid cropping data */
747  if (frame->crop_left >= INT_MAX - frame->crop_right ||
748  frame->crop_top >= INT_MAX - frame->crop_bottom ||
749  (frame->crop_left + frame->crop_right) >= frame->width ||
750  (frame->crop_top + frame->crop_bottom) >= frame->height) {
751  av_log(avctx, AV_LOG_WARNING,
752  "Invalid cropping information set by a decoder: "
753  "%zu/%zu/%zu/%zu (frame size %dx%d). "
754  "This is a bug, please report it\n",
755  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
756  frame->width, frame->height);
757  frame->crop_left = 0;
758  frame->crop_right = 0;
759  frame->crop_top = 0;
760  frame->crop_bottom = 0;
761  return 0;
762  }
763 
764  if (!avctx->apply_cropping)
765  return 0;
766 
769 }
770 
771 // make sure frames returned to the caller are valid
773 {
774  if (!frame->buf[0] || frame->format < 0)
775  goto fail;
776 
777  switch (avctx->codec_type) {
778  case AVMEDIA_TYPE_VIDEO:
779  if (frame->width <= 0 || frame->height <= 0)
780  goto fail;
781  break;
782  case AVMEDIA_TYPE_AUDIO:
783  if (!av_channel_layout_check(&frame->ch_layout) ||
784  frame->sample_rate <= 0)
785  goto fail;
786 
787  break;
788  default: av_assert0(0);
789  }
790 
791  return 0;
792 fail:
793  av_log(avctx, AV_LOG_ERROR, "An invalid frame was output by a decoder. "
794  "This is a bug, please report it.\n");
795  return AVERROR_BUG;
796 }
797 
799 {
800  AVCodecInternal *avci = avctx->internal;
801  int ret;
802 
803  if (avci->buffer_frame->buf[0]) {
805  } else {
807  if (ret < 0)
808  return ret;
809  }
810 
811  ret = frame_validate(avctx, frame);
812  if (ret < 0)
813  goto fail;
814 
815  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
816  ret = apply_cropping(avctx, frame);
817  if (ret < 0)
818  goto fail;
819  }
820 
821  avctx->frame_num++;
822 
823  return 0;
824 fail:
826  return ret;
827 }
828 
830 {
831  memset(sub, 0, sizeof(*sub));
832  sub->pts = AV_NOPTS_VALUE;
833 }
834 
835 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
836 static int recode_subtitle(AVCodecContext *avctx, const AVPacket **outpkt,
837  const AVPacket *inpkt, AVPacket *buf_pkt)
838 {
839 #if CONFIG_ICONV
840  iconv_t cd = (iconv_t)-1;
841  int ret = 0;
842  char *inb, *outb;
843  size_t inl, outl;
844 #endif
845 
846  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0) {
847  *outpkt = inpkt;
848  return 0;
849  }
850 
851 #if CONFIG_ICONV
852  inb = inpkt->data;
853  inl = inpkt->size;
854 
855  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
856  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
857  return AVERROR(ERANGE);
858  }
859 
860  cd = iconv_open("UTF-8", avctx->sub_charenc);
861  av_assert0(cd != (iconv_t)-1);
862 
863  ret = av_new_packet(buf_pkt, inl * UTF8_MAX_BYTES);
864  if (ret < 0)
865  goto end;
866  ret = av_packet_copy_props(buf_pkt, inpkt);
867  if (ret < 0)
868  goto end;
869  outb = buf_pkt->data;
870  outl = buf_pkt->size;
871 
872  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
873  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
874  outl >= buf_pkt->size || inl != 0) {
875  ret = FFMIN(AVERROR(errno), -1);
876  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
877  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
878  goto end;
879  }
880  buf_pkt->size -= outl;
881  memset(buf_pkt->data + buf_pkt->size, 0, outl);
882  *outpkt = buf_pkt;
883 
884  ret = 0;
885 end:
886  if (ret < 0)
887  av_packet_unref(buf_pkt);
888  if (cd != (iconv_t)-1)
889  iconv_close(cd);
890  return ret;
891 #else
892  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
893  return AVERROR(EINVAL);
894 #endif
895 }
896 
897 static int utf8_check(const uint8_t *str)
898 {
899  const uint8_t *byte;
900  uint32_t codepoint, min;
901 
902  while (*str) {
903  byte = str;
904  GET_UTF8(codepoint, *(byte++), return 0;);
905  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
906  1 << (5 * (byte - str) - 4);
907  if (codepoint < min || codepoint >= 0x110000 ||
908  codepoint == 0xFFFE /* BOM */ ||
909  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
910  return 0;
911  str = byte;
912  }
913  return 1;
914 }
915 
917  int *got_sub_ptr, const AVPacket *avpkt)
918 {
919  int ret = 0;
920 
921  if (!avpkt->data && avpkt->size) {
922  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
923  return AVERROR(EINVAL);
924  }
925  if (!avctx->codec)
926  return AVERROR(EINVAL);
928  av_log(avctx, AV_LOG_ERROR, "Codec not subtitle decoder\n");
929  return AVERROR(EINVAL);
930  }
931 
932  *got_sub_ptr = 0;
934 
935  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
936  AVCodecInternal *avci = avctx->internal;
937  const AVPacket *pkt;
938 
939  ret = recode_subtitle(avctx, &pkt, avpkt, avci->buffer_pkt);
940  if (ret < 0)
941  return ret;
942 
943  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
944  sub->pts = av_rescale_q(avpkt->pts,
945  avctx->pkt_timebase, AV_TIME_BASE_Q);
946  ret = ffcodec(avctx->codec)->cb.decode_sub(avctx, sub, got_sub_ptr, pkt);
947  if (pkt == avci->buffer_pkt) // did we recode?
949  if (ret < 0) {
950  *got_sub_ptr = 0;
951  avsubtitle_free(sub);
952  return ret;
953  }
954  av_assert1(!sub->num_rects || *got_sub_ptr);
955 
956  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
957  avctx->pkt_timebase.num) {
958  AVRational ms = { 1, 1000 };
959  sub->end_display_time = av_rescale_q(avpkt->duration,
960  avctx->pkt_timebase, ms);
961  }
962 
964  sub->format = 0;
965  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
966  sub->format = 1;
967 
968  for (unsigned i = 0; i < sub->num_rects; i++) {
970  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
971  av_log(avctx, AV_LOG_ERROR,
972  "Invalid UTF-8 in decoded subtitles text; "
973  "maybe missing -sub_charenc option\n");
974  avsubtitle_free(sub);
975  *got_sub_ptr = 0;
976  return AVERROR_INVALIDDATA;
977  }
978  }
979 
980  if (*got_sub_ptr)
981  avctx->frame_num++;
982  }
983 
984  return ret;
985 }
986 
988  const enum AVPixelFormat *fmt)
989 {
990  const AVPixFmtDescriptor *desc;
991  const AVCodecHWConfig *config;
992  int i, n;
993 
994  // If a device was supplied when the codec was opened, assume that the
995  // user wants to use it.
996  if (avctx->hw_device_ctx && ffcodec(avctx->codec)->hw_configs) {
997  AVHWDeviceContext *device_ctx =
999  for (i = 0;; i++) {
1000  config = &ffcodec(avctx->codec)->hw_configs[i]->public;
1001  if (!config)
1002  break;
1003  if (!(config->methods &
1005  continue;
1006  if (device_ctx->type != config->device_type)
1007  continue;
1008  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1009  if (config->pix_fmt == fmt[n])
1010  return fmt[n];
1011  }
1012  }
1013  }
1014  // No device or other setup, so we have to choose from things which
1015  // don't any other external information.
1016 
1017  // If the last element of the list is a software format, choose it
1018  // (this should be best software format if any exist).
1019  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1020  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1021  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1022  return fmt[n - 1];
1023 
1024  // Finally, traverse the list in order and choose the first entry
1025  // with no external dependencies (if there is no hardware configuration
1026  // information available then this just picks the first entry).
1027  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1028  for (i = 0;; i++) {
1029  config = avcodec_get_hw_config(avctx->codec, i);
1030  if (!config)
1031  break;
1032  if (config->pix_fmt == fmt[n])
1033  break;
1034  }
1035  if (!config) {
1036  // No specific config available, so the decoder must be able
1037  // to handle this format without any additional setup.
1038  return fmt[n];
1039  }
1040  if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1041  // Usable with only internal setup.
1042  return fmt[n];
1043  }
1044  }
1045 
1046  // Nothing is usable, give up.
1047  return AV_PIX_FMT_NONE;
1048 }
1049 
1051  enum AVHWDeviceType dev_type)
1052 {
1053  AVHWDeviceContext *device_ctx;
1054  AVHWFramesContext *frames_ctx;
1055  int ret;
1056 
1057  if (!avctx->hwaccel)
1058  return AVERROR(ENOSYS);
1059 
1060  if (avctx->hw_frames_ctx)
1061  return 0;
1062  if (!avctx->hw_device_ctx) {
1063  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1064  "required for hardware accelerated decoding.\n");
1065  return AVERROR(EINVAL);
1066  }
1067 
1068  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1069  if (device_ctx->type != dev_type) {
1070  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1071  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1072  av_hwdevice_get_type_name(device_ctx->type));
1073  return AVERROR(EINVAL);
1074  }
1075 
1077  avctx->hw_device_ctx,
1078  avctx->hwaccel->pix_fmt,
1079  &avctx->hw_frames_ctx);
1080  if (ret < 0)
1081  return ret;
1082 
1083  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1084 
1085 
1086  if (frames_ctx->initial_pool_size) {
1087  // We guarantee 4 base work surfaces. The function above guarantees 1
1088  // (the absolute minimum), so add the missing count.
1089  frames_ctx->initial_pool_size += 3;
1090  }
1091 
1093  if (ret < 0) {
1094  av_buffer_unref(&avctx->hw_frames_ctx);
1095  return ret;
1096  }
1097 
1098  return 0;
1099 }
1100 
1102  AVBufferRef *device_ref,
1104  AVBufferRef **out_frames_ref)
1105 {
1106  AVBufferRef *frames_ref = NULL;
1107  const AVCodecHWConfigInternal *hw_config;
1108  const FFHWAccel *hwa;
1109  int i, ret;
1110  bool clean_priv_data = false;
1111 
1112  for (i = 0;; i++) {
1113  hw_config = ffcodec(avctx->codec)->hw_configs[i];
1114  if (!hw_config)
1115  return AVERROR(ENOENT);
1116  if (hw_config->public.pix_fmt == hw_pix_fmt)
1117  break;
1118  }
1119 
1120  hwa = hw_config->hwaccel;
1121  if (!hwa || !hwa->frame_params)
1122  return AVERROR(ENOENT);
1123 
1124  frames_ref = av_hwframe_ctx_alloc(device_ref);
1125  if (!frames_ref)
1126  return AVERROR(ENOMEM);
1127 
1128  if (!avctx->internal->hwaccel_priv_data) {
1129  avctx->internal->hwaccel_priv_data =
1130  av_mallocz(hwa->priv_data_size);
1131  if (!avctx->internal->hwaccel_priv_data) {
1132  av_buffer_unref(&frames_ref);
1133  return AVERROR(ENOMEM);
1134  }
1135  clean_priv_data = true;
1136  }
1137 
1138  ret = hwa->frame_params(avctx, frames_ref);
1139  if (ret >= 0) {
1140  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1141 
1142  if (frames_ctx->initial_pool_size) {
1143  // If the user has requested that extra output surfaces be
1144  // available then add them here.
1145  if (avctx->extra_hw_frames > 0)
1146  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1147 
1148  // If frame threading is enabled then an extra surface per thread
1149  // is also required.
1150  if (avctx->active_thread_type & FF_THREAD_FRAME)
1151  frames_ctx->initial_pool_size += avctx->thread_count;
1152  }
1153 
1154  *out_frames_ref = frames_ref;
1155  } else {
1156  if (clean_priv_data)
1158  av_buffer_unref(&frames_ref);
1159  }
1160  return ret;
1161 }
1162 
/**
 * Install and initialise the given hwaccel on the codec context:
 * allocate its private data (if any), point avctx->hwaccel at it and run
 * its init() callback.
 *
 * NOTE(review): two lines are missing from this extraction — the second
 * half of the experimental-hwaccel condition (original line 1169) and the
 * cleanup statement before resetting avctx->hwaccel on init failure
 * (original line 1189). Confirm against the upstream file.
 *
 * @return 0 on success, AVERROR_PATCHWELCOME when an experimental hwaccel
 *         is rejected, or the negative error returned by hwaccel->init()
 */
1163 static int hwaccel_init(AVCodecContext *avctx,
1164  const FFHWAccel *hwaccel)
1165 {
1166  int err;
1167 
1168  if (hwaccel->p.capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
 /* presumably: test that experimental hwaccels were not opted into — line missing */
1170  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1171  hwaccel->p.name);
1172  return AVERROR_PATCHWELCOME;
1173  }
1174 
 /* Allocate per-hwaccel private state once, only if the hwaccel needs any. */
1175  if (!avctx->internal->hwaccel_priv_data && hwaccel->priv_data_size) {
1176  avctx->internal->hwaccel_priv_data =
1177  av_mallocz(hwaccel->priv_data_size);
1178  if (!avctx->internal->hwaccel_priv_data)
1179  return AVERROR(ENOMEM);
1180  }
1181 
1182  avctx->hwaccel = &hwaccel->p;
1183  if (hwaccel->init) {
1184  err = hwaccel->init(avctx);
1185  if (err < 0) {
1186  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1187  "hwaccel initialisation returned error.\n",
1188  av_get_pix_fmt_name(hwaccel->p.pix_fmt));
 /* presumably: free hwaccel_priv_data before clearing the pointer — line missing */
1190  avctx->hwaccel = NULL;
1191  return err;
1192  }
1193  }
1194 
1195  return 0;
1196 }
1197 
1199 {
1200  if (FF_HW_HAS_CB(avctx, uninit))
1201  FF_HW_SIMPLE_CALL(avctx, uninit);
1202 
1204 
1205  avctx->hwaccel = NULL;
1206 
1207  av_buffer_unref(&avctx->hw_frames_ctx);
1208 }
1209 
1210 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1211 {
1212  const AVPixFmtDescriptor *desc;
1213  enum AVPixelFormat *choices;
1214  enum AVPixelFormat ret, user_choice;
1215  const AVCodecHWConfigInternal *hw_config;
1216  const AVCodecHWConfig *config;
1217  int i, n, err;
1218 
1219  // Find end of list.
1220  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1221  // Must contain at least one entry.
1222  av_assert0(n >= 1);
1223  // If a software format is available, it must be the last entry.
1224  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1225  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1226  // No software format is available.
1227  } else {
1228  avctx->sw_pix_fmt = fmt[n - 1];
1229  }
1230 
1231  choices = av_memdup(fmt, (n + 1) * sizeof(*choices));
1232  if (!choices)
1233  return AV_PIX_FMT_NONE;
1234 
1235  for (;;) {
1236  // Remove the previous hwaccel, if there was one.
1237  ff_hwaccel_uninit(avctx);
1238 
1239  user_choice = avctx->get_format(avctx, choices);
1240  if (user_choice == AV_PIX_FMT_NONE) {
1241  // Explicitly chose nothing, give up.
1242  ret = AV_PIX_FMT_NONE;
1243  break;
1244  }
1245 
1246  desc = av_pix_fmt_desc_get(user_choice);
1247  if (!desc) {
1248  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1249  "get_format() callback.\n");
1250  ret = AV_PIX_FMT_NONE;
1251  break;
1252  }
1253  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1254  desc->name);
1255 
1256  for (i = 0; i < n; i++) {
1257  if (choices[i] == user_choice)
1258  break;
1259  }
1260  if (i == n) {
1261  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1262  "%s not in possible list.\n", desc->name);
1263  ret = AV_PIX_FMT_NONE;
1264  break;
1265  }
1266 
1267  if (ffcodec(avctx->codec)->hw_configs) {
1268  for (i = 0;; i++) {
1269  hw_config = ffcodec(avctx->codec)->hw_configs[i];
1270  if (!hw_config)
1271  break;
1272  if (hw_config->public.pix_fmt == user_choice)
1273  break;
1274  }
1275  } else {
1276  hw_config = NULL;
1277  }
1278 
1279  if (!hw_config) {
1280  // No config available, so no extra setup required.
1281  ret = user_choice;
1282  break;
1283  }
1284  config = &hw_config->public;
1285 
1286  if (config->methods &
1288  avctx->hw_frames_ctx) {
1289  const AVHWFramesContext *frames_ctx =
1291  if (frames_ctx->format != user_choice) {
1292  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1293  "does not match the format of the provided frames "
1294  "context.\n", desc->name);
1295  goto try_again;
1296  }
1297  } else if (config->methods &
1299  avctx->hw_device_ctx) {
1300  const AVHWDeviceContext *device_ctx =
1302  if (device_ctx->type != config->device_type) {
1303  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1304  "does not match the type of the provided device "
1305  "context.\n", desc->name);
1306  goto try_again;
1307  }
1308  } else if (config->methods &
1310  // Internal-only setup, no additional configuration.
1311  } else if (config->methods &
1313  // Some ad-hoc configuration we can't see and can't check.
1314  } else {
1315  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1316  "missing configuration.\n", desc->name);
1317  goto try_again;
1318  }
1319  if (hw_config->hwaccel) {
1320  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel %s "
1321  "initialisation.\n", desc->name, hw_config->hwaccel->p.name);
1322  err = hwaccel_init(avctx, hw_config->hwaccel);
1323  if (err < 0)
1324  goto try_again;
1325  }
1326  ret = user_choice;
1327  break;
1328 
1329  try_again:
1330  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1331  "get_format() without it.\n", desc->name);
1332  for (i = 0; i < n; i++) {
1333  if (choices[i] == user_choice)
1334  break;
1335  }
1336  for (; i + 1 < n; i++)
1337  choices[i] = choices[i + 1];
1338  --n;
1339  }
1340 
1341  if (ret < 0)
1342  ff_hwaccel_uninit(avctx);
1343 
1344  av_freep(&choices);
1345  return ret;
1346 }
1347 
1348 static const AVPacketSideData*
1351 {
1352  for (int i = 0; i < nb_sd; i++)
1353  if (sd[i].type == type)
1354  return &sd[i];
1355 
1356  return NULL;
1357 }
1358 
1361 {
1363 }
1364 
1366  const AVPacketSideData *sd_pkt)
1367 {
1368  const AVStereo3D *src;
1369  AVStereo3D *dst;
1370  int ret;
1371 
1372  ret = av_buffer_make_writable(&sd_frame->buf);
1373  if (ret < 0)
1374  return ret;
1375  sd_frame->data = sd_frame->buf->data;
1376 
1377  dst = ( AVStereo3D*)sd_frame->data;
1378  src = (const AVStereo3D*)sd_pkt->data;
1379 
1380  if (dst->type == AV_STEREO3D_UNSPEC)
1381  dst->type = src->type;
1382 
1383  if (dst->view == AV_STEREO3D_VIEW_UNSPEC)
1384  dst->view = src->view;
1385 
1386  if (dst->primary_eye == AV_PRIMARY_EYE_NONE)
1387  dst->primary_eye = src->primary_eye;
1388 
1389  if (!dst->baseline)
1390  dst->baseline = src->baseline;
1391 
1392  if (!dst->horizontal_disparity_adjustment.num)
1393  dst->horizontal_disparity_adjustment = src->horizontal_disparity_adjustment;
1394 
1395  if (!dst->horizontal_field_of_view.num)
1396  dst->horizontal_field_of_view = src->horizontal_field_of_view;
1397 
1398  return 0;
1399 }
1400 
1402 {
1403  AVExifMetadata ifd = { 0 };
1404  AVExifEntry *entry = NULL;
1405  AVBufferRef *buf = NULL;
1406  AVFrameSideData *sd_frame;
1407  int ret;
1408 
1409  ret = av_exif_parse_buffer(NULL, sd_pkt->data, sd_pkt->size, &ifd,
1411  if (ret < 0)
1412  return ret;
1413 
1414  ret = av_exif_get_entry(NULL, &ifd, av_exif_get_tag_id("Orientation"), 0, &entry);
1415  if (ret < 0)
1416  goto end;
1417 
1418  if (!entry) {
1419  ret = av_exif_ifd_to_dict(NULL, &ifd, &dst->metadata);
1420  if (ret < 0)
1421  goto end;
1422 
1423  sd_frame = av_frame_side_data_new(&dst->side_data, &dst->nb_side_data, AV_FRAME_DATA_EXIF,
1424  sd_pkt->size, 0);
1425  if (sd_frame)
1426  memcpy(sd_frame->data, sd_pkt->data, sd_pkt->size);
1427  ret = sd_frame ? 0 : AVERROR(ENOMEM);
1428 
1429  goto end;
1430  } else if (entry->count <= 0 || entry->type != AV_TIFF_SHORT) {
1432  goto end;
1433  }
1434 
1435  // If a display matrix already exists in the frame, give it priority
1436  if (av_frame_side_data_get(dst->side_data, dst->nb_side_data, AV_FRAME_DATA_DISPLAYMATRIX))
1437  goto finish;
1438 
1439  sd_frame = av_frame_side_data_new(&dst->side_data, &dst->nb_side_data, AV_FRAME_DATA_DISPLAYMATRIX,
1440  sizeof(int32_t) * 9, 0);
1441  if (!sd_frame) {
1442  ret = AVERROR(ENOMEM);
1443  goto end;
1444  }
1445 
1446  ret = av_exif_orientation_to_matrix((int32_t *)sd_frame->data, entry->value.uint[0]);
1447  if (ret < 0)
1448  goto end;
1449 
1450 finish:
1451  av_exif_remove_entry(NULL, &ifd, entry->id, 0);
1452 
1453  ret = av_exif_ifd_to_dict(NULL, &ifd, &dst->metadata);
1454  if (ret < 0)
1455  goto end;
1456 
1457  ret = av_exif_write(NULL, &ifd, &buf, AV_EXIF_TIFF_HEADER);
1458  if (ret < 0)
1459  goto end;
1460 
1461  if (!av_frame_side_data_add(&dst->side_data, &dst->nb_side_data, AV_FRAME_DATA_EXIF, &buf, 0)) {
1462  ret = AVERROR(ENOMEM);
1463  goto end;
1464  }
1465 
1466  ret = 0;
1467 end:
1468  av_buffer_unref(&buf);
1469  av_exif_free(&ifd);
1470  return ret;
1471 }
1472 
1474  const AVPacketSideData *sd_src, int nb_sd_src,
1475  const SideDataMap *map)
1476 
1477 {
1478  for (int i = 0; map[i].packet < AV_PKT_DATA_NB; i++) {
1479  const enum AVPacketSideDataType type_pkt = map[i].packet;
1480  const enum AVFrameSideDataType type_frame = map[i].frame;
1481  const AVPacketSideData *sd_pkt;
1482  AVFrameSideData *sd_frame;
1483 
1484  sd_pkt = packet_side_data_get(sd_src, nb_sd_src, type_pkt);
1485  if (!sd_pkt)
1486  continue;
1487 
1488  sd_frame = av_frame_get_side_data(dst, type_frame);
1489  if (sd_frame) {
1490  if (type_frame == AV_FRAME_DATA_STEREO3D) {
1491  int ret = side_data_stereo3d_merge(sd_frame, sd_pkt);
1492  if (ret < 0)
1493  return ret;
1494  }
1495 
1496  continue;
1497  }
1498 
1499  switch (type_pkt) {
1500  case AV_PKT_DATA_EXIF: {
1501  int ret = side_data_exif_parse(dst, sd_pkt);
1502  if (ret < 0)
1503  return ret;
1504  break;
1505  }
1506  default:
1507  sd_frame = av_frame_new_side_data(dst, type_frame, sd_pkt->size);
1508  if (!sd_frame)
1509  return AVERROR(ENOMEM);
1510 
1511  memcpy(sd_frame->data, sd_pkt->data, sd_pkt->size);
1512  break;
1513  }
1514  }
1515 
1516  return 0;
1517 }
1518 
1520 {
1521  size_t size;
1522  const uint8_t *side_metadata;
1523 
1524  AVDictionary **frame_md = &frame->metadata;
1525 
1526  side_metadata = av_packet_get_side_data(avpkt,
1528  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1529 }
1530 
1532  AVFrame *frame, const AVPacket *pkt)
1533 {
1534  static const SideDataMap sd[] = {
1541  { AV_PKT_DATA_NB }
1542  };
1543 
1544  int ret = 0;
1545 
1546  frame->pts = pkt->pts;
1547  frame->duration = pkt->duration;
1548 
1550  if (ret < 0)
1551  return ret;
1552 
1554  if (ret < 0)
1555  return ret;
1556 
1558 
1559  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1560  frame->flags |= AV_FRAME_FLAG_DISCARD;
1561  }
1562 
1563  if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
1564  int ret = av_buffer_replace(&frame->opaque_ref, pkt->opaque_ref);
1565  if (ret < 0)
1566  return ret;
1567  frame->opaque = pkt->opaque;
1568  }
1569 
1570  return 0;
1571 }
1572 
1574 {
1575  int ret;
1576 
1579  if (ret < 0)
1580  return ret;
1581 
1582  for (int i = 0; i < avctx->nb_decoded_side_data; i++) {
1583  const AVFrameSideData *src = avctx->decoded_side_data[i];
1584  if (av_frame_get_side_data(frame, src->type))
1585  continue;
1586  ret = av_frame_side_data_clone(&frame->side_data, &frame->nb_side_data, src, 0);
1587  if (ret < 0)
1588  return ret;
1589  }
1590 
1592  const AVPacket *pkt = avctx->internal->last_pkt_props;
1593 
1595  if (ret < 0)
1596  return ret;
1597  }
1598 
1599  ret = fill_frame_props(avctx, frame);
1600  if (ret < 0)
1601  return ret;
1602 
1603  switch (avctx->codec->type) {
1604  case AVMEDIA_TYPE_VIDEO:
1605  if (frame->width && frame->height &&
1606  av_image_check_sar(frame->width, frame->height,
1607  frame->sample_aspect_ratio) < 0) {
1608  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1609  frame->sample_aspect_ratio.num,
1610  frame->sample_aspect_ratio.den);
1611  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1612  }
1613  break;
1614  }
1615  return 0;
1616 }
1617 
1619 {
1620  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1621  int i;
1622  int num_planes = av_pix_fmt_count_planes(frame->format);
1624  int flags = desc ? desc->flags : 0;
1625  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1626  num_planes = 2;
1627  for (i = 0; i < num_planes; i++) {
1628  av_assert0(frame->data[i]);
1629  }
1630  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1631  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1632  if (frame->data[i])
1633  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1634  frame->data[i] = NULL;
1635  }
1636  }
1637 }
1638 
1639 static void decode_data_free(AVRefStructOpaque unused, void *obj)
1640 {
1641  FrameDecodeData *fdd = obj;
1642 
1643  if (fdd->post_process_opaque_free)
1645 
1646  if (fdd->hwaccel_priv_free)
1647  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1648 }
1649 
1651 {
1652  FrameDecodeData *fdd;
1653 
1654  av_assert1(!frame->private_ref);
1655  av_refstruct_unref(&frame->private_ref);
1656 
1657  fdd = av_refstruct_alloc_ext(sizeof(*fdd), 0, NULL, decode_data_free);
1658  if (!fdd)
1659  return AVERROR(ENOMEM);
1660 
1661  frame->private_ref = fdd;
1662 
1663  return 0;
1664 }
1665 
1667 {
1668 #if CONFIG_LIBLCEVC_DEC
1669  AVCodecInternal *avci = avctx->internal;
1670  DecodeContext *dc = decode_ctx(avci);
1671 
1672  dc->lcevc.frame = dc->lcevc.ctx && avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1674 
1675  if (dc->lcevc.frame) {
1676  int ret = ff_lcevc_parse_frame(dc->lcevc.ctx, frame,
1677  &dc->lcevc.width, &dc->lcevc.height, avctx);
1678  if (ret < 0)
1679  return ret;
1680 
1681  // force get_buffer2() to allocate the base frame using the same dimensions
1682  // as the final enhanced frame, in order to prevent reinitializing the buffer
1683  // pools unnecessarely
1684  if (dc->lcevc.width && dc->lcevc.height) {
1685  dc->lcevc.base_width = frame->width;
1686  dc->lcevc.base_height = frame->height;
1687  frame->width = dc->lcevc.width;
1688  frame->height = dc->lcevc.height;
1689  }
1690  }
1691 #endif
1692  return 0;
1693 }
1694 
1696 {
1697 #if CONFIG_LIBLCEVC_DEC
1698  AVCodecInternal *avci = avctx->internal;
1699  DecodeContext *dc = decode_ctx(avci);
1700 
1701  if (dc->lcevc.frame) {
1702  FrameDecodeData *fdd = frame->private_ref;
1703  FFLCEVCFrame *frame_ctx;
1704  int ret;
1705 
1706  if (!dc->lcevc.width || !dc->lcevc.height) {
1707  dc->lcevc.frame = 0;
1708  return 0;
1709  }
1710 
1711  frame_ctx = av_mallocz(sizeof(*frame_ctx));
1712  if (!frame_ctx)
1713  return AVERROR(ENOMEM);
1714 
1715  frame_ctx->frame = av_frame_alloc();
1716  if (!frame_ctx->frame) {
1717  av_free(frame_ctx);
1718  return AVERROR(ENOMEM);
1719  }
1720 
1721  frame_ctx->lcevc = av_refstruct_ref(dc->lcevc.ctx);
1722  frame_ctx->frame->width = dc->lcevc.width;
1723  frame_ctx->frame->height = dc->lcevc.height;
1724  frame_ctx->frame->format = frame->format;
1725 
1726  frame->width = dc->lcevc.base_width;
1727  frame->height = dc->lcevc.base_height;
1728 
1729  ret = avctx->get_buffer2(avctx, frame_ctx->frame, 0);
1730  if (ret < 0) {
1731  ff_lcevc_unref(frame_ctx);
1732  return ret;
1733  }
1734 
1735  validate_avframe_allocation(avctx, frame_ctx->frame);
1736 
1737  fdd->post_process_opaque = frame_ctx;
1740  }
1741  dc->lcevc.frame = 0;
1742 #endif
1743 
1744  return 0;
1745 }
1746 
1748 {
1749  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1750  int override_dimensions = 1;
1751  int ret;
1752 
1754 
1755  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1756  if ((unsigned)avctx->width > INT_MAX - STRIDE_ALIGN ||
1757  (ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1758  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1759  ret = AVERROR(EINVAL);
1760  goto fail;
1761  }
1762 
1763  if (frame->width <= 0 || frame->height <= 0) {
1764  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1765  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1766  override_dimensions = 0;
1767  }
1768 
1769  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1770  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1771  ret = AVERROR(EINVAL);
1772  goto fail;
1773  }
1774  } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1775  if (frame->nb_samples * (int64_t)avctx->ch_layout.nb_channels > avctx->max_samples) {
1776  av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
1777  ret = AVERROR(EINVAL);
1778  goto fail;
1779  }
1780  }
1781  ret = ff_decode_frame_props(avctx, frame);
1782  if (ret < 0)
1783  goto fail;
1784 
1785  if (hwaccel) {
1786  if (hwaccel->alloc_frame) {
1787  ret = hwaccel->alloc_frame(avctx, frame);
1788  goto end;
1789  }
1790  } else {
1791  avctx->sw_pix_fmt = avctx->pix_fmt;
1792  ret = update_frame_props(avctx, frame);
1793  if (ret < 0)
1794  goto fail;
1795  }
1796 
1797  ret = avctx->get_buffer2(avctx, frame, flags);
1798  if (ret < 0)
1799  goto fail;
1800 
1802 
1804  if (ret < 0)
1805  goto fail;
1806 
1808  if (ret < 0)
1809  goto fail;
1810 
1811 end:
1812  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1814  frame->width = avctx->width;
1815  frame->height = avctx->height;
1816  }
1817 
1818 fail:
1819  if (ret < 0) {
1820  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1822  }
1823 
1824  return ret;
1825 }
1826 
1828 {
1829  AVFrame *tmp;
1830  int ret;
1831 
1833 
1834  // make sure the discard flag does not persist
1835  frame->flags &= ~AV_FRAME_FLAG_DISCARD;
1836 
1837  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1838  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1839  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1841  }
1842 
1843  if (!frame->data[0])
1844  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1845 
1846  av_frame_side_data_free(&frame->side_data, &frame->nb_side_data);
1847 
1849  return ff_decode_frame_props(avctx, frame);
1850 
1851  tmp = av_frame_alloc();
1852  if (!tmp)
1853  return AVERROR(ENOMEM);
1854 
1856 
1858  if (ret < 0) {
1859  av_frame_free(&tmp);
1860  return ret;
1861  }
1862 
1864  av_frame_free(&tmp);
1865 
1866  return 0;
1867 }
1868 
1870 {
1871  int ret = reget_buffer_internal(avctx, frame, flags);
1872  if (ret < 0)
1873  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1874  return ret;
1875 }
1876 
1877 typedef struct ProgressInternal {
1879  struct AVFrame *f;
1881 
1883 {
1884  av_assert1(!!f->f == !!f->progress);
1885  av_assert1(!f->progress || f->progress->f == f->f);
1886 }
1887 
1889 {
1891 
1892  av_assert1(!f->f && !f->progress);
1893 
1894  f->progress = av_refstruct_pool_get(pool);
1895  if (!f->progress)
1896  return AVERROR(ENOMEM);
1897 
1898  f->f = f->progress->f;
1899  return 0;
1900 }
1901 
1903 {
1904  int ret = ff_progress_frame_alloc(avctx, f);
1905  if (ret < 0)
1906  return ret;
1907 
1908  ret = ff_thread_get_buffer(avctx, f->progress->f, flags);
1909  if (ret < 0) {
1910  f->f = NULL;
1911  av_refstruct_unref(&f->progress);
1912  return ret;
1913  }
1914  return 0;
1915 }
1916 
1918 {
1919  av_assert1(src->progress && src->f && src->f == src->progress->f);
1920  av_assert1(!dst->f && !dst->progress);
1921  dst->f = src->f;
1922  dst->progress = av_refstruct_ref(src->progress);
1923 }
1924 
1926 {
1928  f->f = NULL;
1929  av_refstruct_unref(&f->progress);
1930 }
1931 
1933 {
1934  if (dst == src)
1935  return;
1938  if (src->f)
1940 }
1941 
1943 {
1944  ff_thread_progress_report(&f->progress->progress, n);
1945 }
1946 
1948 {
1949  ff_thread_progress_await(&f->progress->progress, n);
1950 }
1951 
1952 #if !HAVE_THREADS
1954 {
1956 }
1957 #endif /* !HAVE_THREADS */
1958 
1960 {
1961  const AVCodecContext *avctx = opaque.nc;
1962  ProgressInternal *progress = obj;
1963  int ret;
1964 
1966  if (ret < 0)
1967  return ret;
1968 
1969  progress->f = av_frame_alloc();
1970  if (!progress->f)
1971  return AVERROR(ENOMEM);
1972 
1973  return 0;
1974 }
1975 
1976 static void progress_frame_pool_reset_cb(AVRefStructOpaque unused, void *obj)
1977 {
1978  ProgressInternal *progress = obj;
1979 
1980  ff_thread_progress_reset(&progress->progress);
1981  av_frame_unref(progress->f);
1982 }
1983 
1985 {
1986  ProgressInternal *progress = obj;
1987 
1989  av_frame_free(&progress->f);
1990 }
1991 
1993 {
1994  AVCodecInternal *avci = avctx->internal;
1995  DecodeContext *dc = decode_ctx(avci);
1996  int ret = 0;
1997 
1998  dc->initial_pict_type = AV_PICTURE_TYPE_NONE;
2000  dc->intra_only_flag = AV_FRAME_FLAG_KEY;
2001  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2002  dc->initial_pict_type = AV_PICTURE_TYPE_I;
2003  }
2004 
2005  /* if the decoder init function was already called previously,
2006  * free the already allocated subtitle_header before overwriting it */
2007  av_freep(&avctx->subtitle_header);
2008 
2009  if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) {
2010  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2011  avctx->codec->max_lowres);
2012  avctx->lowres = avctx->codec->max_lowres;
2013  }
2014  if (avctx->sub_charenc) {
2015  if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) {
2016  av_log(avctx, AV_LOG_ERROR, "Character encoding is only "
2017  "supported with subtitles codecs\n");
2018  return AVERROR(EINVAL);
2019  } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) {
2020  av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, "
2021  "subtitles character encoding will be ignored\n",
2022  avctx->codec_descriptor->name);
2024  } else {
2025  /* input character encoding is set for a text based subtitle
2026  * codec at this point */
2029 
2031 #if CONFIG_ICONV
2032  iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc);
2033  if (cd == (iconv_t)-1) {
2034  ret = AVERROR(errno);
2035  av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context "
2036  "with input character encoding \"%s\"\n", avctx->sub_charenc);
2037  return ret;
2038  }
2039  iconv_close(cd);
2040 #else
2041  av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles "
2042  "conversion needs a libavcodec built with iconv support "
2043  "for this codec\n");
2044  return AVERROR(ENOSYS);
2045 #endif
2046  }
2047  }
2048  }
2049 
2050  dc->pts_correction_num_faulty_pts =
2051  dc->pts_correction_num_faulty_dts = 0;
2052  dc->pts_correction_last_pts =
2053  dc->pts_correction_last_dts = INT64_MIN;
2054 
2055  if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY
2057  av_log(avctx, AV_LOG_WARNING,
2058  "gray decoding requested but not enabled at configuration time\n");
2059  if (avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) {
2061  }
2062 
2063  if (avctx->nb_side_data_prefer_packet == 1 &&
2064  avctx->side_data_prefer_packet[0] == -1)
2065  dc->side_data_pref_mask = ~0ULL;
2066  else {
2067  for (unsigned i = 0; i < avctx->nb_side_data_prefer_packet; i++) {
2068  int val = avctx->side_data_prefer_packet[i];
2069 
2070  if (val < 0 || val >= AV_PKT_DATA_NB) {
2071  av_log(avctx, AV_LOG_ERROR, "Invalid side data type: %d\n", val);
2072  return AVERROR(EINVAL);
2073  }
2074 
2075  for (unsigned j = 0; ff_sd_global_map[j].packet < AV_PKT_DATA_NB; j++) {
2076  if (ff_sd_global_map[j].packet == val) {
2077  val = ff_sd_global_map[j].frame;
2078 
2079  // this code will need to be changed when we have more than
2080  // 64 frame side data types
2081  if (val >= 64) {
2082  av_log(avctx, AV_LOG_ERROR, "Side data type too big\n");
2083  return AVERROR_BUG;
2084  }
2085 
2086  dc->side_data_pref_mask |= 1ULL << val;
2087  }
2088  }
2089  }
2090  }
2091 
2092  avci->in_pkt = av_packet_alloc();
2093  avci->last_pkt_props = av_packet_alloc();
2094  if (!avci->in_pkt || !avci->last_pkt_props)
2095  return AVERROR(ENOMEM);
2096 
2098  avci->progress_frame_pool =
2104  if (!avci->progress_frame_pool)
2105  return AVERROR(ENOMEM);
2106  }
2107  ret = decode_bsfs_init(avctx);
2108  if (ret < 0)
2109  return ret;
2110 
2112  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2113 #if CONFIG_LIBLCEVC_DEC
2114  ret = ff_lcevc_alloc(&dc->lcevc.ctx, avctx);
2115  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
2116  return ret;
2117 #endif
2118  }
2119  }
2120 
2121  return 0;
2122 }
2123 
2124 /**
2125  * Check side data preference and clear existing side data from frame
2126  * if needed.
2127  *
2128  * @retval 0 side data of this type can be added to frame
2129  * @retval 1 side data of this type should not be added to frame
2130  */
2131 static int side_data_pref(const AVCodecContext *avctx, AVFrameSideData ***sd,
2132  int *nb_sd, enum AVFrameSideDataType type)
2133 {
2134  DecodeContext *dc = decode_ctx(avctx->internal);
2135 
2136  // Note: could be skipped for `type` without corresponding packet sd
2137  if (av_frame_side_data_get(*sd, *nb_sd, type)) {
2138  if (dc->side_data_pref_mask & (1ULL << type))
2139  return 1;
2140  av_frame_side_data_remove(sd, nb_sd, type);
2141  }
2142 
2143  return 0;
2144 }
2145 
2146 
2148  enum AVFrameSideDataType type, size_t size,
2149  AVFrameSideData **psd)
2150 {
2151  AVFrameSideData *sd;
2152 
2153  if (side_data_pref(avctx, &frame->side_data, &frame->nb_side_data, type)) {
2154  if (psd)
2155  *psd = NULL;
2156  return 0;
2157  }
2158 
2160  if (psd)
2161  *psd = sd;
2162 
2163  return sd ? 0 : AVERROR(ENOMEM);
2164 }
2165 
2167  AVFrameSideData ***sd, int *nb_sd,
2169  AVBufferRef **buf)
2170 {
2171  int ret = 0;
2172 
2173  if (side_data_pref(avctx, sd, nb_sd, type))
2174  goto finish;
2175 
2176  if (!av_frame_side_data_add(sd, nb_sd, type, buf, 0))
2177  ret = AVERROR(ENOMEM);
2178 
2179 finish:
2181 
2182  return ret;
2183 }
2184 
2187  AVBufferRef **buf)
2188 {
2190  &frame->side_data, &frame->nb_side_data,
2191  type, buf);
2192 }
2193 
2195  AVFrameSideData ***sd, int *nb_sd,
2196  struct AVMasteringDisplayMetadata **mdm)
2197 {
2198  AVBufferRef *buf;
2199  size_t size;
2200 
2202  *mdm = NULL;
2203  return 0;
2204  }
2205 
2207  if (!*mdm)
2208  return AVERROR(ENOMEM);
2209 
2210  buf = av_buffer_create((uint8_t *)*mdm, size, NULL, NULL, 0);
2211  if (!buf) {
2212  av_freep(mdm);
2213  return AVERROR(ENOMEM);
2214  }
2215 
2217  &buf, 0)) {
2218  *mdm = NULL;
2219  av_buffer_unref(&buf);
2220  return AVERROR(ENOMEM);
2221  }
2222 
2223  return 0;
2224 }
2225 
2228 {
2229  if (side_data_pref(avctx, &frame->side_data, &frame->nb_side_data,
2231  *mdm = NULL;
2232  return 0;
2233  }
2234 
2236  return *mdm ? 0 : AVERROR(ENOMEM);
2237 }
2238 
2240  AVFrameSideData ***sd, int *nb_sd,
2241  AVContentLightMetadata **clm)
2242 {
2243  AVBufferRef *buf;
2244  size_t size;
2245 
2246  if (side_data_pref(avctx, sd, nb_sd, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL)) {
2247  *clm = NULL;
2248  return 0;
2249  }
2250 
2252  if (!*clm)
2253  return AVERROR(ENOMEM);
2254 
2255  buf = av_buffer_create((uint8_t *)*clm, size, NULL, NULL, 0);
2256  if (!buf) {
2257  av_freep(clm);
2258  return AVERROR(ENOMEM);
2259  }
2260 
2262  &buf, 0)) {
2263  *clm = NULL;
2264  av_buffer_unref(&buf);
2265  return AVERROR(ENOMEM);
2266  }
2267 
2268  return 0;
2269 }
2270 
2272  AVContentLightMetadata **clm)
2273 {
2274  if (side_data_pref(avctx, &frame->side_data, &frame->nb_side_data,
2276  *clm = NULL;
2277  return 0;
2278  }
2279 
2281  return *clm ? 0 : AVERROR(ENOMEM);
2282 }
2283 
2284 int ff_copy_palette(void *dst, const AVPacket *src, void *logctx)
2285 {
2286  size_t size;
2287  const void *pal = av_packet_get_side_data(src, AV_PKT_DATA_PALETTE, &size);
2288 
2289  if (pal && size == AVPALETTE_SIZE) {
2290  memcpy(dst, pal, AVPALETTE_SIZE);
2291  return 1;
2292  } else if (pal) {
2293  av_log(logctx, AV_LOG_ERROR,
2294  "Palette size %zu is wrong\n", size);
2295  }
2296  return 0;
2297 }
2298 
2299 int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
2300 {
2301  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
2302 
2303  if (!hwaccel || !hwaccel->frame_priv_data_size)
2304  return 0;
2305 
2306  av_assert0(!*hwaccel_picture_private);
2307 
2308  if (hwaccel->free_frame_priv) {
2309  AVHWFramesContext *frames_ctx;
2310 
2311  if (!avctx->hw_frames_ctx)
2312  return AVERROR(EINVAL);
2313 
2314  frames_ctx = (AVHWFramesContext *) avctx->hw_frames_ctx->data;
2315  *hwaccel_picture_private = av_refstruct_alloc_ext(hwaccel->frame_priv_data_size, 0,
2316  frames_ctx->device_ctx,
2317  hwaccel->free_frame_priv);
2318  } else {
2319  *hwaccel_picture_private = av_refstruct_allocz(hwaccel->frame_priv_data_size);
2320  }
2321 
2322  if (!*hwaccel_picture_private)
2323  return AVERROR(ENOMEM);
2324 
2325  return 0;
2326 }
2327 
2329 {
2330  AVCodecInternal *avci = avctx->internal;
2331  DecodeContext *dc = decode_ctx(avci);
2332 
2334  av_packet_unref(avci->in_pkt);
2335 
2336  dc->pts_correction_last_pts =
2337  dc->pts_correction_last_dts = INT64_MIN;
2338 
2339  if (avci->bsf)
2340  av_bsf_flush(avci->bsf);
2341 
2342  dc->nb_draining_errors = 0;
2343  dc->draining_started = 0;
2344 }
2345 
2347 {
2348  return av_mallocz(sizeof(DecodeContext));
2349 }
2350 
2352 {
2353  const DecodeContext *src_dc = decode_ctx(src->internal);
2354  DecodeContext *dst_dc = decode_ctx(dst->internal);
2355 
2356  dst_dc->initial_pict_type = src_dc->initial_pict_type;
2357  dst_dc->intra_only_flag = src_dc->intra_only_flag;
2358  dst_dc->side_data_pref_mask = src_dc->side_data_pref_mask;
2359 #if CONFIG_LIBLCEVC_DEC
2360  av_refstruct_replace(&dst_dc->lcevc.ctx, src_dc->lcevc.ctx);
2361  dst_dc->lcevc.width = src_dc->lcevc.width;
2362  dst_dc->lcevc.height = src_dc->lcevc.height;
2363 #endif
2364 }
2365 
2367 {
2368 #if CONFIG_LIBLCEVC_DEC
2369  AVCodecInternal *avci = avctx->internal;
2370  DecodeContext *dc = decode_ctx(avci);
2371 
2372  av_refstruct_unref(&dc->lcevc.ctx);
2373 #endif
2374 }
2375 
2376 static int attach_displaymatrix(AVCodecContext *avctx, AVFrame *frame, int orientation)
2377 {
2378  AVFrameSideData *sd = NULL;
2379  int32_t *matrix;
2380  int ret;
2381  /* invalid orientation */
2382  if (orientation < 1 || orientation > 8)
2383  return AVERROR_INVALIDDATA;
2385  if (ret < 0) {
2386  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data: %s\n", av_err2str(ret));
2387  return ret;
2388  }
2389  if (sd) {
2390  matrix = (int32_t *) sd->data;
2391  ret = av_exif_orientation_to_matrix(matrix, orientation);
2392  }
2393 
2394  return ret;
2395 }
2396 
2397 static int exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd, AVBufferRef **pbuf)
2398 {
2399  const AVExifEntry *orient = NULL;
2400  AVExifMetadata *cloned = NULL;
2401  int ret;
2402 
2403  for (size_t i = 0; i < ifd->count; i++) {
2404  const AVExifEntry *entry = &ifd->entries[i];
2405  if (entry->id == av_exif_get_tag_id("Orientation") &&
2406  entry->count > 0 && entry->type == AV_TIFF_SHORT) {
2407  orient = entry;
2408  break;
2409  }
2410  }
2411 
2412  if (orient) {
2413  av_log(avctx, AV_LOG_DEBUG, "found EXIF orientation: %" PRIu64 "\n", orient->value.uint[0]);
2414  ret = attach_displaymatrix(avctx, frame, orient->value.uint[0]);
2415  if (ret < 0) {
2416  av_log(avctx, AV_LOG_WARNING, "unable to attach displaymatrix from EXIF\n");
2417  } else {
2418  cloned = av_exif_clone_ifd(ifd);
2419  if (!cloned) {
2420  ret = AVERROR(ENOMEM);
2421  goto end;
2422  }
2423  av_exif_remove_entry(avctx, cloned, orient->id, 0);
2424  ifd = cloned;
2425  }
2426  }
2427 
2428  ret = av_exif_ifd_to_dict(avctx, ifd, &frame->metadata);
2429  if (ret < 0)
2430  goto end;
2431 
2432  if (cloned || !*pbuf) {
2433  av_buffer_unref(pbuf);
2434  ret = av_exif_write(avctx, ifd, pbuf, AV_EXIF_TIFF_HEADER);
2435  if (ret < 0)
2436  goto end;
2437  }
2438 
2440  if (ret < 0)
2441  goto end;
2442 
2443  ret = 0;
2444 
2445 end:
2446  av_buffer_unref(pbuf);
2447  av_exif_free(cloned);
2448  av_free(cloned);
2449  return ret;
2450 }
2451 
2453 {
2454  AVBufferRef *dummy = NULL;
2455  return exif_attach_ifd(avctx, frame, ifd, &dummy);
2456 }
2457 
2459  enum AVExifHeaderMode header_mode)
2460 {
2461  int ret;
2462  AVBufferRef *data = *pbuf;
2463  AVExifMetadata ifd = { 0 };
2464 
2465  ret = av_exif_parse_buffer(avctx, data->data, data->size, &ifd, header_mode);
2466  if (ret < 0)
2467  goto end;
2468 
2469  ret = exif_attach_ifd(avctx, frame, &ifd, pbuf);
2470 
2471 end:
2472  av_buffer_unref(pbuf);
2473  av_exif_free(&ifd);
2474  return ret;
2475 }
lcevcdec.h
flags
const SwsFlags flags[]
Definition: swscale.c:71
ff_get_coded_side_data
const AVPacketSideData * ff_get_coded_side_data(const AVCodecContext *avctx, enum AVPacketSideDataType type)
Get side data of the given type from a decoding context.
Definition: decode.c:1359
DecodeContext::intra_only_flag
int intra_only_flag
This is set to AV_FRAME_FLAG_KEY for decoders of intra-only formats (those whose codec descriptor has...
Definition: decode.c:69
AVSubtitle
Definition: avcodec.h:2094
hwconfig.h
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1942
av_samples_copy
int av_samples_copy(uint8_t *const *dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:222
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1417
exif_attach_ifd
static int exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd, AVBufferRef **pbuf)
Definition: decode.c:2397
FFCodec::receive_frame
int(* receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: codec_internal.h:219
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
AVBSFContext::par_in
AVCodecParameters * par_in
Parameters of the input stream.
Definition: bsf.h:90
hwaccel_init
static int hwaccel_init(AVCodecContext *avctx, const FFHWAccel *hwaccel)
Definition: decode.c:1163
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:251
hw_pix_fmt
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:46
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
Frame::width
int width
Definition: ffplay.c:160
entry
#define entry
Definition: aom_film_grain_template.c:66
AV_CODEC_HW_CONFIG_METHOD_INTERNAL
@ AV_CODEC_HW_CONFIG_METHOD_INTERNAL
The codec supports this format by some internal method.
Definition: codec.h:318
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
ff_thread_progress_report
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of p...
Definition: threadprogress.c:53
AVCodecContext::alpha_mode
enum AVAlphaMode alpha_mode
Indicates how the alpha channel of the video is represented.
Definition: avcodec.h:1944
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2099
threadprogress.h
AVCodecContext::get_format
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
Callback to negotiate the pixel format.
Definition: avcodec.h:769
ff_icc_profile_read_primaries
int ff_icc_profile_read_primaries(FFIccContext *s, cmsHPROFILE profile, AVColorPrimariesDesc *out_primaries)
Read the color primaries and white point coefficients encoded by an ICC profile, and return the raw v...
Definition: fflcms2.c:253
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:422
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:881
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
AVColorTransferCharacteristic
AVColorTransferCharacteristic
Color Transfer Characteristic.
Definition: pixfmt.h:666
FFCodec::cb
union FFCodec::@106 cb
AVCodecContext::decoded_side_data
AVFrameSideData ** decoded_side_data
Array containing static side data, such as HDR10 CLL / MDCV structures.
Definition: avcodec.h:1936
attach_post_process_data
static int attach_post_process_data(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1695
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1210
apply_cropping
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:744
ThreadProgress
ThreadProgress is an API to easily notify other threads about progress of any kind as long as it can ...
Definition: threadprogress.h:43
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1036
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:659
AVExifEntry
Definition: exif.h:85
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:647
av_exif_write
int av_exif_write(void *logctx, const AVExifMetadata *ifd, AVBufferRef **buffer, enum AVExifHeaderMode header_mode)
Allocates a buffer using av_malloc of an appropriate size and writes the EXIF data represented by ifd...
Definition: exif.c:752
AVExifMetadata
Definition: exif.h:76
AVColorPrimariesDesc
Struct that contains both white point location and primaries location, providing the complete descrip...
Definition: csp.h:78
DecodeContext::initial_pict_type
enum AVPictureType initial_pict_type
This is set to AV_PICTURE_TYPE_I for intra only video decoders and to AV_PICTURE_TYPE_NONE for other ...
Definition: decode.c:76
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:1995
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
AVRefStructOpaque
RefStruct is an API for creating reference-counted objects with minimal overhead.
Definition: refstruct.h:58
matrix
Definition: vc1dsp.c:43
AV_PKT_FLAG_DISCARD
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: packet.h:650
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:200
AVPictureType
AVPictureType
Definition: avutil.h:276
av_exif_ifd_to_dict
int av_exif_ifd_to_dict(void *logctx, const AVExifMetadata *ifd, AVDictionary **metadata)
Recursively reads all tags from the IFD and stores them in the provided metadata dictionary.
Definition: exif.c:1052
AVCodecInternal::skip_samples
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:125
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1410
AV_CODEC_FLAG_UNALIGNED
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e....
Definition: avcodec.h:209
AVCodecContext::codec_descriptor
const struct AVCodecDescriptor * codec_descriptor
AVCodecDescriptor.
Definition: avcodec.h:1716
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
AVCodecDescriptor::name
const char * name
Name of the codec described by this descriptor.
Definition: codec_desc.h:46
AV_WL8
#define AV_WL8(p, d)
Definition: intreadwrite.h:395
AVCodecContext::coded_side_data
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:1775
int64_t
long long int64_t
Definition: coverity.c:34
av_exif_orientation_to_matrix
int av_exif_orientation_to_matrix(int32_t *matrix, int orientation)
Convert an orientation constant used by EXIF's orientation tag into a display matrix used by AV_FRAME...
Definition: exif.c:1332
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2098
AVExifHeaderMode
AVExifHeaderMode
Definition: exif.h:58
av_unused
#define av_unused
Definition: attributes.h:151
decode_simple_receive_frame
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:598
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
FFHWAccel::p
AVHWAccel p
The public AVHWAccel.
Definition: hwaccel_internal.h:38
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:565
FrameDecodeData
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:33
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:337
DecodeContext::pts_correction_last_pts
int64_t pts_correction_last_pts
Number of incorrect DTS values so far.
Definition: decode.c:88
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVFrameSideData::buf
AVBufferRef * buf
Definition: frame.h:287
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:660
AVFrame::width
int width
Definition: frame.h:499
AVPacketSideData
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
Definition: packet.h:409
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
FFHWAccel::frame_params
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: hwaccel_internal.h:146
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:263
internal.h
AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:311
AVPacket::data
uint8_t * data
Definition: packet.h:588
FFLCEVCContext
Definition: lcevcdec.h:34
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:669
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
Wrapper around ff_progress_frame_alloc() and ff_thread_get_buffer().
Definition: decode.c:1902
data
const char data[16]
Definition: mxf.c:149
AV_PKT_DATA_S12M_TIMECODE
@ AV_PKT_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1:2014.
Definition: packet.h:288
FFCodec
Definition: codec_internal.h:127
AVCodecContext::subtitle_header
uint8_t * subtitle_header
Definition: avcodec.h:1751
AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE
@ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE
Definition: packet.h:665
FrameDecodeData::hwaccel_priv_free
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:52
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:606
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
FF_SUB_CHARENC_MODE_PRE_DECODER
#define FF_SUB_CHARENC_MODE_PRE_DECODER
the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv
Definition: avcodec.h:1734
AVDictionary
Definition: dict.c:32
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVColorPrimaries
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Definition: pixfmt.h:636
avcodec_default_get_format
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:987
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: side_data.c:248
avcodec_is_open
int avcodec_is_open(AVCodecContext *s)
Definition: avcodec.c:705
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:604
AV_RL8
#define AV_RL8(x)
Definition: intreadwrite.h:394
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
decode_ctx
static DecodeContext * decode_ctx(AVCodecInternal *avci)
Definition: decode.c:109
av_bsf_free
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:52
FF_SUB_CHARENC_MODE_AUTOMATIC
#define FF_SUB_CHARENC_MODE_AUTOMATIC
libavcodec will select the mode itself
Definition: avcodec.h:1733
AV_STEREO3D_VIEW_UNSPEC
@ AV_STEREO3D_VIEW_UNSPEC
Content is unspecified.
Definition: stereo3d.h:168
tf_sess_config.config
config
Definition: tf_sess_config.py:33
thread.h
AV_STEREO3D_UNSPEC
@ AV_STEREO3D_UNSPEC
Video is stereoscopic but the packing is unspecified.
Definition: stereo3d.h:143
av_frame_apply_cropping
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields.
Definition: frame.c:760
DecodeContext::pts_correction_num_faulty_dts
int64_t pts_correction_num_faulty_dts
Number of incorrect PTS values so far.
Definition: decode.c:87
ff_hwaccel_uninit
void ff_hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1198
av_memdup
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:304
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:107
decode_get_packet
static int decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Definition: decode.c:226
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:192
AVPacketSideData::size
size_t size
Definition: packet.h:411
AV_TIFF_SHORT
@ AV_TIFF_SHORT
Definition: exif.h:45
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
DecodeContext::frame
AVFrame * frame
Definition: decode_simple.h:39
DecodeContext::nb_draining_errors
int nb_draining_errors
Definition: decode.c:79
AV_CODEC_FLAG_COPY_OPAQUE
#define AV_CODEC_FLAG_COPY_OPAQUE
Definition: avcodec.h:279
finish
static void finish(void)
Definition: movenc.c:374
FFHWAccel
Definition: hwaccel_internal.h:34
bsf.h
guess_correct_pts
static int64_t guess_correct_pts(DecodeContext *dc, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:293
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
AV_PKT_DATA_PALETTE
@ AV_PKT_DATA_PALETTE
An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE bytes worth of palette.
Definition: packet.h:47
AVPacket::opaque_ref
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
Definition: packet.h:624
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1051
fail
#define fail()
Definition: checkasm.h:220
ff_icc_context_init
int ff_icc_context_init(FFIccContext *s, void *avctx)
Initializes an FFIccContext.
Definition: fflcms2.c:30
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1573
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
dummy
int dummy
Definition: motion.c:64
ff_lcevc_process
int ff_lcevc_process(void *logctx, AVFrame *frame)
Definition: lcevcdec.c:304
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:658
FF_SUB_CHARENC_MODE_DO_NOTHING
#define FF_SUB_CHARENC_MODE_DO_NOTHING
do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for inst...
Definition: avcodec.h:1732
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
val
static double val(void *priv, double ch)
Definition: aeval.c:77
FrameDecodeData::post_process_opaque_free
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:46
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
ff_decode_frame_props_from_pkt
int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx, AVFrame *frame, const AVPacket *pkt)
Set various frame properties from the provided packet.
Definition: decode.c:1531
pts
static int64_t pts
Definition: transcode_aac.c:644
add_metadata_from_side_data
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1519
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
AVCodecContext::max_samples
int64_t max_samples
The number of samples per frame to maximally accept.
Definition: avcodec.h:1837
AVRational::num
int num
Numerator.
Definition: rational.h:59
progressframe.h
AVFrameSideDataType
AVFrameSideDataType
Definition: frame.h:49
refstruct.h
AVSubtitleRect::ass
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:2091
av_image_check_size2
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:289
ff_decode_internal_sync
av_cold void ff_decode_internal_sync(AVCodecContext *dst, const AVCodecContext *src)
Definition: decode.c:2351
ff_frame_new_side_data_from_buf
int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef **buf)
Similar to ff_frame_new_side_data, but using an existing buffer ref.
Definition: decode.c:2185
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:413
av_refstruct_allocz
static void * av_refstruct_allocz(size_t size)
Equivalent to av_refstruct_alloc_ext(size, 0, NULL, NULL)
Definition: refstruct.h:105
AVHWDeviceContext
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:63
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AVCodecContext::get_buffer2
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1212
GET_UTF8
#define GET_UTF8(val, GET_BYTE, ERROR)
Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
Definition: common.h:488
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:916
ff_decode_content_light_new_ext
int ff_decode_content_light_new_ext(const AVCodecContext *avctx, AVFrameSideData ***sd, int *nb_sd, AVContentLightMetadata **clm)
Same as ff_decode_content_light_new, but taking a AVFrameSideData array directly instead of an AVFram...
Definition: decode.c:2239
avassert.h
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:68
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:653
AV_PKT_DATA_PARAM_CHANGE
@ AV_PKT_DATA_PARAM_CHANGE
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: packet.h:69
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
AVFrameSideData::size
size_t size
Definition: frame.h:285
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
emms_c
#define emms_c()
Definition: emms.h:89
ff_progress_frame_ref
void ff_progress_frame_ref(ProgressFrame *dst, const ProgressFrame *src)
Set dst->f to src->f and make dst a co-owner of src->f.
Definition: decode.c:1917
AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS
@ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS
Definition: packet.h:666
AVCodecContext::side_data_prefer_packet
int * side_data_prefer_packet
Decoding only.
Definition: avcodec.h:1920
stereo3d.h
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2299
get_subtitle_defaults
static void get_subtitle_defaults(AVSubtitle *sub)
Definition: decode.c:829
FrameDecodeData::post_process_opaque
void * post_process_opaque
Definition: decode.h:45
av_new_packet
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: packet.c:98
ff_decode_internal_uninit
av_cold void ff_decode_internal_uninit(AVCodecContext *avctx)
Definition: decode.c:2366
validate_avframe_allocation
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1618
AVCodecInternal::buffer_pkt
AVPacket * buffer_pkt
Temporary buffers for newly received or not yet output packets/frames.
Definition: internal.h:144
av_bsf_flush
void av_bsf_flush(AVBSFContext *ctx)
Reset the internal bitstream filter state.
Definition: bsf.c:190
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
FFHWAccel::priv_data_size
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: hwaccel_internal.h:114
AVCodecContext::nb_decoded_side_data
int nb_decoded_side_data
Definition: avcodec.h:1937
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1044
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
AVHWDeviceType
AVHWDeviceType
Definition: hwcontext.h:27
AVCodecDescriptor::type
enum AVMediaType type
Definition: codec_desc.h:40
av_refstruct_alloc_ext
static void * av_refstruct_alloc_ext(size_t size, unsigned flags, void *opaque, void(*free_cb)(AVRefStructOpaque opaque, void *obj))
A wrapper around av_refstruct_alloc_ext_c() for the common case of a non-const qualified opaque.
Definition: refstruct.h:94
av_exif_clone_ifd
AVExifMetadata * av_exif_clone_ifd(const AVExifMetadata *ifd)
Allocates a duplicate of the provided EXIF metadata struct.
Definition: exif.c:1283
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AVPacketSideData::data
uint8_t * data
Definition: packet.h:410
AVRefStructPool
AVRefStructPool is an API for a thread-safe pool of objects managed via the RefStruct API.
Definition: refstruct.c:183
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1925
decode.h
AVBSFContext::time_base_in
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: bsf.h:102
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVCodecHWConfig::pix_fmt
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:339
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2100
av_csp_primaries_id_from_desc
enum AVColorPrimaries av_csp_primaries_id_from_desc(const AVColorPrimariesDesc *prm)
Detects which enum AVColorPrimaries constant corresponds to the given complete gamut description.
Definition: csp.c:115
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1794
AV_PKT_DATA_LCEVC
@ AV_PKT_DATA_LCEVC
Raw LCEVC payload data, as a uint8_t array, with NAL emulation bytes intact.
Definition: packet.h:346
av_hwdevice_get_type_name
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:120
progress_frame_pool_init_cb
static av_cold int progress_frame_pool_init_cb(AVRefStructOpaque opaque, void *obj)
Definition: decode.c:1959
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
AVPacket::opaque
void * opaque
for some private data of the user
Definition: packet.h:613
ProgressInternal
Definition: decode.c:1877
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:639
AVCodecHWConfigInternal::hwaccel
const struct FFHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwconfig.h:35
check_progress_consistency
static void check_progress_consistency(const ProgressFrame *f)
Definition: decode.c:1882
av_content_light_metadata_alloc
AVContentLightMetadata * av_content_light_metadata_alloc(size_t *size)
Allocate an AVContentLightMetadata structure and set its fields to default values.
Definition: mastering_display_metadata.c:72
FFCodec::decode
int(* decode)(struct AVCodecContext *avctx, struct AVFrame *frame, int *got_frame_ptr, struct AVPacket *avpkt)
Decode to an AVFrame.
Definition: codec_internal.h:202
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
discard_samples
static int discard_samples(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
Definition: decode.c:319
ff_decode_get_hw_frames_ctx
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1050
ff_decode_mastering_display_new
int ff_decode_mastering_display_new(const AVCodecContext *avctx, AVFrame *frame, AVMasteringDisplayMetadata **mdm)
Wrapper around av_mastering_display_metadata_create_side_data(), which rejects side data overridden b...
Definition: decode.c:2226
DecodeContext::draining_started
int draining_started
The caller has submitted a NULL packet on input.
Definition: decode.c:84
ff_thread_get_packet
#define ff_thread_get_packet(avctx, pkt)
Definition: decode.c:222
AVCodecDescriptor::props
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
if
if(ret)
Definition: filter_design.txt:179
AVCodecContext::sub_charenc
char * sub_charenc
Character encoding of the input subtitles file.
Definition: avcodec.h:1723
AV_CODEC_FLAG2_SKIP_MANUAL
#define AV_CODEC_FLAG2_SKIP_MANUAL
Do not skip samples and export skip information as frame side data.
Definition: avcodec.h:368
AV_CODEC_PROP_INTRA_ONLY
#define AV_CODEC_PROP_INTRA_ONLY
Codec uses only intra compression.
Definition: codec_desc.h:72
AVCodecInternal::progress_frame_pool
struct AVRefStructPool * progress_frame_pool
Definition: internal.h:71
av_exif_get_tag_id
int32_t av_exif_get_tag_id(const char *name)
Retrieves the tag ID associated with the provided tag string name.
Definition: exif.c:243
ff_thread_progress_await
void ff_thread_progress_await(const ThreadProgress *pro_c, int n)
This function is a no-op in no-op mode; otherwise it waits until other threads have reached a certain...
Definition: threadprogress.c:64
AV_PKT_DATA_EXIF
@ AV_PKT_DATA_EXIF
Extensible image file format metadata.
Definition: packet.h:369
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2452
av_bsf_init
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:149
utf8_check
static int utf8_check(const uint8_t *str)
Definition: decode.c:897
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AVCodecContext::apply_cropping
int apply_cropping
Video decoding only.
Definition: avcodec.h:1821
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:677
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
av_bsf_receive_packet
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:230
AVCodec::type
enum AVMediaType type
Definition: codec.h:185
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCodecContext::nb_coded_side_data
int nb_coded_side_data
Definition: avcodec.h:1776
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
av_frame_side_data_remove
void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type from an array.
Definition: side_data.c:102
FF_CODEC_CB_TYPE_DECODE_SUB
@ FF_CODEC_CB_TYPE_DECODE_SUB
Definition: codec_internal.h:112
AVPALETTE_SIZE
#define AVPALETTE_SIZE
Definition: pixfmt.h:32
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
ff_lcevc_unref
void ff_lcevc_unref(void *opaque)
Definition: lcevcdec.c:403
AV_CODEC_PROP_BITMAP_SUB
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:111
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
av_refstruct_pool_alloc_ext
static AVRefStructPool * av_refstruct_pool_alloc_ext(size_t size, unsigned flags, void *opaque, int(*init_cb)(AVRefStructOpaque opaque, void *obj), void(*reset_cb)(AVRefStructOpaque opaque, void *obj), void(*free_entry_cb)(AVRefStructOpaque opaque, void *obj), void(*free_cb)(AVRefStructOpaque opaque))
A wrapper around av_refstruct_pool_alloc_ext_c() for the common case of a non-const qualified opaque.
Definition: refstruct.h:258
AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
@ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata associated with a video frame.
Definition: frame.h:120
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
FFLCEVCFrame::lcevc
FFLCEVCContext * lcevc
Definition: lcevcdec.h:44
AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR
#define AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR
If this flag is set and both init_cb and free_entry_cb callbacks are provided, then free_cb will be c...
Definition: refstruct.h:213
DecodeContext::pts_correction_last_dts
int64_t pts_correction_last_dts
PTS of the last frame.
Definition: decode.c:89
AV_CODEC_FLAG2_ICC_PROFILES
#define AV_CODEC_FLAG2_ICC_PROFILES
Generate/parse ICC profiles on encode/decode, as appropriate for the type of file.
Definition: avcodec.h:378
ff_icc_profile_detect_transfer
int ff_icc_profile_detect_transfer(FFIccContext *s, cmsHPROFILE profile, enum AVColorTransferCharacteristic *out_trc)
Attempt detecting the transfer characteristic that best approximates the transfer function encoded by...
Definition: fflcms2.c:300
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: packet.c:440
AVCodecInternal::draining_done
int draining_done
Definition: internal.h:146
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:179
UTF8_MAX_BYTES
#define UTF8_MAX_BYTES
Definition: decode.c:835
AVPACKET_IS_EMPTY
#define AVPACKET_IS_EMPTY(pkt)
Definition: packet_internal.h:26
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
ff_sd_global_map
const SideDataMap ff_sd_global_map[]
A map between packet and frame side data types.
Definition: avcodec.c:57
av_exif_remove_entry
int av_exif_remove_entry(void *logctx, AVExifMetadata *ifd, uint16_t id, int flags)
Remove an entry from the provided EXIF metadata struct.
Definition: exif.c:1278
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:743
AV_CODEC_EXPORT_DATA_ENHANCEMENTS
#define AV_CODEC_EXPORT_DATA_ENHANCEMENTS
Decoding only.
Definition: avcodec.h:406
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AVCodecInternal::last_pkt_props
AVPacket * last_pkt_props
Properties (timestamps+side data) extracted from the last packet passed for decoding.
Definition: internal.h:90
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
AV_PKT_DATA_NB
@ AV_PKT_DATA_NB
The number of side data types.
Definition: packet.h:379
progress_frame_pool_free_entry_cb
static av_cold void progress_frame_pool_free_entry_cb(AVRefStructOpaque opaque, void *obj)
Definition: decode.c:1984
attribute_align_arg
#define attribute_align_arg
Definition: internal.h:50
av_codec_is_decoder
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:85
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1709
f
f
Definition: af_crystalizer.c:122
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:503
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1747
SideDataMap::packet
enum AVPacketSideDataType packet
Definition: avcodec_internal.h:35
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:589
ff_progress_frame_alloc
int ff_progress_frame_alloc(AVCodecContext *avctx, ProgressFrame *f)
This function sets up the ProgressFrame, i.e.
Definition: decode.c:1888
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
byte
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:99
ff_decode_exif_attach_buffer
int ff_decode_exif_attach_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferRef **pbuf, enum AVExifHeaderMode header_mode)
Attach the data buffer to the frame.
Definition: decode.c:2458
height
#define height
Definition: dsp.h:89
AVCodecContext::extra_hw_frames
int extra_hw_frames
Video decoding only.
Definition: avcodec.h:1510
codec_internal.h
FrameDecodeData::post_process
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:44
AVCodecInternal::hwaccel_priv_data
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:130
decode_data_free
static void decode_data_free(AVRefStructOpaque unused, void *obj)
Definition: decode.c:1639
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
av_frame_copy
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:711
av_bsf_send_packet
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:202
AV_PKT_DATA_DYNAMIC_HDR10_PLUS
@ AV_PKT_DATA_DYNAMIC_HDR10_PLUS
HDR10+ dynamic metadata associated with a video frame.
Definition: packet.h:296
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
AVExifEntry::id
uint16_t id
Definition: exif.h:86
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_codec_is_decoder
static int ff_codec_is_decoder(const AVCodec *avcodec)
Internal version of av_codec_is_decoder().
Definition: codec_internal.h:309
FF_CODEC_CAP_SETS_FRAME_PROPS
#define FF_CODEC_CAP_SETS_FRAME_PROPS
Codec handles output frame properties internally instead of letting the internal logic derive them fr...
Definition: codec_internal.h:77
AVCodecInternal::bsf
struct AVBSFContext * bsf
Definition: internal.h:84
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1043
AV_FRAME_DATA_LCEVC
@ AV_FRAME_DATA_LCEVC
Raw LCEVC payload data, as a uint8_t array, with NAL emulation bytes intact.
Definition: frame.h:236
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:550
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
frame_validate
static int frame_validate(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:772
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2147
ff_frame_new_side_data_from_buf_ext
int ff_frame_new_side_data_from_buf_ext(const AVCodecContext *avctx, AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, AVBufferRef **buf)
Same as ff_frame_new_side_data_from_buf, but taking a AVFrameSideData array directly instead of an AV...
Definition: decode.c:2166
side_data_pref
static int side_data_pref(const AVCodecContext *avctx, AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type)
Check side data preference and clear existing side data from frame if needed.
Definition: decode.c:2131
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
ffcodec
static const av_always_inline FFCodec * ffcodec(const AVCodec *codec)
Definition: codec_internal.h:290
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:535
fill_frame_props
static int fill_frame_props(const AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:563
AVCHROMA_LOC_UNSPECIFIED
@ AVCHROMA_LOC_UNSPECIFIED
Definition: pixfmt.h:797
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
AVCodecHWConfigInternal
Definition: hwconfig.h:25
AV_PICTURE_TYPE_NONE
@ AV_PICTURE_TYPE_NONE
Undefined.
Definition: avutil.h:277
av_buffer_make_writable
int av_buffer_make_writable(AVBufferRef **pbuf)
Create a writable reference from a given buffer reference, avoiding data copy if possible.
Definition: buffer.c:165
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2097
frame.h
av_packet_unpack_dictionary
int av_packet_unpack_dictionary(const uint8_t *data, size_t size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: packet.c:352
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:725
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
av_refstruct_ref
void * av_refstruct_ref(void *obj)
Create a new reference to an object managed via this API, i.e.
Definition: refstruct.c:140
ProgressInternal::progress
ThreadProgress progress
Definition: decode.c:1878
AV_PRIMARY_EYE_NONE
@ AV_PRIMARY_EYE_NONE
Neither eye.
Definition: stereo3d.h:178
av_content_light_metadata_create_side_data
AVContentLightMetadata * av_content_light_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVContentLightMetadata and add it to the frame.
Definition: mastering_display_metadata.c:82
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
SideDataMap::frame
enum AVFrameSideDataType frame
Definition: avcodec_internal.h:36
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:594
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
ff_progress_frame_await
void ff_progress_frame_await(const ProgressFrame *f, int n)
Wait for earlier decoding threads to finish reference frames.
Definition: decode.c:1947
AVCodecInternal
Definition: internal.h:49
FFCodec::hw_configs
const struct AVCodecHWConfigInternal *const * hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
Definition: codec_internal.h:270
DecodeContext::side_data_pref_mask
uint64_t side_data_pref_mask
DTS of the last frame.
Definition: decode.c:95
FF_THREAD_NO_FRAME_THREADING
@ FF_THREAD_NO_FRAME_THREADING
Definition: thread.h:63
packet_side_data_get
static const AVPacketSideData * packet_side_data_get(const AVPacketSideData *sd, int nb_sd, enum AVPacketSideDataType type)
Definition: decode.c:1349
AVCodecContext::nb_side_data_prefer_packet
unsigned nb_side_data_prefer_packet
Number of entries in side_data_prefer_packet.
Definition: avcodec.h:1924
detect_colorspace
static int detect_colorspace(av_unused AVCodecContext *c, av_unused AVFrame *f)
Definition: decode.c:557
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1584
AV_FRAME_DATA_SKIP_SAMPLES
@ AV_FRAME_DATA_SKIP_SAMPLES
Recommends skipping the specified number of samples.
Definition: frame.h:109
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
av_mastering_display_metadata_alloc_size
AVMasteringDisplayMetadata * av_mastering_display_metadata_alloc_size(size_t *size)
Allocate an AVMasteringDisplayMetadata structure and set its fields to default values.
Definition: mastering_display_metadata.c:44
AVHWAccel::name
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:1961
AV_PKT_DATA_STRINGS_METADATA
@ AV_PKT_DATA_STRINGS_METADATA
A list of zero terminated key/value strings.
Definition: packet.h:169
emms.h
AVCodecInternal::is_frame_mt
int is_frame_mt
This field is set to 1 when frame threading is being used and the parent AVCodecContext of this AVCod...
Definition: internal.h:61
avcodec_send_packet
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:711
FFCodec::caps_internal
unsigned caps_internal
Internal codec capabilities FF_CODEC_CAP_*.
Definition: codec_internal.h:136
extract_packet_props
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
Definition: decode.c:175
uninit
static void uninit(AVBSFContext *ctx)
Definition: pcm_rechunk.c:68
av_packet_copy_props
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: packet.c:395
AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
@ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
Content light level (based on CTA-861.3).
Definition: frame.h:137
AVExifEntry::value
union AVExifEntry::@124 value
AVSubtitle::format
uint16_t format
Definition: avcodec.h:2095
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:581
update_frame_props
static int update_frame_props(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1666
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: side_data.c:133
ff_decode_internal_alloc
av_cold AVCodecInternal * ff_decode_internal_alloc(void)
Definition: decode.c:2346
reget_buffer_internal
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1827
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: packet.c:252
internal.h
ff_decode_receive_frame_internal
int ff_decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
Do the actual decoding and obtain a decoded frame from the decoder, if available.
Definition: decode.c:614
AVExifMetadata::entries
AVExifEntry * entries
Definition: exif.h:78
common.h
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
ff_thread_progress_init
av_cold int ff_thread_progress_init(ThreadProgress *pro, int init_mode)
Initialize a ThreadProgress.
Definition: threadprogress.c:33
AV_FRAME_DATA_STEREO3D
@ AV_FRAME_DATA_STEREO3D
Stereoscopic 3d metadata.
Definition: frame.h:64
DecodeContext::avci
AVCodecInternal avci
Definition: decode.c:62
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVExifMetadata::count
unsigned int count
Definition: exif.h:80
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1487
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:684
AVExifEntry::uint
uint64_t * uint
Definition: exif.h:109
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
Frame::frame
AVFrame * frame
Definition: ffplay.c:154
FF_CODEC_CAP_SETS_PKT_DTS
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: codec_internal.h:49
exif.h
profile
int profile
Definition: mxfenc.c:2297
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
AVCodecContext::height
int height
Definition: avcodec.h:600
decode_simple_internal
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
Definition: decode.c:417
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1465
avcodec.h
FFCodec::decode_sub
int(* decode_sub)(struct AVCodecContext *avctx, struct AVSubtitle *sub, int *got_frame_ptr, const struct AVPacket *avpkt)
Decode subtitle data to an AVSubtitle.
Definition: codec_internal.h:210
AVCodecContext::sub_charenc_mode
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:1731
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:118
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1890
avcodec_get_hw_frames_parameters
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding.
Definition: decode.c:1101
ff_reget_buffer
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available.
Definition: decode.c:1869
ret
ret
Definition: filter_design.txt:187
AVHWDeviceContext::type
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:75
AVALPHA_MODE_UNSPECIFIED
@ AVALPHA_MODE_UNSPECIFIED
Unknown alpha handling, or no alpha channel.
Definition: pixfmt.h:811
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
AVHWFramesContext::device_ctx
AVHWDeviceContext * device_ctx
The parent AVHWDeviceContext.
Definition: hwcontext.h:137
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1369
AV_CODEC_PROP_TEXT_SUB
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:116
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:783
ff_thread_sync_ref
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows to synchronize objects whose lifetime is the whole decoding process among all frame threads.
Definition: decode.c:1953
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
ff_decode_content_light_new
int ff_decode_content_light_new(const AVCodecContext *avctx, AVFrame *frame, AVContentLightMetadata **clm)
Wrapper around av_content_light_metadata_create_side_data(), which rejects side data overridden by th...
Definition: decode.c:2271
ff_thread_progress_destroy
av_cold void ff_thread_progress_destroy(ThreadProgress *pro)
Destroy a ThreadProgress.
Definition: threadprogress.c:44
AVPacket::side_data
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: packet.h:599
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1932
apply_param_change
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:114
ff_decode_frame_props
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1573
AV_FRAME_DATA_DYNAMIC_HDR_PLUS
@ AV_FRAME_DATA_DYNAMIC_HDR_PLUS
HDR dynamic metadata associated with a video frame.
Definition: frame.h:159
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVFrame::height
int height
Definition: frame.h:499
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1592
recode_subtitle
static int recode_subtitle(AVCodecContext *avctx, const AVPacket **outpkt, const AVPacket *inpkt, AVPacket *buf_pkt)
Definition: decode.c:836
channel_layout.h
av_mastering_display_metadata_create_side_data
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
Definition: mastering_display_metadata.c:58
avcodec_internal.h
av_frame_side_data_new
AVFrameSideData * av_frame_side_data_new(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, size_t size, unsigned int flags)
Add new side data entry to an array.
Definition: side_data.c:198
ff_decode_flush_buffers
av_cold void ff_decode_flush_buffers(AVCodecContext *avctx)
Definition: decode.c:2328
av_refstruct_replace
void av_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
attach_displaymatrix
static int attach_displaymatrix(AVCodecContext *avctx, AVFrame *frame, int orientation)
Definition: decode.c:2376
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
AV_FRAME_CROP_UNALIGNED
@ AV_FRAME_CROP_UNALIGNED
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:1002
side_data_stereo3d_merge
static int side_data_stereo3d_merge(AVFrameSideData *sd_frame, const AVPacketSideData *sd_pkt)
Definition: decode.c:1365
decode_receive_frame_internal
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame, unsigned flags)
Definition: decode.c:650
AV_PKT_DATA_AFD
@ AV_PKT_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: packet.h:258
ff_decode_preinit
av_cold int ff_decode_preinit(AVCodecContext *avctx)
Perform decoder initialization and validation.
Definition: decode.c:1992
AV_PKT_DATA_SKIP_SAMPLES
@ AV_PKT_DATA_SKIP_SAMPLES
Recommends skipping the specified number of samples.
Definition: packet.h:153
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:1786
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:298
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
AVPacketSideDataType
AVPacketSideDataType
Definition: packet.h:41
FF_CODEC_CB_TYPE_RECEIVE_FRAME
@ FF_CODEC_CB_TYPE_RECEIVE_FRAME
Definition: codec_internal.h:115
ProgressInternal::f
struct AVFrame * f
Definition: decode.c:1879
ff_thread_progress_reset
static void ff_thread_progress_reset(ThreadProgress *pro)
Reset the ThreadProgress.progress counter; must only be called if the ThreadProgress is not in use in...
Definition: threadprogress.h:72
FFCodec::cb_type
unsigned cb_type
This field determines the type of the codec (decoder/encoder) and also the exact callback cb implemen...
Definition: codec_internal.h:160
ff_thread_receive_frame
#define ff_thread_receive_frame(avctx, frame, flags)
Definition: decode.c:223
avcodec_get_hw_config
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:850
AVCodecInternal::buffer_frame
AVFrame * buffer_frame
Definition: internal.h:145
AV_CODEC_CAP_PARAM_CHANGE
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:103
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
ff_decode_receive_frame
int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame, unsigned flags)
avcodec_receive_frame() implementation for decoders.
Definition: decode.c:798
AVCodecInternal::draining
int draining
decoding: AVERROR_EOF has been returned from ff_decode_get_packet(); must not be used by decoders tha...
Definition: internal.h:139
FFCodec::bsfs
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding.
Definition: codec_internal.h:261
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
progress_frame_pool_reset_cb
static void progress_frame_pool_reset_cb(AVRefStructOpaque unused, void *obj)
Definition: decode.c:1976
AVHWFramesContext::initial_pool_size
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:190
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:447
AV_FRAME_FLAG_DISCARD
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn't be output.
Definition: frame.h:646
desc
const char * desc
Definition: libsvtav1.c:82
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
av_bsf_list_parse_str
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
Parse string describing list of bitstream filters and create single AVBSFContext describing the whole...
Definition: bsf.c:526
ff_decode_mastering_display_new_ext
int ff_decode_mastering_display_new_ext(const AVCodecContext *avctx, AVFrameSideData ***sd, int *nb_sd, struct AVMasteringDisplayMetadata **mdm)
Same as ff_decode_mastering_display_new, but taking a AVFrameSideData array directly instead of an AV...
Definition: decode.c:2194
side_data_map
static int side_data_map(AVFrame *dst, const AVPacketSideData *sd_src, int nb_sd_src, const SideDataMap *map)
Definition: decode.c:1473
AV_PKT_DATA_A53_CC
@ AV_PKT_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: packet.h:239
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
packet_internal.h
AV_CODEC_EXPORT_DATA_MVS
#define AV_CODEC_EXPORT_DATA_MVS
Export motion vectors through frame side data.
Definition: avcodec.h:386
ff_icc_profile_sanitize
int ff_icc_profile_sanitize(FFIccContext *s, cmsHPROFILE profile)
Sanitize an ICC profile to try and fix badly broken values.
Definition: fflcms2.c:211
mastering_display_metadata.h
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1650
ThreadingStatus
ThreadingStatus
Definition: thread.h:60
avcodec_parameters_from_context
int avcodec_parameters_from_context(struct AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: codec_par.c:138
ff_lcevc_parse_frame
int ff_lcevc_parse_frame(FFLCEVCContext *lcevc, const AVFrame *frame, int *width, int *height, void *logctx)
Definition: lcevcdec.c:333
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:71
AV_CODEC_FLAG2_EXPORT_MVS
#define AV_CODEC_FLAG2_EXPORT_MVS
Export motion vectors through frame side data.
Definition: avcodec.h:364
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
ProgressFrame
The ProgressFrame structure.
Definition: progressframe.h:73
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
SideDataMap
Definition: avcodec_internal.h:34
AVPacket
This structure stores compressed data.
Definition: packet.h:565
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
DecodeContext::pts_correction_num_faulty_pts
int64_t pts_correction_num_faulty_pts
Definition: decode.c:86
av_exif_get_entry
int av_exif_get_entry(void *logctx, AVExifMetadata *ifd, uint16_t id, int flags, AVExifEntry **value)
Get an entry with the tagged ID from the EXIF metadata struct.
Definition: exif.c:1189
av_frame_side_data_get
static const AVFrameSideData * av_frame_side_data_get(AVFrameSideData *const *sd, const int nb_sd, enum AVFrameSideDataType type)
Wrapper around av_frame_side_data_get_c() to workaround the limitation that for any type T the conver...
Definition: frame.h:1151
ff_lcevc_alloc
int ff_lcevc_alloc(FFLCEVCContext **plcevc, void *logctx)
Definition: lcevcdec.c:374
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
AV_FRAME_DATA_EXIF
@ AV_FRAME_DATA_EXIF
Extensible image file format metadata.
Definition: frame.h:262
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
FrameDecodeData::hwaccel_priv
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:51
imgutils.h
hwcontext.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AV_CODEC_HW_CONFIG_METHOD_AD_HOC
@ AV_CODEC_HW_CONFIG_METHOD_AD_HOC
The codec supports this format by some ad-hoc method.
Definition: codec.h:327
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
AVCodecHWConfig
Definition: codec.h:330
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:646
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
ff_copy_palette
int ff_copy_palette(void *dst, const AVPacket *src, void *logctx)
Check whether the side-data of src contains a palette of size AVPALETTE_SIZE; if so,...
Definition: decode.c:2284
width
#define width
Definition: dsp.h:89
AVCodecHWConfigInternal::public
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwconfig.h:30
decode_bsfs_init
static int decode_bsfs_init(AVCodecContext *avctx)
Definition: decode.c:186
FFLCEVCFrame
Definition: lcevcdec.h:43
codec_desc.h
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:120
side_data_exif_parse
static int side_data_exif_parse(AVFrame *dst, const AVPacketSideData *sd_pkt)
Definition: decode.c:1401
FFLCEVCFrame::frame
struct AVFrame * frame
Definition: lcevcdec.h:45
AVHWAccel::pix_fmt
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:1982
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:624
DecodeContext
Definition: decode.c:61
av_frame_side_data_add
AVFrameSideData * av_frame_side_data_add(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, AVBufferRef **buf, unsigned int flags)
Add a new side data entry to an array from an existing AVBufferRef.
Definition: side_data.c:223
FF_REGET_BUFFER_FLAG_READONLY
#define FF_REGET_BUFFER_FLAG_READONLY
the returned buffer does not need to be writable
Definition: decode.h:128
src
#define src
Definition: vp8dsp.c:248
Frame::height
int height
Definition: ffplay.c:161
AVPacket::side_data_elems
int side_data_elems
Definition: packet.h:600
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
FF_SUB_CHARENC_MODE_IGNORE
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8
Definition: avcodec.h:1735
min
float min
Definition: vorbis_enc_data.h:429