FFmpeg
videotoolbox.c
Go to the documentation of this file.
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "config_components.h"
25 #include "videotoolbox.h"
26 #include "libavutil/attributes.h"
28 #include "libavutil/mem.h"
29 #include "vt_internal.h"
30 #include "libavutil/avutil.h"
31 #include "libavutil/hwcontext.h"
32 #include "libavutil/pixdesc.h"
33 #include "bytestream.h"
34 #include "decode.h"
35 #include "internal.h"
36 #include "h264dec.h"
37 #include "hevc/hevcdec.h"
38 #include "hwaccel_internal.h"
39 #include "mpegvideo.h"
40 #include "proresdec.h"
41 #include <Availability.h>
42 #include <AvailabilityMacros.h>
43 #include <TargetConditionals.h>
44 
/* SDK-compat fallbacks: define the VideoToolbox decoder-specification keys
 * and CoreMedia codec FourCCs that are absent from older SDKs.  The string
 * and FourCC values match Apple's published constants. */
#ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
# define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
#endif
#ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
# define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
#endif

#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif

#if !HAVE_KCMVIDEOCODECTYPE_VP9
enum { kCMVideoCodecType_VP9 = 'vp09' };
#endif

#if !HAVE_KCMVIDEOCODECTYPE_AV1
enum { kCMVideoCodecType_AV1 = 'av01' };
#endif

/* Scratch headroom added when building the MPEG-4 esds extradata blob. */
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
65 
66 typedef struct VTHWFrame {
67  CVPixelBufferRef pixbuf;
69 } VTHWFrame;
70 
71 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
72 {
74  av_buffer_unref(&ref->hw_frames_ctx);
75  CVPixelBufferRelease(ref->pixbuf);
76 
77  av_free(data);
78 }
79 
81  const uint8_t *buffer,
82  uint32_t size)
83 {
84  void *tmp;
85 
86  tmp = av_fast_realloc(vtctx->bitstream,
87  &vtctx->allocated_size,
88  size);
89 
90  if (!tmp)
91  return AVERROR(ENOMEM);
92 
93  vtctx->bitstream = tmp;
94  memcpy(vtctx->bitstream, buffer, size);
95  vtctx->bitstream_size = size;
96 
97  return 0;
98 }
99 
101  const uint8_t *buffer,
102  uint32_t size)
103 {
104  void *tmp;
105 
106  tmp = av_fast_realloc(vtctx->bitstream,
107  &vtctx->allocated_size,
108  vtctx->bitstream_size + size);
109 
110  if (!tmp)
111  return AVERROR(ENOMEM);
112 
113  vtctx->bitstream = tmp;
114  memcpy(vtctx->bitstream + vtctx->bitstream_size, buffer, size);
115  vtctx->bitstream_size += size;
116 
117  return 0;
118 }
119 
120 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
121 {
122  int ret;
123  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
124 
125  if (!ref->pixbuf) {
126  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
128  return AVERROR_EXTERNAL;
129  }
130 
131  frame->crop_right = 0;
132  frame->crop_left = 0;
133  frame->crop_top = 0;
134  frame->crop_bottom = 0;
135 
136  if ((ret = av_vt_pixbuf_set_attachments(avctx, ref->pixbuf, frame)) < 0)
137  return ret;
138 
139  frame->data[3] = (uint8_t*)ref->pixbuf;
140 
141  if (ref->hw_frames_ctx) {
142  av_buffer_unref(&frame->hw_frames_ctx);
143  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
144  if (!frame->hw_frames_ctx)
145  return AVERROR(ENOMEM);
146  }
147 
148  return 0;
149 }
150 
152 {
153  size_t size = sizeof(VTHWFrame);
154  uint8_t *data = NULL;
155  AVBufferRef *buf = NULL;
156  int ret = ff_attach_decode_data(avctx, frame);
157  FrameDecodeData *fdd;
158  if (ret < 0)
159  return ret;
160 
161  data = av_mallocz(size);
162  if (!data)
163  return AVERROR(ENOMEM);
165  if (!buf) {
166  av_freep(&data);
167  return AVERROR(ENOMEM);
168  }
169  frame->buf[0] = buf;
170 
171  fdd = frame->private_ref;
173 
174  frame->width = avctx->width;
175  frame->height = avctx->height;
176  frame->format = avctx->pix_fmt;
177 
178  return 0;
179 }
180 
/* Write one byte at p; name mirrors the AV_WB16/AV_WN32 writer macros. */
#define AV_W8(p, v) *(p) = (v)
182 
/* Insert H.26x emulation-prevention bytes (0x03 after any 00 00 followed by
 * a byte <= 0x03).  With dst == NULL only the escaped size is computed.
 * Returns the escaped size in bytes. */
static int escape_ps(uint8_t *dst, const uint8_t *src, int src_size)
{
    uint8_t *out = dst;
    int pos, total = src_size;

    for (pos = 0; pos < src_size; pos++) {
        int needs_escape = pos + 2 < src_size &&
                           src[pos]     == 0x00 &&
                           src[pos + 1] == 0x00 &&
                           src[pos + 2] <= 0x03;

        if (needs_escape) {
            if (out) {
                *out++ = src[pos++];   // first 0x00
                *out++ = src[pos];     // second 0x00
                *out++ = 0x03;         // emulation-prevention byte
            } else {
                pos++;                 // counting pass: just skip the pair
            }
            total++;
        } else if (out) {
            *out++ = src[pos];
        }
    }

    if (out)
        av_assert0((out - dst) == total);

    return total;
}
211 
213 {
214  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
215  H264Context *h = avctx->priv_data;
216  CFDataRef data = NULL;
217  uint8_t *p;
218  int sps_size = escape_ps(NULL, h->ps.sps->data, h->ps.sps->data_size);
219  int pps_size = escape_ps(NULL, h->ps.pps->data, h->ps.pps->data_size);
220  int vt_extradata_size;
221  uint8_t *vt_extradata;
222 
223  vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
224  vt_extradata = av_malloc(vt_extradata_size);
225 
226  if (!vt_extradata)
227  return NULL;
228 
229  p = vt_extradata;
230 
231  AV_W8(p + 0, 1); /* version */
232  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
233  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
234  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
235  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
236  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
237  AV_WB16(p + 6, sps_size);
238  p += 8;
239  p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
240  AV_W8(p + 0, 1); /* number of pps */
241  AV_WB16(p + 1, pps_size);
242  p += 3;
243  p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);
244 
245  av_assert0(p - vt_extradata == vt_extradata_size);
246 
247  // save sps header (profile/level) used to create decoder session,
248  // so we can detect changes and recreate it.
249  if (vtctx)
250  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
251 
252  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
253  av_free(vt_extradata);
254  return data;
255 }
256 
258 {
259  HEVCContext *h = avctx->priv_data;
260  int i, num_vps = 0, num_sps = 0, num_pps = 0;
261  const HEVCPPS *pps = h->pps;
262  const HEVCSPS *sps = pps->sps;
263  const HEVCVPS *vps = sps->vps;
264  PTLCommon ptlc = vps->ptl.general_ptl;
265  VUI vui = sps->vui;
266  uint8_t parallelismType;
267  CFDataRef data = NULL;
268  uint8_t *p;
269  int vt_extradata_size = 23 + 3 + 3 + 3;
270  uint8_t *vt_extradata;
271 
272 #define COUNT_SIZE_PS(T, t) \
273  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
274  if (h->ps.t##ps_list[i]) { \
275  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
276  vt_extradata_size += 2 + escape_ps(NULL, lps->data, lps->data_size); \
277  num_##t##ps++; \
278  } \
279  }
280 
281  COUNT_SIZE_PS(V, v)
282  COUNT_SIZE_PS(S, s)
283  COUNT_SIZE_PS(P, p)
284 
285  vt_extradata = av_malloc(vt_extradata_size);
286  if (!vt_extradata)
287  return NULL;
288  p = vt_extradata;
289 
290  /* unsigned int(8) configurationVersion = 1; */
291  AV_W8(p + 0, 1);
292 
293  /*
294  * unsigned int(2) general_profile_space;
295  * unsigned int(1) general_tier_flag;
296  * unsigned int(5) general_profile_idc;
297  */
298  AV_W8(p + 1, ptlc.profile_space << 6 |
299  ptlc.tier_flag << 5 |
300  ptlc.profile_idc);
301 
302  /* unsigned int(32) general_profile_compatibility_flags; */
303  for (i = 0; i < 4; i++) {
304  AV_W8(p + 2 + i, ptlc.profile_compatibility_flag[i * 8] << 7 |
305  ptlc.profile_compatibility_flag[i * 8 + 1] << 6 |
306  ptlc.profile_compatibility_flag[i * 8 + 2] << 5 |
307  ptlc.profile_compatibility_flag[i * 8 + 3] << 4 |
308  ptlc.profile_compatibility_flag[i * 8 + 4] << 3 |
309  ptlc.profile_compatibility_flag[i * 8 + 5] << 2 |
310  ptlc.profile_compatibility_flag[i * 8 + 6] << 1 |
311  ptlc.profile_compatibility_flag[i * 8 + 7]);
312  }
313 
314  /* unsigned int(48) general_constraint_indicator_flags; */
315  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
316  ptlc.interlaced_source_flag << 6 |
317  ptlc.non_packed_constraint_flag << 5 |
318  ptlc.frame_only_constraint_flag << 4);
319  AV_W8(p + 7, 0);
320  AV_WN32(p + 8, 0);
321 
322  /* unsigned int(8) general_level_idc; */
323  AV_W8(p + 12, ptlc.level_idc);
324 
325  /*
326  * bit(4) reserved = ‘1111’b;
327  * unsigned int(12) min_spatial_segmentation_idc;
328  */
329  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
330  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
331 
332  /*
333  * bit(6) reserved = ‘111111’b;
334  * unsigned int(2) parallelismType;
335  */
337  parallelismType = 0;
338  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
339  parallelismType = 0;
340  else if (pps->entropy_coding_sync_enabled_flag)
341  parallelismType = 3;
342  else if (pps->tiles_enabled_flag)
343  parallelismType = 2;
344  else
345  parallelismType = 1;
346  AV_W8(p + 15, 0xfc | parallelismType);
347 
348  /*
349  * bit(6) reserved = ‘111111’b;
350  * unsigned int(2) chromaFormat;
351  */
352  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
353 
354  /*
355  * bit(5) reserved = ‘11111’b;
356  * unsigned int(3) bitDepthLumaMinus8;
357  */
358  AV_W8(p + 17, (sps->bit_depth - 8) | 0xf8);
359 
360  /*
361  * bit(5) reserved = ‘11111’b;
362  * unsigned int(3) bitDepthChromaMinus8;
363  */
364  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xf8);
365 
366  /* bit(16) avgFrameRate; */
367  AV_WB16(p + 19, 0);
368 
369  /*
370  * bit(2) constantFrameRate;
371  * bit(3) numTemporalLayers;
372  * bit(1) temporalIdNested;
373  * unsigned int(2) lengthSizeMinusOne;
374  */
375  AV_W8(p + 21, 0 << 6 |
376  sps->max_sub_layers << 3 |
377  sps->temporal_id_nesting << 2 |
378  3);
379 
380  /* unsigned int(8) numOfArrays; */
381  AV_W8(p + 22, 3);
382 
383  p += 23;
384 
385 #define APPEND_PS(T, t) \
386  /* \
387  * bit(1) array_completeness; \
388  * unsigned int(1) reserved = 0; \
389  * unsigned int(6) NAL_unit_type; \
390  */ \
391  AV_W8(p, 1 << 7 | \
392  HEVC_NAL_##T##PS & 0x3f); \
393  /* unsigned int(16) numNalus; */ \
394  AV_WB16(p + 1, num_##t##ps); \
395  p += 3; \
396  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
397  if (h->ps.t##ps_list[i]) { \
398  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
399  int size = escape_ps(p + 2, lps->data, lps->data_size); \
400  /* unsigned int(16) nalUnitLength; */ \
401  AV_WB16(p, size); \
402  /* bit(8*nalUnitLength) nalUnit; */ \
403  p += 2 + size; \
404  } \
405  }
406 
407  APPEND_PS(V, v)
408  APPEND_PS(S, s)
409  APPEND_PS(P, p)
410 
411  av_assert0(p - vt_extradata == vt_extradata_size);
412 
413  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
414  av_free(vt_extradata);
415  return data;
416 }
417 
419  const AVBufferRef *buffer_ref,
420  const uint8_t *buffer,
421  uint32_t size)
422 {
423  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
424  H264Context *h = avctx->priv_data;
425 
426  if (h->is_avc == 1) {
427  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
428  }
429 
430  return 0;
431 }
432 
434  int type,
435  const uint8_t *buffer,
436  uint32_t size)
437 {
438  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
439  H264Context *h = avctx->priv_data;
440 
441  // save sps header (profile/level) used to create decoder session
442  if (!vtctx->sps[0])
443  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
444 
445  if (type == H264_NAL_SPS) {
446  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
447  vtctx->reconfig_needed = true;
448  memcpy(vtctx->sps, buffer + 1, 3);
449  }
450  }
451 
452  // pass-through SPS/PPS changes to the decoder
454 }
455 
457  const uint8_t *buffer,
458  uint32_t size)
459 {
460  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
461  void *tmp;
462 
463  tmp = av_fast_realloc(vtctx->bitstream,
464  &vtctx->allocated_size,
465  vtctx->bitstream_size+size+4);
466  if (!tmp)
467  return AVERROR(ENOMEM);
468 
469  vtctx->bitstream = tmp;
470 
471  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
472  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
473 
474  vtctx->bitstream_size += size + 4;
475 
476  return 0;
477 }
478 
480  const uint8_t *buffer,
481  uint32_t size)
482 {
483  H264Context *h = avctx->priv_data;
484 
485  if (h->is_avc == 1)
486  return 0;
487 
489 }
490 
491 #if CONFIG_VIDEOTOOLBOX
492 // Return the AVVideotoolboxContext that matters currently. Where it comes from
493 // depends on the API used.
494 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
495 {
496  // Somewhat tricky because the user can call av_videotoolbox_default_free()
497  // at any time, even when the codec is closed.
498  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
499  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
500  if (vtctx->vt_ctx)
501  return vtctx->vt_ctx;
502  }
503  return avctx->hwaccel_context;
504 }
505 
506 static void videotoolbox_stop(AVCodecContext *avctx)
507 {
508  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
509  if (!videotoolbox)
510  return;
511 
512  if (videotoolbox->cm_fmt_desc) {
513  CFRelease(videotoolbox->cm_fmt_desc);
514  videotoolbox->cm_fmt_desc = NULL;
515  }
516 
517  if (videotoolbox->session) {
518  VTDecompressionSessionInvalidate(videotoolbox->session);
519  CFRelease(videotoolbox->session);
520  videotoolbox->session = NULL;
521  }
522 }
523 
525 {
526  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
527  if (!vtctx)
528  return 0;
529 
530  av_freep(&vtctx->bitstream);
531  if (vtctx->frame)
532  CVPixelBufferRelease(vtctx->frame);
533 
534  if (vtctx->vt_ctx)
535  videotoolbox_stop(avctx);
536 
538  av_freep(&vtctx->vt_ctx);
539 
540  return 0;
541 }
542 
543 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
544 {
545  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
546  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
547  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
548  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
549  int width = CVPixelBufferGetWidth(pixbuf);
550  int height = CVPixelBufferGetHeight(pixbuf);
551  AVHWFramesContext *cached_frames;
552  VTHWFrame *ref;
553  int ret;
554 
555  if (!frame->buf[0] || frame->data[3]) {
556  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
558  return AVERROR_EXTERNAL;
559  }
560 
561  ref = (VTHWFrame *)frame->buf[0]->data;
562 
563  if (ref->pixbuf)
564  CVPixelBufferRelease(ref->pixbuf);
565  ref->pixbuf = vtctx->frame;
566  vtctx->frame = NULL;
567 
568  // Old API code path.
569  if (!vtctx->cached_hw_frames_ctx)
570  return 0;
571 
572  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
573 
574  if (cached_frames->sw_format != sw_format ||
575  cached_frames->width != width ||
576  cached_frames->height != height) {
577  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
578  AVHWFramesContext *hw_frames;
579  AVVTFramesContext *hw_ctx;
580  if (!hw_frames_ctx)
581  return AVERROR(ENOMEM);
582 
583  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
584  hw_frames->format = cached_frames->format;
585  hw_frames->sw_format = sw_format;
586  hw_frames->width = width;
587  hw_frames->height = height;
588  hw_ctx = hw_frames->hwctx;
589  hw_ctx->color_range = avctx->color_range;
590 
591  ret = av_hwframe_ctx_init(hw_frames_ctx);
592  if (ret < 0) {
593  av_buffer_unref(&hw_frames_ctx);
594  return ret;
595  }
596 
598  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
599  }
600 
601  av_buffer_unref(&ref->hw_frames_ctx);
602  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
603  if (!ref->hw_frames_ctx)
604  return AVERROR(ENOMEM);
605 
606  return 0;
607 }
608 
609 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
610 {
611  int i;
612  uint8_t b;
613 
614  for (i = 3; i >= 0; i--) {
615  b = (length >> (i * 7)) & 0x7F;
616  if (i != 0)
617  b |= 0x80;
618 
619  bytestream2_put_byteu(pb, b);
620  }
621 }
622 
/* Wrap avctx->extradata in an MPEG-4 esds atom (ES_Descr -> DecoderConfig ->
 * DecSpecificInfo -> SLConfig) and return it as CFData, or NULL on OOM.
 * The exact byte layout follows ISO 14496-1 descriptor syntax. */
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;

    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;

    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0); // version
    bytestream2_put_ne24(&pb, 0); // flags

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0); // esid
    bytestream2_put_byteu(&pb, 0); // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11); // stream type
    bytestream2_put_ne24(&pb, 0); // buffer size
    bytestream2_put_ne32(&pb, 0); // max bitrate
    bytestream2_put_ne32(&pb, 0); // avg bitrate

    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);

    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);

    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01); // length
    bytestream2_put_byteu(&pb, 0x02); //

    s = bytestream2_size_p(&pb);

    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

    av_freep(&rw_extradata);
    return data;
}
673 
/* Wrap the raw bitstream (not copied — kCFAllocatorNull block allocator, so
 * `buffer` must outlive the returned sample) in a CMSampleBuffer suitable for
 * VTDecompressionSessionDecodeFrame.  Returns NULL on any CM error. */
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
                                                           void *buffer,
                                                           int size)
{
    OSStatus status;
    CMBlockBufferRef  block_buf;
    CMSampleBufferRef sample_buf;

    block_buf  = NULL;
    sample_buf = NULL;

    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                buffer,             // memoryBlock
                                                size,               // blockLength
                                                kCFAllocatorNull,   // blockAllocator
                                                NULL,               // customBlockSource
                                                0,                  // offsetToData
                                                size,               // dataLength
                                                0,                  // flags
                                                &block_buf);

    if (!status) {
        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
                                      block_buf,            // dataBuffer
                                      TRUE,                 // dataReady
                                      0,                    // makeDataReadyCallback
                                      0,                    // makeDataReadyRefcon
                                      fmt_desc,             // formatDescription
                                      1,                    // numSamples
                                      0,                    // numSampleTimingEntries
                                      NULL,                 // sampleTimingArray
                                      0,                    // numSampleSizeEntries
                                      NULL,                 // sampleSizeArray
                                      &sample_buf);
    }

    /* The sample buffer keeps its own reference to the block buffer. */
    if (block_buf)
        CFRelease(block_buf);

    return sample_buf;
}
715 
716 static void videotoolbox_decoder_callback(void *opaque,
717  void *sourceFrameRefCon,
718  OSStatus status,
719  VTDecodeInfoFlags flags,
720  CVImageBufferRef image_buffer,
721  CMTime pts,
722  CMTime duration)
723 {
724  VTContext *vtctx = opaque;
725 
726  if (vtctx->frame) {
727  CVPixelBufferRelease(vtctx->frame);
728  vtctx->frame = NULL;
729  }
730 
731  if (!image_buffer) {
732  // kVTVideoDecoderReferenceMissingErr, defined since the macOS 12 SDKs
733  if (status != -17694)
734  vtctx->reconfig_needed = true;
735 
737  "vt decoder cb: output image buffer is null: %i, reconfig %d\n",
738  status, vtctx->reconfig_needed);
739  return;
740  }
741 
742  vtctx->frame = CVPixelBufferRetain(image_buffer);
743 }
744 
/* Submit the accumulated bitstream to the decompression session and wait
 * synchronously for the output callback.  Returns an OSStatus (-1 when the
 * sample buffer could not be created). */
static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
{
    OSStatus status;
    CMSampleBufferRef sample_buf;
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
                                                   vtctx->bitstream,
                                                   vtctx->bitstream_size);

    if (!sample_buf)
        return -1;

    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
                                               sample_buf,
                                               0,       // decodeFlags
                                               NULL,    // sourceFrameRefCon
                                               0);      // infoFlagsOut
    if (status == noErr)
        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);

    CFRelease(sample_buf);

    return status;
}
771 
772 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
773  CFDictionaryRef decoder_spec,
774  int width,
775  int height)
776 {
777  CMFormatDescriptionRef cm_fmt_desc;
778  OSStatus status;
779 
780  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
781  codec_type,
782  width,
783  height,
784  decoder_spec, // Dictionary of extension
785  &cm_fmt_desc);
786 
787  if (status)
788  return NULL;
789 
790  return cm_fmt_desc;
791 }
792 
/* Build the destination-image-buffer attributes dictionary for the
 * decompression session: pixel format (only when nonzero), size, IOSurface
 * backing and GL(ES) compatibility.  Caller releases the returned dict. */
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                             int height,
                                                             OSType pix_fmt)
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;
    CFNumberRef w;
    CFNumberRef h;

    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);

    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    /* Empty dict = default IOSurface properties. */
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);

    if (pix_fmt)
        CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
#if TARGET_OS_IPHONE
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
#else
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
#endif

    /* The dictionary retained everything it stores; drop our references. */
    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);
    CFRelease(w);
    CFRelease(h);

    return buffer_attributes;
}
834 
835 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
836  AVCodecContext *avctx)
837 {
838  CFMutableDictionaryRef avc_info;
839  CFDataRef data = NULL;
840 
841  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
842  0,
843  &kCFTypeDictionaryKeyCallBacks,
844  &kCFTypeDictionaryValueCallBacks);
845 
846  CFDictionarySetValue(config_info,
850  kCFBooleanTrue);
851 
852  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
853  1,
854  &kCFTypeDictionaryKeyCallBacks,
855  &kCFTypeDictionaryValueCallBacks);
856 
857  switch (codec_type) {
858  case kCMVideoCodecType_MPEG4Video :
859  if (avctx->extradata_size)
860  data = videotoolbox_esds_extradata_create(avctx);
861  if (data)
862  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
863  break;
864  case kCMVideoCodecType_H264 :
866  if (data)
867  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
868  break;
871  if (data)
872  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
873  break;
874 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
875  case kCMVideoCodecType_VP9 :
877  if (data)
878  CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
879  break;
880 #endif
881 #if CONFIG_AV1_VIDEOTOOLBOX_HWACCEL
882  case kCMVideoCodecType_AV1 :
884  if (data)
885  CFDictionarySetValue(avc_info, CFSTR("av1C"), data);
886  break;
887 #endif
888  default:
889  break;
890  }
891 
892  CFDictionarySetValue(config_info,
893  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
894  avc_info);
895 
896  if (data)
897  CFRelease(data);
898 
899  CFRelease(avc_info);
900  return config_info;
901 }
902 
903 static int videotoolbox_start(AVCodecContext *avctx)
904 {
905  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
906  OSStatus status;
907  VTDecompressionOutputCallbackRecord decoder_cb;
908  CFDictionaryRef decoder_spec;
909  CFDictionaryRef buf_attr;
910 
911  if (!videotoolbox) {
912  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
913  return -1;
914  }
915 
916  switch( avctx->codec_id ) {
917  case AV_CODEC_ID_H263 :
918  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
919  break;
920  case AV_CODEC_ID_H264 :
921  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
922  break;
923  case AV_CODEC_ID_HEVC :
924  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
925  break;
927  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
928  break;
930  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
931  break;
932  case AV_CODEC_ID_MPEG4 :
933  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
934  break;
935  case AV_CODEC_ID_PRORES :
936  switch (avctx->codec_tag) {
937  default:
938  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
940  case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
941  case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
942  case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
943  case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
944  case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
945  case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
946  videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
947  break;
948  }
949  break;
950  case AV_CODEC_ID_VP9 :
951  videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
952  break;
953  case AV_CODEC_ID_AV1 :
954  videotoolbox->cm_codec_type = kCMVideoCodecType_AV1;
955  break;
956  default :
957  break;
958  }
959 
960 #if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
961  if (avctx->codec_id == AV_CODEC_ID_PRORES) {
962  if (__builtin_available(macOS 10.9, *)) {
963  VTRegisterProfessionalVideoWorkflowVideoDecoders();
964  }
965  }
966 #endif
967 
968 #if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
969  if (__builtin_available(macOS 11.0, *)) {
970  VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
971  }
972 #endif
973 
974  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
975 
976  if (!decoder_spec) {
977  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
978  return -1;
979  }
980 
981  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
982  decoder_spec,
983  avctx->width,
984  avctx->height);
985  if (!videotoolbox->cm_fmt_desc) {
986  if (decoder_spec)
987  CFRelease(decoder_spec);
988 
989  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
990  return -1;
991  }
992 
993  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
994  avctx->height,
995  videotoolbox->cv_pix_fmt_type);
996 
997  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
998  decoder_cb.decompressionOutputRefCon = avctx->internal->hwaccel_priv_data;
999 
1000  status = VTDecompressionSessionCreate(NULL, // allocator
1001  videotoolbox->cm_fmt_desc, // videoFormatDescription
1002  decoder_spec, // videoDecoderSpecification
1003  buf_attr, // destinationImageBufferAttributes
1004  &decoder_cb, // outputCallback
1005  &videotoolbox->session); // decompressionSessionOut
1006 
1007  if (decoder_spec)
1008  CFRelease(decoder_spec);
1009  if (buf_attr)
1010  CFRelease(buf_attr);
1011 
1012  switch (status) {
1013  case kVTVideoDecoderNotAvailableNowErr:
1014  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
1015  return AVERROR(ENOSYS);
1016  case kVTVideoDecoderUnsupportedDataFormatErr:
1017  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
1018  return AVERROR(ENOSYS);
1019  case kVTCouldNotFindVideoDecoderErr:
1020  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
1021  return AVERROR(ENOSYS);
1022  case kVTVideoDecoderMalfunctionErr:
1023  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
1024  return AVERROR(EINVAL);
1025  case kVTVideoDecoderBadDataErr:
1026  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
1027  return AVERROR_INVALIDDATA;
1028  case 0:
1029  return 0;
1030  default:
1031  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
1032  return AVERROR_UNKNOWN;
1033  }
1034 }
1035 
1036 static const char *videotoolbox_error_string(OSStatus status)
1037 {
1038  switch (status) {
1039  case kVTVideoDecoderBadDataErr:
1040  return "bad data";
1041  case kVTVideoDecoderMalfunctionErr:
1042  return "decoder malfunction";
1043  case kVTInvalidSessionErr:
1044  return "invalid session";
1045  }
1046  return "unknown";
1047 }
1048 
1050 {
1051  OSStatus status;
1052  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
1053  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1054 
1055  if (vtctx->reconfig_needed == true) {
1056  vtctx->reconfig_needed = false;
1057  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
1058  videotoolbox_stop(avctx);
1059  if (videotoolbox_start(avctx) != 0) {
1060  return AVERROR_EXTERNAL;
1061  }
1062  }
1063 
1064  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
1065  return AVERROR_INVALIDDATA;
1066 
1067  status = videotoolbox_session_decode_frame(avctx);
1068  if (status != noErr) {
1069  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
1070  vtctx->reconfig_needed = true;
1071  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
1072  return AVERROR_UNKNOWN;
1073  }
1074 
1075  if (!vtctx->frame)
1076  return AVERROR_UNKNOWN;
1077 
1078  return videotoolbox_buffer_create(avctx, frame);
1079 }
1080 
1081 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
1082 {
1083  H264Context *h = avctx->priv_data;
1084  AVFrame *frame = h->cur_pic_ptr->f;
1085  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1087  vtctx->bitstream_size = 0;
1088  return ret;
1089 }
1090 
1091 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
1092  const AVBufferRef *buffer_ref,
1093  const uint8_t *buffer,
1094  uint32_t size)
1095 {
1096  HEVCContext *h = avctx->priv_data;
1097  AVFrame *frame = h->cur_frame->f;
1098 
1099  frame->crop_right = 0;
1100  frame->crop_left = 0;
1101  frame->crop_top = 0;
1102  frame->crop_bottom = 0;
1103 
1104  return 0;
1105 }
1106 
1107 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
1108  const uint8_t *buffer,
1109  uint32_t size)
1110 {
1112 }
1113 
1114 
1115 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
1116  int type,
1117  const uint8_t *buffer,
1118  uint32_t size)
1119 {
1121 }
1122 
1123 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
1124 {
1125  HEVCContext *h = avctx->priv_data;
1126  AVFrame *frame = h->cur_frame->f;
1127  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1128  int ret;
1129 
1131  vtctx->bitstream_size = 0;
1132  return ret;
1133 }
1134 
1135 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
1136  const AVBufferRef *buffer_ref,
1137  const uint8_t *buffer,
1138  uint32_t size)
1139 {
1140  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1141 
1142  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1143 }
1144 
1145 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
1146  const uint8_t *buffer,
1147  uint32_t size)
1148 {
1149  return 0;
1150 }
1151 
1152 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1153 {
1154  MpegEncContext *s = avctx->priv_data;
1155  AVFrame *frame = s->cur_pic.ptr->f;
1156 
1157  return ff_videotoolbox_common_end_frame(avctx, frame);
1158 }
1159 
1160 static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
1161  const AVBufferRef *buffer_ref,
1162  const uint8_t *buffer,
1163  uint32_t size)
1164 {
1165  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1166  ProresContext *ctx = avctx->priv_data;
1167 
1168  /* Videotoolbox decodes both fields simultaneously */
1169  if (!ctx->first_field)
1170  return 0;
1171 
1172  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1173 }
1174 
1175 static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
1176  const uint8_t *buffer,
1177  uint32_t size)
1178 {
1179  return 0;
1180 }
1181 
1182 static int videotoolbox_prores_end_frame(AVCodecContext *avctx)
1183 {
1184  ProresContext *ctx = avctx->priv_data;
1185  AVFrame *frame = ctx->frame;
1186 
1187  if (!ctx->first_field)
1188  return 0;
1189 
1190  return ff_videotoolbox_common_end_frame(avctx, frame);
1191 }
1192 
/**
 * Choose the output pixel format VideoToolbox should decode into, based
 * on the stream's software pixel format (avctx->sw_pix_fmt).
 *
 * Selection order: alpha formats first, then >10-bit, then 4:4:4,
 * 4:2:2, and 10-bit 4:2:0, falling back to NV12.  Each branch is gated
 * on the SDK providing the matching kCVPixelFormatType_* constant
 * (HAVE_* config macros), so availability depends on build environment.
 */
static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
    int depth;
    const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    if (!descriptor)
        return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()

    depth = descriptor->comp[0].depth;

    // Alpha requires a packed AYUV format.
    if (descriptor->flags & AV_PIX_FMT_FLAG_ALPHA)
        return (depth > 8) ? AV_PIX_FMT_AYUV64 : AV_PIX_FMT_AYUV;

#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
    // More than 10 bits: 16-bit biplanar, 4:4:4 (P416) or 4:2:2 (P216).
    if (depth > 10)
        return descriptor->log2_chroma_w == 0 ? AV_PIX_FMT_P416 : AV_PIX_FMT_P216;
#endif

#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
    // 4:4:4 chroma (no horizontal subsampling).
    if (descriptor->log2_chroma_w == 0) {
#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
        if (depth <= 8)
            return AV_PIX_FMT_NV24;
#endif
        return AV_PIX_FMT_P410;
    }
#endif
#if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
    // 4:2:2 chroma (no vertical subsampling).
    if (descriptor->log2_chroma_h == 0) {
#if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
        if (depth <= 8)
            return AV_PIX_FMT_NV16;
#endif
        return AV_PIX_FMT_P210;
    }
#endif
#if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
    // 10-bit 4:2:0.
    if (depth > 8) {
        return AV_PIX_FMT_P010;
    }
#endif

    return AV_PIX_FMT_NV12;
}
1235 
1236 static AVVideotoolboxContext *videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1237  bool full_range)
1238 {
1239  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1240 
1241  if (ret) {
1242  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1243  if (cv_pix_fmt_type == 0) {
1244  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1245  }
1246  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1247  }
1248 
1249  return ret;
1250 }
1251 
1253 {
1254  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1255  AVHWFramesContext *hw_frames;
1256  AVVTFramesContext *hw_ctx;
1257  int err;
1258  bool full_range;
1259 
1260  vtctx->logctx = avctx;
1261 
1262  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx &&
1263  avctx->hwaccel_context)
1264  return videotoolbox_start(avctx);
1265 
1266  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1267  av_log(avctx, AV_LOG_ERROR,
1268  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1269  return AVERROR(EINVAL);
1270  }
1271 
1272  vtctx->vt_ctx = videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1273  if (!vtctx->vt_ctx) {
1274  err = AVERROR(ENOMEM);
1275  goto fail;
1276  }
1277 
1278  if (avctx->hw_frames_ctx) {
1279  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1280  } else {
1282  if (!avctx->hw_frames_ctx) {
1283  err = AVERROR(ENOMEM);
1284  goto fail;
1285  }
1286 
1287  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1288  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1289  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1290  hw_frames->width = avctx->width;
1291  hw_frames->height = avctx->height;
1292  hw_ctx = hw_frames->hwctx;
1293  hw_ctx->color_range = avctx->color_range;
1294 
1295  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1296  if (err < 0) {
1297  av_buffer_unref(&avctx->hw_frames_ctx);
1298  goto fail;
1299  }
1300  }
1301 
1303  if (!vtctx->cached_hw_frames_ctx) {
1304  err = AVERROR(ENOMEM);
1305  goto fail;
1306  }
1307 
1309  vtctx->vt_ctx->cv_pix_fmt_type =
1311  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1312  const AVPixFmtDescriptor *attempted_format =
1313  av_pix_fmt_desc_get(hw_frames->sw_format);
1314  av_log(avctx, AV_LOG_ERROR,
1315  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1316  "a VideoToolbox format!\n",
1317  attempted_format ? attempted_format->name : "<unknown>",
1319  err = AVERROR(EINVAL);
1320  goto fail;
1321  }
1322 
1323  err = videotoolbox_start(avctx);
1324  if (err < 0)
1325  goto fail;
1326 
1327  return 0;
1328 
1329 fail:
1330  ff_videotoolbox_uninit(avctx);
1331  return err;
1332 }
1333 
1335  AVBufferRef *hw_frames_ctx)
1336 {
1337  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1338 
1339  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1340  frames_ctx->width = avctx->coded_width;
1341  frames_ctx->height = avctx->coded_height;
1342  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1343 
1344  return 0;
1345 }
1346 
1348  .p.name = "h263_videotoolbox",
1349  .p.type = AVMEDIA_TYPE_VIDEO,
1350  .p.id = AV_CODEC_ID_H263,
1351  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1352  .alloc_frame = ff_videotoolbox_alloc_frame,
1353  .start_frame = videotoolbox_mpeg_start_frame,
1354  .decode_slice = videotoolbox_mpeg_decode_slice,
1355  .end_frame = videotoolbox_mpeg_end_frame,
1356  .frame_params = ff_videotoolbox_frame_params,
1358  .uninit = ff_videotoolbox_uninit,
1359  .priv_data_size = sizeof(VTContext),
1360 };
1361 
1363  .p.name = "hevc_videotoolbox",
1364  .p.type = AVMEDIA_TYPE_VIDEO,
1365  .p.id = AV_CODEC_ID_HEVC,
1366  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1367  .alloc_frame = ff_videotoolbox_alloc_frame,
1368  .start_frame = videotoolbox_hevc_start_frame,
1369  .decode_slice = videotoolbox_hevc_decode_slice,
1370  .decode_params = videotoolbox_hevc_decode_params,
1371  .end_frame = videotoolbox_hevc_end_frame,
1372  .frame_params = ff_videotoolbox_frame_params,
1374  .uninit = ff_videotoolbox_uninit,
1375  .priv_data_size = sizeof(VTContext),
1376 };
1377 
1379  .p.name = "h264_videotoolbox",
1380  .p.type = AVMEDIA_TYPE_VIDEO,
1381  .p.id = AV_CODEC_ID_H264,
1382  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1383  .alloc_frame = ff_videotoolbox_alloc_frame,
1384  .start_frame = ff_videotoolbox_h264_start_frame,
1385  .decode_slice = ff_videotoolbox_h264_decode_slice,
1386  .decode_params = videotoolbox_h264_decode_params,
1387  .end_frame = videotoolbox_h264_end_frame,
1388  .frame_params = ff_videotoolbox_frame_params,
1390  .uninit = ff_videotoolbox_uninit,
1391  .priv_data_size = sizeof(VTContext),
1392 };
1393 
1395  .p.name = "mpeg1_videotoolbox",
1396  .p.type = AVMEDIA_TYPE_VIDEO,
1397  .p.id = AV_CODEC_ID_MPEG1VIDEO,
1398  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1399  .alloc_frame = ff_videotoolbox_alloc_frame,
1400  .start_frame = videotoolbox_mpeg_start_frame,
1401  .decode_slice = videotoolbox_mpeg_decode_slice,
1402  .end_frame = videotoolbox_mpeg_end_frame,
1403  .frame_params = ff_videotoolbox_frame_params,
1405  .uninit = ff_videotoolbox_uninit,
1406  .priv_data_size = sizeof(VTContext),
1407 };
1408 
1410  .p.name = "mpeg2_videotoolbox",
1411  .p.type = AVMEDIA_TYPE_VIDEO,
1412  .p.id = AV_CODEC_ID_MPEG2VIDEO,
1413  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1414  .alloc_frame = ff_videotoolbox_alloc_frame,
1415  .start_frame = videotoolbox_mpeg_start_frame,
1416  .decode_slice = videotoolbox_mpeg_decode_slice,
1417  .end_frame = videotoolbox_mpeg_end_frame,
1418  .frame_params = ff_videotoolbox_frame_params,
1420  .uninit = ff_videotoolbox_uninit,
1421  .priv_data_size = sizeof(VTContext),
1422 };
1423 
1425  .p.name = "mpeg4_videotoolbox",
1426  .p.type = AVMEDIA_TYPE_VIDEO,
1427  .p.id = AV_CODEC_ID_MPEG4,
1428  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1429  .alloc_frame = ff_videotoolbox_alloc_frame,
1430  .start_frame = videotoolbox_mpeg_start_frame,
1431  .decode_slice = videotoolbox_mpeg_decode_slice,
1432  .end_frame = videotoolbox_mpeg_end_frame,
1433  .frame_params = ff_videotoolbox_frame_params,
1435  .uninit = ff_videotoolbox_uninit,
1436  .priv_data_size = sizeof(VTContext),
1437 };
1438 
1440  .p.name = "prores_videotoolbox",
1441  .p.type = AVMEDIA_TYPE_VIDEO,
1442  .p.id = AV_CODEC_ID_PRORES,
1443  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1444  .alloc_frame = ff_videotoolbox_alloc_frame,
1445  .start_frame = videotoolbox_prores_start_frame,
1446  .decode_slice = videotoolbox_prores_decode_slice,
1447  .end_frame = videotoolbox_prores_end_frame,
1448  .frame_params = ff_videotoolbox_frame_params,
1450  .uninit = ff_videotoolbox_uninit,
1451  .priv_data_size = sizeof(VTContext),
1452 };
1453 
1454 #endif /* CONFIG_VIDEOTOOLBOX */
videotoolbox_buffer_release
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
Definition: videotoolbox.c:71
flags
const SwsFlags flags[]
Definition: swscale.c:72
AVVideotoolboxContext::cm_codec_type
int cm_codec_type
CoreMedia codec type that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:78
AVCodecContext::hwaccel_context
void * hwaccel_context
Legacy hardware accelerator context.
Definition: avcodec.h:1441
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
APPEND_PS
#define APPEND_PS(T, t)
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_videotoolbox_common_end_frame
int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_videotoolbox_uninit
int ff_videotoolbox_uninit(AVCodecContext *avctx)
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:200
kCMVideoCodecType_AV1
@ kCMVideoCodecType_AV1
Definition: videotoolbox.c:61
ff_videotoolbox_buffer_append
int ff_videotoolbox_buffer_append(VTContext *vtctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:100
FFHWAccel::p
AVHWAccel p
The public AVHWAccel.
Definition: hwaccel_internal.h:38
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
FrameDecodeData
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:33
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:337
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:435
pixdesc.h
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:263
internal.h
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
AVPixFmtDescriptor::name
const char * name
Definition: pixdesc.h:70
b
#define b
Definition: input.c:43
av_vt_pixbuf_set_attachments
int av_vt_pixbuf_set_attachments(void *log_ctx, CVPixelBufferRef pixbuf, const AVFrame *src)
Definition: hwcontext_videotoolbox.c:675
data
const char data[16]
Definition: mxf.c:149
ProresContext
Definition: proresdec.h:43
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
AV_W8
#define AV_W8(p, v)
Definition: videotoolbox.c:181
PTLCommon::profile_space
uint8_t profile_space
Definition: ps.h:128
COUNT_SIZE_PS
#define COUNT_SIZE_PS(T, t)
mpegvideo.h
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
ff_mpeg2_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg2_videotoolbox_hwaccel
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:220
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AVVideotoolboxContext
This struct holds all the information that needs to be passed between the caller and libavcodec for i...
Definition: videotoolbox.h:57
PTLCommon::profile_compatibility_flag
uint8_t profile_compatibility_flag[32]
Definition: ps.h:131
escape_ps
static int escape_ps(uint8_t *dst, const uint8_t *src, int src_size)
Definition: videotoolbox.c:183
S
#define S(s, c, i)
Definition: flacdsp_template.c:46
PTLCommon::progressive_source_flag
uint8_t progressive_source_flag
Definition: ps.h:132
ff_hevc_videotoolbox_hwaccel
const struct FFHWAccel ff_hevc_videotoolbox_hwaccel
ff_videotoolbox_h264_start_frame
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx, const AVBufferRef *buffer_ref, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:414
FFHWAccel
Definition: hwaccel_internal.h:34
PTLCommon::interlaced_source_flag
uint8_t interlaced_source_flag
Definition: ps.h:133
ff_videotoolbox_avcc_extradata_create
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:212
fail
#define fail()
Definition: checkasm.h:224
ff_h263_videotoolbox_hwaccel
const struct FFHWAccel ff_h263_videotoolbox_hwaccel
proresdec.h
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
pts
static int64_t pts
Definition: transcode_aac.c:644
kCMVideoCodecType_HEVC
@ kCMVideoCodecType_HEVC
Definition: videotoolbox.c:53
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
VTContext::allocated_size
int allocated_size
Definition: vt_internal.h:33
ff_videotoolbox_common_init
int ff_videotoolbox_common_init(AVCodecContext *avctx)
PTLCommon::frame_only_constraint_flag
uint8_t frame_only_constraint_flag
Definition: ps.h:135
videotoolbox.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
VTContext::bitstream
uint8_t * bitstream
Definition: vt_internal.h:27
kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:49
AVHWFramesContext::height
int height
Definition: hwcontext.h:220
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
bytestream2_put_ne24
#define bytestream2_put_ne24
Definition: bytestream.h:128
full_range
bool full_range
Definition: hwcontext_videotoolbox.c:47
av_fast_realloc
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
Definition: mem.c:497
vt_internal.h
PTLCommon
Definition: ps.h:127
FrameDecodeData::hwaccel_priv_post_process
int(* hwaccel_priv_post_process)(void *logctx, AVFrame *frame)
Per-frame private data for hwaccels.
Definition: decode.h:53
s
#define s(width, name)
Definition: cbs_vp9.c:198
VTHWFrame
Definition: videotoolbox.c:66
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
bytestream2_put_buffer
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:286
ff_mpeg1_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg1_videotoolbox_hwaccel
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:222
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
ff_videotoolbox_vpcc_extradata_create
CFDataRef ff_videotoolbox_vpcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox_vp9.c:65
kCMVideoCodecType_VP9
@ kCMVideoCodecType_VP9
Definition: videotoolbox.c:57
P
#define P
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
decode.h
PTLCommon::non_packed_constraint_flag
uint8_t non_packed_constraint_flag
Definition: ps.h:134
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
PTLCommon::profile_idc
uint8_t profile_idc
Definition: ps.h:130
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
av_fallthrough
#define av_fallthrough
Definition: attributes.h:67
AVVTFramesContext
Definition: hwcontext_videotoolbox.h:45
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
PTLCommon::tier_flag
uint8_t tier_flag
Definition: ps.h:129
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
if
if(ret)
Definition: filter_design.txt:179
VTContext::bitstream_size
int bitstream_size
Definition: vt_internal.h:30
ff_attach_decode_data
int ff_attach_decode_data(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1692
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3772
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:213
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:677
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_CODEC_ID_AV1
@ AV_CODEC_ID_AV1
Definition: codec_id.h:284
hwaccel_internal.h
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
AVVTFramesContext::color_range
enum AVColorRange color_range
Definition: hwcontext_videotoolbox.h:46
AVHWFramesContext::device_ref
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
Definition: hwcontext.h:129
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
V
#define V
Definition: avdct.c:32
AV_PIX_FMT_P410
#define AV_PIX_FMT_P410
Definition: pixfmt.h:617
AVVideotoolboxContext::session
VTDecompressionSessionRef session
Videotoolbox decompression session object.
Definition: videotoolbox.h:61
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:423
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:94
ff_videotoolbox_frame_params
int ff_videotoolbox_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
ff_videotoolbox_h264_decode_slice
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:475
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
videotoolbox_common_decode_slice
static int videotoolbox_common_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:452
VTHWFrame::pixbuf
CVPixelBufferRef pixbuf
Definition: videotoolbox.c:67
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:415
PutByteContext
Definition: bytestream.h:37
hwcontext_videotoolbox.h
ff_prores_videotoolbox_hwaccel
const struct FFHWAccel ff_prores_videotoolbox_hwaccel
ff_videotoolbox_hvcc_extradata_create
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:257
hevcdec.h
height
#define height
Definition: dsp.h:89
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:372
AVCodecInternal::hwaccel_priv_data
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:130
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
av_bswap32
#define av_bswap32
Definition: bswap.h:47
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
VUI
Definition: ps.h:98
AV_PIX_FMT_AYUV64
#define AV_PIX_FMT_AYUV64
Definition: pixfmt.h:601
ff_videotoolbox_av1c_extradata_create
CFDataRef ff_videotoolbox_av1c_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox_av1.c:31
AVVideotoolboxContext::cm_fmt_desc
CMVideoFormatDescriptionRef cm_fmt_desc
CoreMedia Format Description that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:73
AV_PIX_FMT_NV16
@ AV_PIX_FMT_NV16
interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:198
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
attributes.h
AV_PIX_FMT_P216
#define AV_PIX_FMT_P216
Definition: pixfmt.h:620
AV_PIX_FMT_P210
#define AV_PIX_FMT_P210
Definition: pixfmt.h:616
VTContext
Definition: vt_internal.h:25
AV_PIX_FMT_AYUV
@ AV_PIX_FMT_AYUV
packed AYUV 4:4:4:4, 32bpp (1 Cr & Cb sample per 1x1 Y & A samples), AYUVAYUV...
Definition: pixfmt.h:442
AVHWAccel::name
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:1961
kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:46
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
h264dec.h
H264Context
H264Context.
Definition: h264dec.h:338
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
AV_PIX_FMT_NV24
@ AV_PIX_FMT_NV24
planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:371
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:228
VTContext::frame
CVImageBufferRef frame
Definition: vt_internal.h:36
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1487
bytestream2_put_ne32
#define bytestream2_put_ne32
Definition: bytestream.h:129
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1465
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:118
bytestream2_put_ne16
#define bytestream2_put_ne16
Definition: bytestream.h:127
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_videotoolbox_alloc_frame
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: videotoolbox.c:151
AVHWFramesContext::hwctx
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
Definition: hwcontext.h:153
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
Definition: videotoolbox.c:64
av_map_videotoolbox_format_to_pixfmt
enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt)
Convert a VideoToolbox (actually CoreVideo) format to AVPixelFormat.
Definition: hwcontext_videotoolbox.c:151
H264_NAL_SPS
@ H264_NAL_SPS
Definition: h264.h:41
AVCodecContext
main external API structure.
Definition: avcodec.h:439
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
VTContext::vt_ctx
struct AVVideotoolboxContext * vt_ctx
Definition: vt_internal.h:43
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
ff_mpeg4_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg4_videotoolbox_hwaccel
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
HEVCContext
Definition: hevcdec.h:490
PTLCommon::level_idc
uint8_t level_idc
Definition: ps.h:147
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
pps
uint64_t pps
Definition: dovi_rpuenc.c:36
videotoolbox_postproc_frame
static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
Definition: videotoolbox.c:120
VTContext::logctx
void * logctx
Definition: vt_internal.h:49
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
VTHWFrame::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: videotoolbox.c:68
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:602
VUI::min_spatial_segmentation_idc
int min_spatial_segmentation_idc
Definition: ps.h:120
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
VTContext::cached_hw_frames_ctx
struct AVBufferRef * cached_hw_frames_ctx
Definition: vt_internal.h:39
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
avutil.h
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
HEVCVPS
Definition: ps.h:171
HEVCSPS
Definition: ps.h:255
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
HEVCPPS
Definition: ps.h:374
w
uint8_t w
Definition: llvidencdsp.c:39
ff_videotoolbox_buffer_copy
int ff_videotoolbox_buffer_copy(VTContext *vtctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:80
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:464
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_PIX_FMT_P416
#define AV_PIX_FMT_P416
Definition: pixfmt.h:621
ff_h264_videotoolbox_hwaccel
const struct FFHWAccel ff_h264_videotoolbox_hwaccel
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVVideotoolboxContext::cv_pix_fmt_type
OSType cv_pix_fmt_type
CVPixelBuffer Format Type that Videotoolbox will use for decoded frames.
Definition: videotoolbox.h:68
av_map_videotoolbox_format_from_pixfmt2
uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range)
Same as av_map_videotoolbox_format_from_pixfmt function, but can map and return full range pixel form...
Definition: hwcontext_videotoolbox.c:185
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
bytestream.h
hwcontext.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:646
videotoolbox_h264_decode_params
static int videotoolbox_h264_decode_params(AVCodecContext *avctx, int type, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:429
width
#define width
Definition: dsp.h:89
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
VTContext::reconfig_needed
bool reconfig_needed
Definition: vt_internal.h:47
VTContext::sps
uint8_t sps[3]
Definition: vt_internal.h:46
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
src
#define src
Definition: vp8dsp.c:248
duration
static int64_t duration
Definition: ffplay.c:329
bytestream2_size_p
static av_always_inline int bytestream2_size_p(const PutByteContext *p)
Definition: bytestream.h:207
AV_CODEC_ID_PRORES
@ AV_CODEC_ID_PRORES
Definition: codec_id.h:200