#include <VideoToolbox/VideoToolbox.h>
#include <CoreVideo/CoreVideo.h>
#include <CoreMedia/CoreMedia.h>
#include <TargetConditionals.h>
#include <Availability.h>

# define kCVImageBufferColorPrimaries_ITU_R_2020   CFSTR("ITU_R_2020")
# define kCVImageBufferTransferFunction_ITU_R_2020 CFSTR("ITU_R_2020")
# define kCVImageBufferYCbCrMatrix_ITU_R_2020      CFSTR("ITU_R_2020")
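/*
 * This listing is an excerpt of what appears to be FFmpeg's VideoToolbox
 * H.264 encoder (registered further down as "h264_videotoolbox"); lines
 * between the fragments are not part of the excerpt, and the gaps are
 * marked with ellipsis comments. The CFSTR() defines above are presumably
 * fallbacks for SDKs that do not declare the BT.2020 CoreVideo constants.
 * The parameter list that follows is the tail of a forward declaration
 * whose function name is outside the excerpt; the same signature
 * reappears in the extradata-population fragment near the end.
 */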
                                   CFStringRef     profile_level,
                                   CFNumberRef     gamma_level,
                                   CFDictionaryRef enc_info,
                                   CFDictionaryRef pixel_buffer_info);
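/*
 * The next fragment is from count_nalus() (the name is visible at its
 * call site further down): it walks the encoded sample's CMBlockBuffer
 * and counts NAL units by reading each big-endian AVCC length prefix and
 * skipping that many payload bytes.
 */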
                       CMSampleBufferRef sample_buffer,
    /* ... */
    size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);

    if (length_code_size > 4)
        /* ... */
    while (offset < src_size) {
        /* ... */
        status = CMBlockBufferCopyDataBytes(block, /* ... */
        for (i = 0; i < length_code_size; i++) {
            /* ... */
            box_len |= size_buf[i];
        /* ... */
        curr_src_len = box_len + length_code_size;
        offset += curr_src_len;
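/*
 * A minimal standalone sketch (not part of this file) of the big-endian
 * length-prefix parse that the loop above performs; the shift that the
 * excerpt omits is shown explicitly. Names here are illustrative only.
 */
#include <stddef.h>
#include <stdint.h>

static size_t parse_avcc_box_len(const uint8_t *size_buf, size_t length_code_size)
{
    size_t box_len = 0;
    for (size_t i = 0; i < length_code_size; i++) {
        box_len <<= 8;           /* accumulate most-significant byte first */
        box_len |= size_buf[i];
    }
    return box_len;              /* NAL payload size, excluding the prefix */
}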
                           CMVideoFormatDescriptionRef vid_fmt,
    /* ... */
    size_t total_size = 0;
    /* ... */
    int is_count_bad = 0;
    /* ... */
    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, /* ... */
    for (i = 0; i < ps_count || is_count_bad; i++) {
        /* ... */
        status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, /* ... */
        if (i > 0 && is_count_bad) status = 0;
                           CMVideoFormatDescriptionRef vid_fmt,
    /* ... */
    int is_count_bad = 0;
    /* ... */
    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, /* ... */
    for (i = 0; i < ps_count || is_count_bad; i++) {
        /* ... */
        status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, /* ... */
        if (i > 0 && is_count_bad) status = 0;
        /* ... */
        next_offset = offset + sizeof(start_code) + ps_size;
        if (dst_size < next_offset) {
            /* ... */
        memcpy(dst + offset, ps, ps_size);
        offset = next_offset;
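/*
 * The two fragments above extract the H.264 parameter sets (SPS/PPS) from
 * the CMVideoFormatDescription: the first pass sizes them, the second
 * copies each set into the destination buffer prefixed with an Annex B
 * start code, bounds-checking against dst_size. is_count_bad apparently
 * works around devices that report a bogus parameter-set count: the loop
 * then keeps going until the getter fails, and a failure after the first
 * set is not treated as an error.
 */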
    CMVideoFormatDescriptionRef vid_fmt;
    /* ... */
    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
                                  void *sourceFrameCtx,
                                  /* ... */
                                  VTEncodeInfoFlags flags,
                                  CMSampleBufferRef sample_buffer)
    /* ... */
        if (sample_buffer) CFRelease(sample_buffer);
    /* ... */
    if (status || !sample_buffer) {
                                 CMSampleBufferRef sample_buffer,
    /* ... */
    CMVideoFormatDescriptionRef vid_fmt;
    /* ... */
    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    /* ... */
    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, /* ... */
                                CFStringRef *profile_level_val)
    /* ... */
    *profile_level_val = NULL;
    /* ... */
    switch (vtctx->level) {
        case  0: *profile_level_val = kVTProfileLevel_H264_Baseline_AutoLevel; break;
        case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
        case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
        case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
        case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
        case 40: *profile_level_val = kVTProfileLevel_H264_Baseline_4_0;       break;
        case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
        case 42: *profile_level_val = kVTProfileLevel_H264_Baseline_4_2;       break;
        case 50: *profile_level_val = kVTProfileLevel_H264_Baseline_5_0;       break;
        case 51: *profile_level_val = kVTProfileLevel_H264_Baseline_5_1;       break;
        case 52: *profile_level_val = kVTProfileLevel_H264_Baseline_5_2;       break;
    /* ... */
    switch (vtctx->level) {
        case  0: *profile_level_val = kVTProfileLevel_H264_Main_AutoLevel; break;
        case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
        case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
        case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
        case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
        case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
        case 42: *profile_level_val = kVTProfileLevel_H264_Main_4_2;       break;
        case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
        case 51: *profile_level_val = kVTProfileLevel_H264_Main_5_1;       break;
        case 52: *profile_level_val = kVTProfileLevel_H264_Main_5_2;       break;
    /* ... */
    switch (vtctx->level) {
        case  0: *profile_level_val = kVTProfileLevel_H264_High_AutoLevel; break;
        case 30: *profile_level_val = kVTProfileLevel_H264_High_3_0;       break;
        case 31: *profile_level_val = kVTProfileLevel_H264_High_3_1;       break;
        case 32: *profile_level_val = kVTProfileLevel_H264_High_3_2;       break;
        case 40: *profile_level_val = kVTProfileLevel_H264_High_4_0;       break;
        case 41: *profile_level_val = kVTProfileLevel_H264_High_4_1;       break;
        case 42: *profile_level_val = kVTProfileLevel_H264_High_4_2;       break;
        case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
        case 51: *profile_level_val = kVTProfileLevel_H264_High_5_1;       break;
        case 52: *profile_level_val = kVTProfileLevel_H264_High_5_2;       break;
    /* ... */
    if (!*profile_level_val) {
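/*
 * Profile/level mapping, above: the integer level option (13, 30, 31, ...
 * 52, with 0 meaning "auto") is translated into the matching
 * kVTProfileLevel_H264_{Baseline,Main,High}_* constant for the selected
 * profile; when no constant was assigned, the branch below presumably
 * rejects the profile/level combination as invalid.
 */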
                               int *av_pixel_format,
    /* ... */
                           kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
                           kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    /* ... */
                           kCVPixelFormatType_420YpCbCr8PlanarFullRange :
                           kCVPixelFormatType_420YpCbCr8Planar;
    /* ... */
        CFDictionarySetValue(dict,
                             kCVImageBufferColorPrimariesKey,
                             /* ... */
        CFDictionarySetValue(dict,
                             kCVImageBufferTransferFunctionKey,
                             /* ... */
        CFDictionarySetValue(dict,
                             kCVImageBufferYCbCrMatrixKey,
                             /* ... */
                                       CFMutableDictionaryRef *dict)
    /* ... */
    CFNumberRef cv_color_format_num = NULL;
    CFNumberRef width_num           = NULL;
    CFNumberRef height_num          = NULL;
    CFMutableDictionaryRef pixel_buffer_info = NULL;
    /* ... */
    if (status) return status;
    /* ... */
    pixel_buffer_info = CFDictionaryCreateMutable(
        /* ... */
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks);

    if (!pixel_buffer_info) goto pbinfo_nomem;

    cv_color_format_num = CFNumberCreate(kCFAllocatorDefault, /* ... */
    if (!cv_color_format_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferPixelFormatTypeKey,
                         cv_color_format_num);
    /* ... */
    width_num = CFNumberCreate(kCFAllocatorDefault, /* ... */
    if (!width_num) goto pbinfo_nomem;
    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferWidthKey,
                         /* ... */
    height_num = CFNumberCreate(kCFAllocatorDefault, /* ... */
    if (!height_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferHeightKey,
                         /* ... */
    *dict = pixel_buffer_info;
    /* ... */
    if (pixel_buffer_info) CFRelease(pixel_buffer_info);
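/*
 * The fragment above builds the CVPixelBuffer attributes dictionary
 * (pixel format type, width, height) that is later passed to
 * VTCompressionSessionCreate. Allocation failures funnel through the
 * pbinfo_nomem label, which apparently releases whatever has been created
 * so far and returns AVERROR(ENOMEM); the trailing CFRelease shown here
 * belongs to that cleanup path.
 */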
                                  CFStringRef *primaries)
    /* ... */
        *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
    /* ... */
                                    CFStringRef *transfer_fnc,
                                    CFNumberRef *gamma_level)
    /* ... */
    *transfer_fnc = NULL;
    /* ... */
        *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
    /* ... */
        *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
    /* ... */
        *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
        *gamma_level  = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
    /* ... */
        *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
        *gamma_level  = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
    /* ... */
        *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
    /* ... */
        *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
    /* ... */
        *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
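/*
 * These three helpers translate the AVCodecContext color metadata
 * (color_primaries, color_trc, colorspace) into the corresponding
 * CoreVideo CFString constants; the BT.470 gamma transfer
 * characteristics are expressed through
 * kCVImageBufferTransferFunction_UseGamma plus an explicit gamma
 * CFNumber (presumably 2.2 and 2.8, the values those curves use).
 */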
                               CFStringRef              profile_level,
                               CFNumberRef              gamma_level,
                               CFDictionaryRef          enc_info,
                               CFDictionaryRef          pixel_buffer_info,
                               VTCompressionSessionRef *session)
    /* ... */
    CFNumberRef bit_rate_num;
    /* ... */
    int status = VTCompressionSessionCreate(kCFAllocatorDefault, /* ... */

    if (status || !vtctx->session) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);

#if !TARGET_OS_IPHONE
        /* ... */
        av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
    /* ... */
    bit_rate_num = CFNumberCreate(kCFAllocatorDefault, /* ... */
    if (!bit_rate_num) return AVERROR(ENOMEM);

    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_AverageBitRate,
                                  /* ... */
    CFRelease(bit_rate_num);
    /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_ProfileLevel,
                                  /* ... */
    CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault, /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_MaxKeyFrameInterval,
                                  /* ... */
        av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
    /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_MoreFramesBeforeStart,
                                  /* ... */
    if (status == kVTPropertyNotSupportedErr) {
        av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
    /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_MoreFramesAfterEnd,
                                  /* ... */
    if (status == kVTPropertyNotSupportedErr) {
        av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
    /* ... */
    CFMutableDictionaryRef par;
    /* ... */
    num = CFNumberCreate(kCFAllocatorDefault, /* ... */
    den = CFNumberCreate(kCFAllocatorDefault, /* ... */
    par = CFDictionaryCreateMutable(kCFAllocatorDefault, /* ... */
                                    &kCFCopyStringDictionaryKeyCallBacks,
                                    &kCFTypeDictionaryValueCallBacks);

    if (!par || !num || !den) {
        if (par) CFRelease(par);
        if (num) CFRelease(num);
        if (den) CFRelease(den);
    /* ... */
    CFDictionarySetValue(/* ... */
                         kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
                         /* ... */
    CFDictionarySetValue(/* ... */
                         kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
                         /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_PixelAspectRatio,
                                  /* ... */
               "Error setting pixel aspect ratio to %d:%d: %d.\n",
    /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_TransferFunction,
                                  /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_YCbCrMatrix,
                                  /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_ColorPrimaries,
                                  /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kCVImageBufferGammaLevelKey,
                                  /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_AllowFrameReordering,
                                  /* ... */
        av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
    /* ... */
                      kVTH264EntropyMode_CABAC :
                      kVTH264EntropyMode_CAVLC;

    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_H264EntropyMode,
                                  /* ... */
    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_RealTime,
                                  /* ... */
    status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
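/*
 * The fragment above creates the VTCompressionSession and pushes the
 * encoder configuration onto it one VTSessionSetProperty call at a time:
 * average bitrate, profile/level, max key-frame interval, the
 * frames_before/frames_after hints (ignored when the device reports
 * kVTPropertyNotSupportedErr), pixel aspect ratio, color metadata,
 * frame reordering (B-frames), CABAC/CAVLC entropy mode and the realtime
 * hint, and finally calls VTCompressionSessionPrepareToEncodeFrames.
 */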
    CFMutableDictionaryRef enc_info;
    CFMutableDictionaryRef pixel_buffer_info;
    /* ... */
    CFStringRef  profile_level;
    CFBooleanRef has_b_frames_cfbool;
    CFNumberRef  gamma_level = NULL;
    /* ... */
        av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
    /* ... */
        av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
    /* ... */
    enc_info = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        /* ... */
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks
    /* ... */
    if (!enc_info) return AVERROR(ENOMEM);

#if !TARGET_OS_IPHONE
    /* ... */
        CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder, kCFBooleanTrue);
    /* ... */
        CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder, kCFBooleanTrue);
    /* ... */
        pixel_buffer_info = NULL;
    /* ... */
    status = VTSessionCopyProperty(vtctx->session,
                                   kVTCompressionPropertyKey_AllowFrameReordering,
                                   kCFAllocatorDefault,
                                   &has_b_frames_cfbool);
    /* ... */
        vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
        CFRelease(has_b_frames_cfbool);
    /* ... */
        CFRelease(gamma_level);
    /* ... */
    if (pixel_buffer_info)
        CFRelease(pixel_buffer_info);

    CFRelease(enc_info);
    CFArrayRef      attachments;
    CFDictionaryRef attachment;
    CFBooleanRef    not_sync;
    /* ... */
    attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
    len = !attachments ? 0 : CFArrayGetCount(attachments);
    /* ... */
        *is_key_frame = true;
    /* ... */
    attachment = CFArrayGetValueAtIndex(attachments, 0);

    if (CFDictionaryGetValueIfPresent(attachment,
                                      kCMSampleAttachmentKey_NotSync,
                                      (const void **)&not_sync))
        /* ... */
        *is_key_frame = !CFBooleanGetValue(not_sync);
    /* ... */
        *is_key_frame = true;
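/*
 * Keyframe detection, above: CoreMedia marks non-sync samples with
 * kCMSampleAttachmentKey_NotSync, so a sample is treated as a keyframe
 * when the attachments array is empty or the NotSync flag is absent or
 * false.
 */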
                                      size_t length_code_size,
                                      CMSampleBufferRef sample_buffer,
    /* ... */
    size_t src_size           = CMSampleBufferGetTotalSampleSize(sample_buffer);
    size_t remaining_src_size = src_size;
    size_t remaining_dst_size = dst_size;
    size_t src_offset         = 0;
    /* ... */
    CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);

    if (length_code_size > 4) {
    /* ... */
    while (remaining_src_size > 0) {
        size_t curr_src_len;
        size_t curr_dst_len;
        /* ... */
        status = CMBlockBufferCopyDataBytes(block, /* ... */
        for (i = 0; i < length_code_size; i++) {
            /* ... */
            box_len |= size_buf[i];
        /* ... */
        curr_src_len = box_len + length_code_size;
        /* ... */
        if (remaining_src_size < curr_src_len) {
        /* ... */
        if (remaining_dst_size < curr_dst_len) {
        /* ... */
        status = CMBlockBufferCopyDataBytes(block,
                                            src_offset + length_code_size,
                                            /* ... */
        src_offset += curr_src_len;
        dst_data   += curr_dst_len;
        /* ... */
        remaining_src_size -= curr_src_len;
        remaining_dst_size -= curr_dst_len;
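/*
 * The loop above converts one encoded sample from AVCC framing to
 * Annex B: for each NAL unit it reads the length prefix, writes a start
 * code plus the payload into the destination (so curr_dst_len is
 * presumably sizeof(start_code) + box_len, while curr_src_len is
 * length_code_size + box_len), and advances both cursors, checking the
 * remaining source and destination space as it goes.
 */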
                                CMSampleBufferRef sample_buffer,
    /* ... */
    size_t  length_code_size;
    size_t  header_size = 0;
    /* ... */
    size_t  out_buf_size;
    /* ... */
    int64_t time_base_num;
    /* ... */
    CMVideoFormatDescriptionRef vid_fmt;
    /* ... */
    if (status) return status;
    /* ... */
    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    /* ... */
    if (status) return status;
    /* ... */
    status = count_nalus(length_code_size, sample_buffer, &nalu_count);
    /* ... */
    in_buf_size  = CMSampleBufferGetTotalSampleSize(sample_buffer);
    out_buf_size = header_size +
                   in_buf_size +
                   nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
    /* ... */
    if (status) return status;
    /* ... */
                                      pkt->data + header_size,
                                      pkt->size - header_size
    /* ... */
    pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
    dts = CMSampleBufferGetDecodeTimeStamp     (sample_buffer);

    if (CMTIME_IS_INVALID(dts)) {
    /* ... */
    pkt->pts  = pts.value / time_base_num;
    pkt->dts  = dts.value / time_base_num - dts_delta;
    pkt->size = out_buf_size;
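/*
 * Packet assembly, above: the output buffer is sized as the original
 * sample size plus the (possible) parameter-set header and the growth
 * from replacing each length prefix with a start code, then the AVCC
 * payload is rewritten into pkt->data past the header. pts/dts come from
 * the sample buffer's CMTime values, rescaled by the stream time base;
 * dts is additionally shifted by dts_delta, apparently to keep dts
 * behind pts when frame reordering (B-frames) is enabled.
 */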
                             size_t *contiguous_buf_size)
    /* ... */
    int av_format = frame->format;
    /* ... */
               "Could not get pixel format for color format '%s' range '%s'.\n",
    /* ... */
    if (range_guessed) {
        /* ... */
               "Color range not set for %s. Using MPEG range.\n",
    /* ... */
    switch (av_format) {
    /* ... */
        widths [0] = avctx->width;
        heights[0] = avctx->height;
        /* ... */
        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
    /* ... */
        widths [0] = avctx->width;
        heights[0] = avctx->height;
        /* ... */
        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;

        widths [2] = (avctx->width  + 1) / 2;
        heights[2] = (avctx->height + 1) / 2;
        strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
    /* ... */
               "Could not get frame format info for color %d range %d.\n",
    /* ... */
    *contiguous_buf_size = 0;
    for (i = 0; i < *plane_count; i++) {
        if (i < *plane_count - 1 &&
            frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
            *contiguous_buf_size = 0;
        /* ... */
        *contiguous_buf_size += strides[i] * heights[i];
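/*
 * Plane geometry, above: per-plane widths, heights and strides are
 * derived for the biplanar (NV12) and planar YUV 4:2:0 layouts, and
 * contiguous_buf_size accumulates the total only while consecutive
 * AVFrame planes are adjacent in memory; any gap resets it to 0, and the
 * resulting size is apparently what later feeds
 * CVPixelBufferCreateWithPlanarBytes. The next fragment copies an
 * AVFrame into a CVPixelBuffer plane by plane: a whole-plane memcpy when
 * source and destination strides match, otherwise row by row using the
 * smaller stride, with the buffer's base address locked around the copy.
 */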
#if !TARGET_OS_IPHONE
    /* ... */
                         const void *plane_addresses[])
    /* ... */
                                        CVPixelBufferRef cv_img,
                                        const size_t *plane_strides,
                                        const size_t *plane_rows)
    /* ... */
    status = CVPixelBufferLockBaseAddress(cv_img, 0);
    /* ... */
               "Error: Could not lock base address of CVPixelBuffer: %d.\n",
    /* ... */
    if (CVPixelBufferIsPlanar(cv_img)) {
        plane_count = CVPixelBufferGetPlaneCount(cv_img);
        for (i = 0; frame->data[i]; i++) {
            if (i == plane_count) {
                CVPixelBufferUnlockBaseAddress(cv_img, 0);
                /* ... */
                       "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
                /* ... */
            dst_addr   = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
            /* ... */
            dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
            src_stride = plane_strides[i];
            rows       = plane_rows[i];

            if (dst_stride == src_stride) {
                memcpy(dst_addr, src_addr, src_stride * rows);
            /* ... */
                copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
                /* ... */
                for (j = 0; j < rows; j++) {
                    memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
    /* ... */
        if (frame->data[1]) {
            CVPixelBufferUnlockBaseAddress(cv_img, 0);
            /* ... */
                   "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
            /* ... */
        dst_addr   = (uint8_t *)CVPixelBufferGetBaseAddress(cv_img);
        /* ... */
        dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
        src_stride = plane_strides[0];
        rows       = plane_rows[0];

        if (dst_stride == src_stride) {
            memcpy(dst_addr, src_addr, src_stride * rows);
        /* ... */
            copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
            /* ... */
            for (j = 0; j < rows; j++) {
                memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
    /* ... */
    status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
    /* ... */
        av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
                                  CVPixelBufferRef *cv_img)
    /* ... */
    size_t contiguous_buf_size;
#if TARGET_OS_IPHONE
    CVPixelBufferPoolRef pix_buf_pool;
    /* ... */
    CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        /* ... */
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks);

    if (!pix_buf_attachments) return AVERROR(ENOMEM);
    /* ... */
        *cv_img = (CVPixelBufferRef)frame->data[3];
    /* ... */
    memset(widths,  0, sizeof(widths));
    memset(heights, 0, sizeof(heights));
    memset(strides, 0, sizeof(strides));
    /* ... */
                               &contiguous_buf_size
    /* ... */
               "Error: Cannot convert format %d color_range %d: %d\n",
    /* ... */
#if TARGET_OS_IPHONE
    pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
    if (!pix_buf_pool) {
    /* ... */
    status = CVPixelBufferPoolCreatePixelBuffer(NULL, /* ... */
        av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
    /* ... */
    status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
    /* ... */
    if (!enc_frame) return AVERROR(ENOMEM);
    /* ... */
    status = CVPixelBufferCreateWithPlanarBytes(
        kCFAllocatorDefault,
        /* ... */
        contiguous_buf_size,
        /* ... */
        (void **)enc_frame->data,
        /* ... */
    CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
    CFRelease(pix_buf_attachments);
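/*
 * Pixel buffer creation, above: a frame that is apparently already backed
 * by a CVPixelBuffer (carried in frame->data[3]) is passed through
 * directly. Otherwise, on iOS the buffer comes from the compression
 * session's pixel buffer pool and the AVFrame is copied into it, while on
 * macOS the AVFrame planes are wrapped in place with
 * CVPixelBufferCreateWithPlanarBytes and the color attachments are
 * propagated onto the new buffer.
 */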
                               CFDictionaryRef *dict_out)
    /* ... */
    CFDictionaryRef dict = NULL;
    /* ... */
        const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
        const void *vals[] = { kCFBooleanTrue };

        dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
        if (!dict) return AVERROR(ENOMEM);
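/*
 * Per-frame options, above: presumably when a keyframe is being forced,
 * a one-entry dictionary mapping kVTEncodeFrameOptionKey_ForceKeyFrame to
 * kCFBooleanTrue is built and handed to VTCompressionSessionEncodeFrame
 * below as the frame properties, then released.
 */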
    CFDictionaryRef  frame_dict;
    CVPixelBufferRef cv_img = NULL;
    /* ... */
    if (status) return status;
    /* ... */
    status = VTCompressionSessionEncodeFrame(/* ... */
    if (frame_dict) CFRelease(frame_dict);
    /* ... */
    status = VTCompressionSessionCompleteFrames(vtctx->session, /* ... */
    if (status) goto end_nopkt;
    if (!buf)   goto end_nopkt;
    /* ... */
    if (status) goto end_nopkt;
                                   CFStringRef     profile_level,
                                   CFNumberRef     gamma_level,
                                   CFDictionaryRef enc_info,
                                   CFDictionaryRef pixel_buffer_info)
    /* ... */
    int chroma_size = (avctx->width / 2) * (avctx->height / 2);
    /* ... */
    memset(frame->data[0], 0,   y_size);
    /* ... */
    memset(frame->data[1], 128, chroma_size);
    /* ... */
    frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
    memset(frame->data[2], 128, chroma_size);
    /* ... */
    status = VTCompressionSessionCompleteFrames(vtctx->session, /* ... */
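/*
 * Extradata population, above: this matches the forward declaration near
 * the top of the excerpt. A dummy black frame (Y = 0, Cb = Cr = 128) is
 * synthesized and pushed through a throwaway compression session so the
 * encoder emits its SPS/PPS, which presumably become avctx->extradata
 * when AV_CODEC_FLAG_GLOBAL_HEADER is requested.
 */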
#define OFFSET(x) offsetof(VTEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
    { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
    /* ... */
                   { .i64 = 0 }, 0, 1, VE },
    /* ... */
    { "realtime",      "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).",
      /* ... */
    { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.",
      /* ... */
    { "frames_after",  "Other frames will come after the frames in this session. This helps smooth concatenation issues.",
      /* ... */
    .name           = "h264_videotoolbox",
    /* ... */
    .pix_fmts       = pix_fmts,
    /* ... */
    .priv_class     = &h264_videotoolbox_class,
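/*
 * A minimal usage sketch (not part of this file) showing how the encoder
 * registered above can be opened through the public libavcodec API. The
 * "level" value names come from the AVOption table above, and "realtime",
 * "frames_before" and "frames_after" also appear there; "profile" and
 * "allow_sw" are assumptions inferred from the baseline/main/high
 * switches and the "Try -allow_sw 1" log message. Dimensions, frame rate
 * and bitrate are arbitrary example values; error handling is reduced to
 * the bare minimum.
 */
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static AVCodecContext *open_h264_videotoolbox(void)
{
    /* Older libavcodec versions require avcodec_register_all() first. */
    const AVCodec *codec = avcodec_find_encoder_by_name("h264_videotoolbox");
    AVCodecContext *ctx;

    if (!codec)
        return NULL;

    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;

    ctx->width     = 1280;                  /* example dimensions */
    ctx->height    = 720;
    ctx->time_base = (AVRational){ 1, 30 }; /* 30 fps */
    ctx->pix_fmt   = AV_PIX_FMT_NV12;       /* one of the encoder's pix_fmts */
    ctx->bit_rate  = 2000000;               /* feeds kVTCompressionPropertyKey_AverageBitRate */

    /* Private options land in the VTEncContext fields used above
     * ("profile" and "allow_sw" are assumed option names). */
    av_opt_set(ctx->priv_data, "profile",  "high", 0);
    av_opt_set(ctx->priv_data, "level",    "4.1",  0);
    av_opt_set(ctx->priv_data, "allow_sw", "1",    0);

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}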