71 #define QUANT_BIAS_SHIFT 8
73 #define QMAT_SHIFT_MMX 16
92 const uint16_t *quant_matrix,
93 int bias,
int qmin,
int qmax,
int intra)
99 for (qscale = qmin; qscale <= qmax; qscale++) {
104 else qscale2 = qscale << 1;
111 for (i = 0; i < 64; i++) {
113 int64_t den = (int64_t) qscale2 * quant_matrix[j];
123 for (i = 0; i < 64; i++) {
125 int64_t den =
ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
135 for (i = 0; i < 64; i++) {
137 int64_t den = (int64_t) qscale2 * quant_matrix[j];
148 if (
qmat16[qscale][0][i] == 0 ||
149 qmat16[qscale][0][i] == 128 * 256)
157 for (i = intra; i < 64; i++) {
162 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
169 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
178 int bestdiff=INT_MAX;
186 if (diff < bestdiff) {
208 for (i = 0; i < 64; i++) {
223 for (i = 0; i < s->
mb_num; i++) {
234 #define COPY(a) dst->a= src->a
259 for (i = -16; i < 16; i++) {
274 if (CONFIG_H263_ENCODER)
292 int i, ret, format_supported;
301 "only YUV420 and YUV422 are supported\n");
307 format_supported = 0;
316 format_supported = 1;
322 format_supported = 1;
324 if (!format_supported) {
354 #if FF_API_PRIVATE_OPT
371 "keyframe interval too large!, reducing it from %d to %d\n",
397 "intra dc precision must be positive, note some applications use"
398 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
459 av_log(avctx,
AV_LOG_ERROR,
"Either both buffer size and max rate or neither must be specified\n");
465 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
482 "impossible bitrate constraints, this will fail\n");
507 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
508 "specified vbv buffer is too large for the given bitrate!\n");
520 "OBMC is only supported with simple mb decision\n");
538 "max b frames must be 0 or positive for mpegvideo based encoders\n");
548 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
556 (avctx->
width > 2048 ||
563 ((avctx->
width &3) ||
570 (avctx->
width > 4095 ||
577 (avctx->
width > 16383 ||
578 avctx->
height > 16383 )) {
579 av_log(avctx,
AV_LOG_ERROR,
"MPEG-2 does not support resolutions above 16383x16383\n");
610 #if FF_API_PRIVATE_OPT
621 "mpeg2 style quantization not supported by codec\n");
641 "QP RD is no longer compatible with MJPEG or AMV\n");
645 #if FF_API_PRIVATE_OPT
655 "closed gop with scene change detection are not supported yet, "
656 "set threshold to 1000000000\n");
664 "low delay forcing is only available for mpeg2, "
665 "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
670 "B-frames cannot be used with low delay\n");
676 if (avctx->
qmax > 28) {
678 "non linear quant only supports qmax <= 28 currently\n");
696 "multi threaded encoding not supported by codec\n");
702 "automatic thread number detection not supported by codec, "
712 #if FF_API_PRIVATE_OPT
723 "notice: b_frame_strategy only affects the first pass\n");
746 av_log(avctx,
AV_LOG_ERROR,
"qmin and or qmax are invalid, they must be 0 < min <= max\n");
755 "timebase %d/%d not supported by MPEG 4 standard, "
756 "the maximum admitted value for the timebase denominator "
779 if (!CONFIG_MJPEG_ENCODER ||
786 if (!CONFIG_H261_ENCODER)
790 "The specified picture size of %dx%d is not valid for the "
791 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
801 if (!CONFIG_H263_ENCODER)
806 "The specified picture size of %dx%d is not valid for "
807 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
808 "352x288, 704x576, and 1408x1152. "
899 #if FF_API_PRIVATE_OPT
947 2 * 64 *
sizeof(uint16_t),
fail);
952 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->
modified_quant)
964 #if FF_API_PRIVATE_OPT
987 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
992 for (i = 0; i < 64; i++) {
1027 #if FF_API_PRIVATE_OPT
1075 if (CONFIG_MJPEG_ENCODER &&
1110 for (y = 0; y < 16; y++) {
1111 for (x = 0; x < 16; x++) {
1112 acc +=
FFABS(src[x + y * stride] - ref);
1128 for (y = 0; y <
h; y += 16) {
1129 for (x = 0; x <
w; x += 16) {
1134 int sae =
get_sae(src + offset, mean, stride);
1136 acc += sae + 500 < sad;
1154 int i, display_picture_number = 0, ret;
1157 int flush_offset = 1;
1170 "Invalid pts (%"PRId64
") <= last (%"PRId64
")\n",
1175 if (!s->
low_delay && display_picture_number == 1)
1184 "Warning: AVFrame.pts=? trying to guess (%"PRId64
")\n",
1187 pts = display_picture_number;
1191 if (!pic_arg->
buf[0] ||
1227 int h_chroma_shift, v_chroma_shift;
1232 for (i = 0; i < 3; i++) {
1233 int src_stride = pic_arg->
linesize[i];
1235 int h_shift = i ? h_chroma_shift : 0;
1236 int v_shift = i ? v_chroma_shift : 0;
1237 int w = s->
width >> h_shift;
1251 if (src_stride == dst_stride)
1252 memcpy(dst, src, src_stride * h);
1257 memcpy(dst2, src, w);
1282 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1286 if (flush_offset <= 1)
1289 encoding_delay = encoding_delay - flush_offset + 1;
1305 int64_t score64 = 0;
1307 for (plane = 0; plane < 3; plane++) {
1309 const int bw = plane ? 1 : 2;
1310 for (y = 0; y < s->
mb_height * bw; y++) {
1311 for (x = 0; x < s->
mb_width * bw; x++) {
1312 int off = p->
shared ? 0 : 16;
1318 case 0: score =
FFMAX(score, v);
break;
1319 case 1: score +=
FFABS(v);
break;
1320 case 2: score64 += v * (int64_t)v;
break;
1321 case 3: score64 +=
FFABS(v * (int64_t)v * v);
break;
1322 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v);
break;
1373 int64_t best_rd = INT64_MAX;
1374 int best_b_count = -1;
1385 b_lambda = p_lambda;
1395 pre_input = *pre_input_ptr;
1396 memcpy(data, pre_input_ptr->
f->
data,
sizeof(data));
1398 if (!pre_input.
shared && i) {
1413 width >> 1, height >> 1);
1418 width >> 1, height >> 1);
1497 return best_b_count;
1570 b_frames =
FFMAX(0, i - 1);
1573 for (i = 0; i < b_frames + 1; i++) {
1584 for (i = b_frames - 1; i >= 0; i--) {
1592 "warning, too many B-frames in a row\n");
1615 for (i = 0; i < b_frames; i++) {
1662 for (i = 0; i < 4; i++) {
1712 #if FF_API_CODED_FRAME
1718 #if FF_API_ERROR_FRAME
1730 for (intra = 0; intra < 2; intra++) {
1732 for (i = 0; i < 64; i++) {
1738 for (i = 0; i < 64; i++) {
1789 for (i = 0; i < 4; i++) {
1820 const AVFrame *pic_arg,
int *got_packet)
1823 int i, stuffing_count, ret;
1852 for (i = 0; i < context_count; i++) {
1869 if (growing_buffer) {
1877 #if FF_API_STAT_BITS
1926 for (i = 0; i < context_count; i++) {
1941 for (i = 0; i < 4; i++) {
1959 if (stuffing_count) {
1961 stuffing_count + 50) {
1969 while (stuffing_count--) {
1976 stuffing_count -= 4;
1977 while (stuffing_count--) {
2006 "Internal error, negative bits\n");
2014 vbv_delay =
FFMAX(vbv_delay, min_delay);
2036 #if FF_API_VBV_DELAY
2043 #if FF_API_STAT_BITS
2076 *got_packet = !!pkt->
size;
2081 int n,
int threshold)
2083 static const char tab[64] = {
2084 3, 2, 2, 1, 1, 1, 1, 1,
2085 1, 1, 1, 1, 1, 1, 1, 1,
2086 1, 1, 1, 1, 1, 1, 1, 1,
2087 0, 0, 0, 0, 0, 0, 0, 0,
2088 0, 0, 0, 0, 0, 0, 0, 0,
2089 0, 0, 0, 0, 0, 0, 0, 0,
2090 0, 0, 0, 0, 0, 0, 0, 0,
2091 0, 0, 0, 0, 0, 0, 0, 0
2100 if (threshold < 0) {
2102 threshold = -threshold;
2107 if (last_index <= skip_dc - 1)
2110 for (i = 0; i <= last_index; i++) {
2114 if (skip_dc && i == 0)
2118 }
else if (level > 1) {
2124 if (score >= threshold)
2126 for (i = skip_dc; i <= last_index; i++) {
2149 for (; i <= last_index; i++) {
2151 int level = block[j];
2153 if (level > maxlevel) {
2156 }
else if (level < minlevel) {
2166 "warning, clipping %d dct coefficients to %d..%d\n",
2167 overflow, minlevel, maxlevel);
2174 for (y = 0; y < 8; y++) {
2175 for (x = 0; x < 8; x++) {
2181 for (y2 =
FFMAX(y - 1, 0); y2 <
FFMIN(8, y + 2); y2++) {
2182 for (x2=
FFMAX(x - 1, 0); x2 <
FFMIN(8, x + 2); x2++) {
2183 int v = ptr[x2 + y2 *
stride];
2189 weight[x + 8 * y]= (36 *
ff_sqrt(count * sqr - sum * sum)) / count;
2195 int motion_x,
int motion_y,
2196 int mb_block_height,
2201 int16_t orig[12][64];
2208 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2209 ptrdiff_t wrap_y, wrap_c;
2211 for (i = 0; i < mb_block_count; i++)
2215 const int last_qp = s->
qscale;
2216 const int mb_xy = mb_x + mb_y * s->
mb_stride;
2247 (mb_y * 16 * wrap_y) + mb_x * 16;
2249 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2251 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2259 16, 16, mb_x * 16, mb_y * 16,
2264 mb_block_width, mb_block_height,
2265 mb_x * mb_block_width, mb_y * mb_block_height,
2267 ptr_cb = ebuf + 16 * wrap_y;
2270 mb_block_width, mb_block_height,
2271 mb_x * mb_block_width, mb_y * mb_block_height,
2273 ptr_cr = ebuf + 16 * wrap_y + 16;
2278 int progressive_score, interlaced_score;
2283 NULL, wrap_y, 8) - 400;
2285 if (progressive_score > 0) {
2287 NULL, wrap_y * 2, 8) +
2289 NULL, wrap_y * 2, 8);
2290 if (progressive_score > interlaced_score) {
2293 dct_offset = wrap_y;
2294 uv_dct_offset = wrap_c;
2329 uint8_t *dest_y, *dest_cb, *dest_cr;
2331 dest_y = s->
dest[0];
2332 dest_cb = s->
dest[1];
2333 dest_cr = s->
dest[2];
2357 int progressive_score, interlaced_score;
2360 progressive_score = s->
mecc.
ildct_cmp[0](
s, dest_y, ptr_y, wrap_y, 8) +
2366 progressive_score -= 400;
2368 if (progressive_score > 0) {
2375 if (progressive_score > interlaced_score) {
2378 dct_offset = wrap_y;
2379 uv_dct_offset = wrap_c;
2390 dest_y + dct_offset, wrap_y);
2392 dest_y + dct_offset + 8, wrap_y);
2402 dest_cb + uv_dct_offset, wrap_c);
2404 dest_cr + uv_dct_offset, wrap_c);
2415 if (s->
mecc.
sad[1](
NULL, ptr_y + dct_offset, dest_y + dct_offset,
2416 wrap_y, 8) < 20 * s->
qscale)
2418 if (s->
mecc.
sad[1](
NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2419 wrap_y, 8) < 20 * s->
qscale)
2427 dest_cb + uv_dct_offset,
2428 wrap_c, 8) < 20 * s->
qscale)
2431 dest_cr + uv_dct_offset,
2432 wrap_c, 8) < 20 * s->
qscale)
2459 memcpy(orig[0], s->
block[0],
sizeof(int16_t) * 64 * mb_block_count);
2465 for (i = 0; i < mb_block_count; i++) {
2480 for (i = 0; i < mb_block_count; i++) {
2490 for (i = 0; i < 4; i++)
2493 for (i = 4; i < mb_block_count; i++)
2497 for (i = 0; i < mb_block_count; i++) {
2510 for (i=6; i<12; i++) {
2519 for (i = 0; i < mb_block_count; i++) {
2522 for (j = 63; j > 0; j--) {
2535 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2539 if (CONFIG_MPEG4_ENCODER)
2549 if (CONFIG_WMV2_ENCODER)
2553 if (CONFIG_H261_ENCODER)
2561 if (CONFIG_H263_ENCODER)
2566 if (CONFIG_MJPEG_ENCODER)
2612 memcpy(d->
mv, s->
mv, 2*4*2*
sizeof(
int));
2650 int *dmin,
int *next_block,
int motion_x,
int motion_y)
2658 s->
pb= pb[*next_block];
2660 s->
pb2 = pb2 [*next_block];
2661 s->
tex_pb= tex_pb[*next_block];
2665 memcpy(dest_backup, s->
dest,
sizeof(s->
dest));
2688 memcpy(s->
dest, dest_backup,
sizeof(s->
dest));
2706 else if(w==8 && h==8)
2796 for(mb_x=0; mb_x < s->
mb_width; mb_x++) {
2804 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2839 if (CONFIG_H263_ENCODER)
2841 bytestream_put_le32(&ptr, offset);
2842 bytestream_put_byte(&ptr, s->
qscale);
2843 bytestream_put_byte(&ptr, gobn);
2844 bytestream_put_le16(&ptr, mba);
2845 bytestream_put_byte(&ptr, pred_x);
2846 bytestream_put_byte(&ptr, pred_y);
2848 bytestream_put_byte(&ptr, 0);
2849 bytestream_put_byte(&ptr, 0);
2884 int new_buffer_size = 0;
2961 if (CONFIG_H263_ENCODER)
2981 for(mb_x=0; mb_x < s->
mb_width; mb_x++) {
3015 int current_packet_size, is_gob_start;
3023 if(s->
start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3032 if(s->
mb_x==0 && s->
mb_y!=0) is_gob_start=1;
3037 if(s->
mb_x==0 && s->
mb_y!=0) is_gob_start=1;
3057 current_packet_size=0;
3063 #if FF_API_RTP_CALLBACK
3075 if (CONFIG_MPEG4_ENCODER) {
3082 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3089 if (CONFIG_H263_ENCODER)
3119 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3126 backup_s.pb2= s->
pb2;
3127 backup_s.tex_pb= s->
tex_pb;
3136 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3137 &dmin, &next_block, s->
mv[0][0][0], s->
mv[0][0][1]);
3148 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3149 &dmin, &next_block, 0, 0);
3157 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3158 &dmin, &next_block, s->
mv[0][0][0], s->
mv[0][0][1]);
3168 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3169 &dmin, &next_block, 0, 0);
3177 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3178 &dmin, &next_block, s->
mv[0][0][0], s->
mv[0][0][1]);
3186 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3187 &dmin, &next_block, s->
mv[1][0][0], s->
mv[1][0][1]);
3197 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3198 &dmin, &next_block, 0, 0);
3209 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3210 &dmin, &next_block, 0, 0);
3221 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3222 &dmin, &next_block, 0, 0);
3228 for(dir=0; dir<2; dir++){
3235 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3236 &dmin, &next_block, 0, 0);
3244 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3245 &dmin, &next_block, 0, 0);
3256 const int last_qp= backup_s.qscale;
3260 static const int dquant_tab[4]={-1,1,-2,2};
3269 s->
mv[0][0][0] = best_s.
mv[0][0][0];
3270 s->
mv[0][0][1] = best_s.
mv[0][0][1];
3271 s->
mv[1][0][0] = best_s.
mv[1][0][0];
3272 s->
mv[1][0][1] = best_s.
mv[1][0][1];
3275 for(; qpi<4; qpi++){
3276 int dquant= dquant_tab[qpi];
3288 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
3289 &dmin, &next_block, s->
mv[mvdir][0][0], s->
mv[mvdir][0][1]);
3305 backup_s.dquant = 0;
3309 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3310 &dmin, &next_block, mx, my);
3313 backup_s.dquant = 0;
3317 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3318 &dmin, &next_block, 0, 0);
3326 memcpy(s->
mv, best_s.
mv,
sizeof(s->
mv));
3347 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
3348 &dmin, &next_block, mx, my);
3366 s->
pb2= backup_s.pb2;
3370 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3371 s->
tex_pb= backup_s.tex_pb;
3375 if (CONFIG_H263_ENCODER &&
3388 int motion_x = 0, motion_y = 0;
3396 motion_x= s->
mv[0][0][0] = 0;
3397 motion_y= s->
mv[0][0][1] = 0;
3425 if (CONFIG_MPEG4_ENCODER) {
3434 if (CONFIG_MPEG4_ENCODER) {
3484 for(dir=0; dir<2; dir++){
3501 if (CONFIG_H263_ENCODER &&
3546 #if FF_API_RTP_CALLBACK
3562 #define MERGE(field) dst->field += src->field; src->field=0
3589 for(i=0; i<64; i++){
3617 if (CONFIG_MPEG4_ENCODER)
3623 if (CONFIG_H263_ENCODER)
3704 for(i=1; i<context_count; i++){
3735 for(i=1; i<context_count; i++){
3749 ff_dlog(s,
"Scene change detected, encoding as I Frame %"PRId64
" %"PRId64
"\n",
3793 for(dir=0; dir<2; dir++){
3844 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3845 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3881 if (CONFIG_H261_ENCODER)
3889 else if (CONFIG_MPEG4_ENCODER && s->
h263_pred) {
3902 else if (CONFIG_H263_ENCODER)
3906 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3915 for(i=1; i<context_count; i++){
3919 for(i=1; i<context_count; i++){
3934 for(i=0; i<64; i++){
3935 int level= block[i];
3941 if(level<0) level=0;
3945 if(level>0) level=0;
3954 int qscale,
int *overflow){
3956 const uint16_t *matrix;
3958 const uint8_t *perm_scantable;
3960 unsigned int threshold1, threshold2;
3972 int coeff_count[64];
3973 int qmul, qadd, start_i, last_non_zero, i,
dc;
3985 qadd= ((qscale-1)|1)*8;
3988 else mpeg2_qscale = qscale << 1;
4007 block[0] = (block[0] + (q >> 1)) / q;
4035 threshold2= (threshold1<<1);
4037 for(i=63; i>=start_i; i--) {
4038 const int j = scantable[i];
4039 int level = block[j] * qmat[j];
4041 if(((
unsigned)(level+threshold1))>threshold2){
4047 for(i=start_i; i<=last_non_zero; i++) {
4048 const int j = scantable[i];
4049 int level = block[j] * qmat[j];
4053 if(((
unsigned)(level+threshold1))>threshold2){
4057 coeff[1][i]= level-1;
4061 coeff[0][i]= -
level;
4062 coeff[1][i]= -level+1;
4065 coeff_count[i]=
FFMIN(level, 2);
4069 coeff[0][i]= (level>>31)|1;
4076 if(last_non_zero < start_i){
4077 memset(block + start_i, 0, (64-start_i)*
sizeof(int16_t));
4078 return last_non_zero;
4081 score_tab[start_i]= 0;
4082 survivor[0]= start_i;
4085 for(i=start_i; i<=last_non_zero; i++){
4086 int level_index, j, zero_distortion;
4087 int dct_coeff=
FFABS(block[ scantable[i] ]);
4088 int best_score=256*256*256*120;
4092 zero_distortion= dct_coeff*dct_coeff;
4094 for(level_index=0; level_index < coeff_count[i]; level_index++){
4096 int level= coeff[level_index][i];
4097 const int alevel=
FFABS(level);
4103 unquant_coeff= alevel*qmul + qadd;
4106 unquant_coeff = alevel * matrix[j] * 8;
4110 unquant_coeff = (
int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4111 unquant_coeff = (unquant_coeff - 1) | 1;
4113 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4114 unquant_coeff = (unquant_coeff - 1) | 1;
4119 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4121 if((level&(~127)) == 0){
4122 for(j=survivor_count-1; j>=0; j--){
4123 int run= i - survivor[j];
4125 score += score_tab[i-
run];
4127 if(score < best_score){
4130 level_tab[i+1]= level-64;
4135 for(j=survivor_count-1; j>=0; j--){
4136 int run= i - survivor[j];
4138 score += score_tab[i-
run];
4139 if(score < last_score){
4142 last_level= level-64;
4148 distortion += esc_length*
lambda;
4149 for(j=survivor_count-1; j>=0; j--){
4150 int run= i - survivor[j];
4151 int score= distortion + score_tab[i-
run];
4153 if(score < best_score){
4156 level_tab[i+1]= level-64;
4161 for(j=survivor_count-1; j>=0; j--){
4162 int run= i - survivor[j];
4163 int score= distortion + score_tab[i-
run];
4164 if(score < last_score){
4167 last_level= level-64;
4175 score_tab[i+1]= best_score;
4178 if(last_non_zero <= 27){
4179 for(; survivor_count; survivor_count--){
4180 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4184 for(; survivor_count; survivor_count--){
4185 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4190 survivor[ survivor_count++ ]= i+1;
4194 last_score= 256*256*256*120;
4195 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4196 int score= score_tab[i];
4198 score += lambda * 2;
4200 if(score < last_score){
4203 last_level= level_tab[i];
4204 last_run= run_tab[i];
4211 dc=
FFABS(block[0]);
4212 last_non_zero= last_i - 1;
4213 memset(block + start_i, 0, (64-start_i)*
sizeof(int16_t));
4215 if(last_non_zero < start_i)
4216 return last_non_zero;
4218 if(last_non_zero == 0 && start_i == 0){
4220 int best_score= dc *
dc;
4222 for(i=0; i<coeff_count[0]; i++){
4223 int level= coeff[i][0];
4224 int alevel=
FFABS(level);
4225 int unquant_coeff, score, distortion;
4228 unquant_coeff= (alevel*qmul + qadd)>>3;
4230 unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4231 unquant_coeff = (unquant_coeff - 1) | 1;
4233 unquant_coeff = (unquant_coeff + 4) >> 3;
4234 unquant_coeff<<= 3 + 3;
4236 distortion= (unquant_coeff -
dc) * (unquant_coeff - dc);
4239 else score= distortion + esc_length*
lambda;
4241 if(score < best_score){
4243 best_level= level - 64;
4246 block[0]= best_level;
4248 if(best_level == 0)
return -1;
4249 else return last_non_zero;
4255 block[ perm_scantable[last_non_zero] ]= last_level;
4258 for(; i>start_i; i -= run_tab[i] + 1){
4259 block[ perm_scantable[i-1] ]= level_tab[i];
4262 return last_non_zero;
4277 int perm_index= perm[
index];
4278 if(i==0) s*= sqrt(0.5);
4279 if(j==0) s*= sqrt(0.5);
4280 basis[perm_index][8*x + y]=
lrintf(s * cos((
M_PI/8.0)*i*(x+0.5)) * cos((
M_PI/8.0)*j*(y+0.5)));
4293 const uint8_t *perm_scantable;
4299 int qmul, qadd, start_i, last_non_zero, i,
dc;
4303 int rle_index,
run, q = 1, sum;
4306 static int after_last=0;
4307 static int to_zero=0;
4308 static int from_zero=0;
4311 static int messed_sign=0;
4314 if(basis[0][0] == 0)
4360 for(i=0; i<64; i++){
4367 for(i=0; i<64; i++){
4372 w=
FFABS(weight[i]) + qns*one;
4373 w= 15 + (48*qns*one + w/2)/w;
4388 for(i=start_i; i<=last_non_zero; i++){
4389 int j= perm_scantable[i];
4390 const int level= block[j];
4394 if(level<0) coeff= qmul*level - qadd;
4395 else coeff= qmul*level + qadd;
4396 run_tab[rle_index++]=
run;
4405 if(last_non_zero>0){
4416 int run2, best_unquant_change=0, analyze_gradient;
4422 if(analyze_gradient){
4426 for(i=0; i<64; i++){
4442 const int level= block[0];
4443 int change, old_coeff;
4449 for(change=-1; change<=1; change+=2){
4450 int new_level= level + change;
4451 int score, new_coeff;
4453 new_coeff= q*new_level;
4454 if(new_coeff >= 2048 || new_coeff < 0)
4458 new_coeff - old_coeff);
4459 if(score<best_score){
4462 best_change= change;
4463 best_unquant_change= new_coeff - old_coeff;
4470 run2= run_tab[rle_index++];
4474 for(i=start_i; i<64; i++){
4475 int j= perm_scantable[i];
4476 const int level= block[j];
4477 int change, old_coeff;
4483 if(level<0) old_coeff= qmul*level - qadd;
4484 else old_coeff= qmul*level + qadd;
4485 run2= run_tab[rle_index++];
4492 for(change=-1; change<=1; change+=2){
4493 int new_level= level + change;
4494 int score, new_coeff, unquant_change;
4501 if(new_level<0) new_coeff= qmul*new_level - qadd;
4502 else new_coeff= qmul*new_level + qadd;
4503 if(new_coeff >= 2048 || new_coeff <= -2048)
4508 if(level < 63 && level > -63){
4509 if(i < last_non_zero)
4519 if(analyze_gradient){
4520 int g= d1[ scantable[i] ];
4521 if(g && (g^new_level) >= 0)
4525 if(i < last_non_zero){
4526 int next_i= i + run2 + 1;
4527 int next_level= block[ perm_scantable[next_i] ] + 64;
4529 if(next_level&(~127))
4532 if(next_i < last_non_zero)
4552 if(i < last_non_zero){
4553 int next_i= i + run2 + 1;
4554 int next_level= block[ perm_scantable[next_i] ] + 64;
4556 if(next_level&(~127))
4559 if(next_i < last_non_zero)
4578 unquant_change= new_coeff - old_coeff;
4579 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4583 if(score<best_score){
4586 best_change= change;
4587 best_unquant_change= unquant_change;
4591 prev_level= level + 64;
4592 if(prev_level&(~127))
4605 int j= perm_scantable[ best_coeff ];
4607 block[j] += best_change;
4609 if(best_coeff > last_non_zero){
4610 last_non_zero= best_coeff;
4618 if(block[j] - best_change){
4619 if(
FFABS(block[j]) >
FFABS(block[j] - best_change)){
4631 for(; last_non_zero>=start_i; last_non_zero--){
4632 if(block[perm_scantable[last_non_zero]])
4638 if(256*256*256*64 % count == 0){
4639 av_log(s->
avctx,
AV_LOG_DEBUG,
"after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero,
raise, lower, messed_sign, s->
mb_x, s->
mb_y, s->
picture_number);
4644 for(i=start_i; i<=last_non_zero; i++){
4645 int j= perm_scantable[i];
4646 const int level= block[j];
4649 run_tab[rle_index++]=
run;
4662 if(last_non_zero>0){
4668 return last_non_zero;
4683 const uint8_t *scantable,
int last)
4694 for (i = 0; i <= last; i++) {
4695 const int j = scantable[i];
4700 for (i = 0; i <= last; i++) {
4701 const int j = scantable[i];
4702 const int perm_j = permutation[j];
4703 block[perm_j] = temp[j];
4709 int qscale,
int *overflow)
4711 int i, j,
level, last_non_zero, q, start_i;
4716 unsigned int threshold1, threshold2;
4736 block[0] = (block[0] + (q >> 1)) / q;
4749 threshold2= (threshold1<<1);
4750 for(i=63;i>=start_i;i--) {
4752 level = block[j] * qmat[j];
4754 if(((
unsigned)(level+threshold1))>threshold2){
4761 for(i=start_i; i<=last_non_zero; i++) {
4763 level = block[j] * qmat[j];
4767 if(((
unsigned)(level+threshold1))>threshold2){
4785 scantable, last_non_zero);
4787 return last_non_zero;
4790 #define OFFSET(x) offsetof(MpegEncContext, x)
4791 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4794 {
"mb_info",
"emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size",
OFFSET(
mb_info),
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX,
VE },
4856 .
name =
"msmpeg4v2",
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
static const AVClass wmv1_class
void ff_h261_reorder_mb_index(MpegEncContext *s)
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
int chroma_elim_threshold
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
av_cold int ff_dct_encode_init(MpegEncContext *s)
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
const struct AVCodec * codec
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
av_cold void ff_rate_control_uninit(MpegEncContext *s)
#define FF_MPV_FLAG_STRICT_GOP
void ff_init_block_index(MpegEncContext *s)
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
qpel_mc_func avg_qpel_pixels_tab[2][16]
me_cmp_func frame_skip_cmp[6]
#define CANDIDATE_MB_TYPE_SKIPPED
static int shift(int a, int b)
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void ff_mpeg1_encode_init(MpegEncContext *s)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
This structure describes decoded (raw) audio or video data.
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
void ff_fdct_ifast(int16_t *data)
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
ptrdiff_t const GLvoid * data
uint8_t * fcode_tab
smallest fcode needed for each MV
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
#define MV_TYPE_FIELD
2 vectors, one per field
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
uint8_t * mb_mean
Table for MB luminance.
uint64_t error[AV_NUM_DATA_POINTERS]
error
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
int pre_pass
= 1 for the pre pass
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define FF_MPV_FLAG_SKIP_RD
AVFrame * tmp_frames[MAX_B_FRAMES+2]
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
#define AV_LOG_WARNING
Something somehow does not look correct.
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
#define CANDIDATE_MB_TYPE_INTER_I
int64_t bit_rate
the average bitrate
#define LIBAVUTIL_VERSION_INT
attribute_deprecated void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
void ff_h263_encode_init(MpegEncContext *s)
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
static av_cold int init(AVCodecContext *avctx)
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
uint16_t * mb_var
Table for MB variances.
uint16_t(* q_chroma_intra_matrix16)[2][64]
uint16_t chroma_intra_matrix[64]
static int estimate_qp(MpegEncContext *s, int dry_run)
int max_bitrate
Maximum bitrate of the stream, in bits per second.
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
attribute_deprecated int frame_skip_cmp
#define FF_MPV_COMMON_OPTS
enum AVColorRange color_range
MPEG vs JPEG YUV range.
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
#define CANDIDATE_MB_TYPE_BIDIR
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
void ff_get_2pass_fcode(MpegEncContext *s)
const char * av_default_item_name(void *ptr)
Return the context name.
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
int obmc
overlapped block motion compensation
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
void ff_mpeg1_clean_buffers(MpegEncContext *s)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
int ff_h261_get_picture_format(int width, int height)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
static int select_input_picture(MpegEncContext *s)
static const AVClass msmpeg4v3_class
int min_qcoeff
minimum encodable coefficient
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
int ildct_cmp
interlaced DCT comparison function
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
const uint16_t ff_h263_format[8][2]
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
int mpv_flags
flags set by private options
static const AVClass h263_class
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
uint8_t * intra_ac_vlc_length
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
#define UNI_AC_ENC_INDEX(run, level)
int mb_num
number of MBs of a picture
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
int h263_aic
Advanced INTRA Coding (AIC)
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
int min_bitrate
Minimum bitrate of the stream, in bits per second.
int encoding
true if we are encoding (vs decoding)
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
attribute_deprecated int frame_skip_exp
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
common functions for use with the Xvid wrappers
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
int skipdct
skip dct and code zero residual
const uint8_t ff_mpeg2_non_linear_qscale[32]
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
void ff_mpeg4_clean_buffers(MpegEncContext *s)
attribute_deprecated int mv_bits
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
#define CANDIDATE_MB_TYPE_INTER
float p_masking
p block masking (0-> disabled)
int picture_in_gop_number
0-> first pic in gop, ...
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
int alt_inter_vlc
alternative inter vlc
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
int64_t time
time of current frame
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
static const AVClass msmpeg4v2_class
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4) ...
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Picture ** input_picture
next pictures on display order for encoding
#define CANDIDATE_MB_TYPE_INTER4V
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
PutBitContext pb2
used for data partitioned VOPs
enum OutputFormat out_format
output format
attribute_deprecated int i_count
#define CANDIDATE_MB_TYPE_FORWARD_I
uint16_t(* dct_offset)[64]
void ff_dct_encode_init_x86(MpegEncContext *s)
static av_cold int end(AVCodecContext *avctx)
uint16_t * chroma_intra_matrix
custom intra quantization matrix
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Multithreading support functions.
const uint32_t ff_square_tab[512]
int pre_dia_size
ME prepass diamond size & shape.
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
void ff_free_picture_tables(Picture *pic)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
int misc_bits
cbp, mb_type
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
#define CANDIDATE_MB_TYPE_BACKWARD_I
int(* q_chroma_intra_matrix)[64]
int me_cmp
motion estimation comparison function
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
void(* diff_pixels)(int16_t *av_restrict block, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride)
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
#define PICT_BOTTOM_FIELD
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
const uint16_t ff_aanscales[64]
static double av_q2d(AVRational a)
Convert an AVRational to a double.
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
#define AVERROR_EOF
End of file.
uint16_t pp_time
time distance between the last 2 p,s,i frames
#define AV_LOG_VERBOSE
Detailed information.
const uint8_t * scantable
av_cold void ff_mpv_idct_init(MpegEncContext *s)
int mb_height
number of MBs horizontally & vertically
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
float lumi_masking
luminance masking (0-> disabled)
char * stats_out
pass1 encoding statistics output buffer
int max_qcoeff
maximum encodable coefficient
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
high precision timer, useful to profile code
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
int scenechange_threshold
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
attribute_deprecated int frame_skip_threshold
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
int dquant
qscale difference to prev qscale
int num_entries
number of RateControlEntries
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
static void ff_update_block_index(MpegEncContext *s)
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
#define ROUNDED_DIV(a, b)
int(* q_inter_matrix)[64]
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static int get_bits_diff(MpegEncContext *s)
attribute_deprecated int skip_count
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
int intra_only
if true, only intra pictures are generated
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
int h263_plus
H.263+ headers.
int slice_context_count
number of used thread_contexts
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
int has_b_frames
Size of the frame reordering buffer in the decoder.
int last_dc[3]
last DC values for MPEG-1
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
uint8_t * inter_ac_vlc_last_length
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
#define PTRDIFF_SPECIFIER
int mb_skipped
MUST BE SET only during DECODING.
int strict_std_compliance
strictly follow the std (MPEG-4, ...)
int partitioned_frame
is current frame partitioned
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
#define MAX_PICTURE_COUNT
av_cold int ff_rate_control_init(MpegEncContext *s)
int me_sub_cmp
subpixel motion estimation comparison function
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
int qmax
maximum quantizer
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
static void update_mb_info(MpegEncContext *s, int startcode)
void ff_write_pass1_stats(MpegEncContext *s)
int unrestricted_mv
mv can point outside of the coded picture
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int active_thread_type
Which multithreading methods are in use by the codec.
int last_lambda_for[5]
last lambda for a specific pict type
static int sse_mb(MpegEncContext *s)
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
uint8_t * intra_chroma_ac_vlc_length
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int h263_slice_structured
int flags
AV_CODEC_FLAG_*.
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
MpegvideoEncDSPContext mpvencdsp
const char * name
Name of the codec implementation.
int quarter_sample
1->qpel, 0->half pel ME/MC
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
int me_pre
prepass for motion estimation
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
int low_delay
no reordering needed / has no B-frames
qpel_mc_func put_qpel_pixels_tab[2][16]
uint8_t *[2][2] b_field_select_table
static const uint8_t offset[127][2]
void ff_mpv_common_end(MpegEncContext *s)
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
attribute_deprecated int b_sensitivity
int flags
A combination of AV_PKT_FLAG values.
static int put_bits_count(PutBitContext *s)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
int rc_buffer_size
decoder bitstream buffer size
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2...
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int64_t rc_min_rate
minimum bitrate
common internal API header
uint8_t * intra_ac_vlc_last_length
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
void ff_h263_loop_filter(MpegEncContext *s)
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
enum AVPictureType pict_type
Picture type of the frame.
const uint8_t ff_h263_chroma_qscale_table[32]
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
int display_picture_number
picture number in display order
uint16_t(* q_inter_matrix16)[2][64]
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
int fixed_qscale
fixed qscale if non zero
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
int umvplus
== H.263+ && unrestricted_mv
Picture new_picture
copy of the source picture structure for encoding.
int intra_quant_bias
bias for the quantizer
int width
picture width / height.
int(* pix_sum)(uint8_t *pix, int line_size)
int16_t(*[2] motion_val)[2]
Picture * current_picture_ptr
pointer to the current picture
attribute_deprecated int noise_reduction
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
attribute_deprecated int frame_skip_factor
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
int block_last_index[12]
last non zero coefficient in block
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
const int16_t ff_mpeg4_default_non_intra_matrix[64]
int mb_decision
macroblock decision mode
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
int ff_msmpeg4_encode_init(MpegEncContext *s)
int ac_esc_length
num of bits needed to encode the longest esc
preferred ID for MPEG-1/2 video decoding
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
int block_index[6]
index to current MB in block based arrays with edges
Compute and use optimal Huffman tables.
the normal 2^n-1 "JPEG" YUV ranges
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
static uint8_t default_fcode_tab[MAX_MV *2+1]
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
attribute_deprecated int i_tex_bits
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
int first_slice_line
used in MPEG-4 too to handle resync markers
attribute_deprecated int misc_bits
This structure describes the bitrate properties of an encoded bitstream.
uint16_t * mc_mb_var
Table for motion compensated MB variances.
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
int coded_picture_number
picture number in bitstream order
#define AV_LOG_INFO
Standard information.
uint16_t inter_matrix[64]
void ff_jpeg_fdct_islow_8(int16_t *data)
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
struct MpegEncContext * thread_context[MAX_THREADS]
#define CONFIG_MSMPEG4_ENCODER
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
void ff_faandct(int16_t *data)
double buffer_index
amount of bits in the video/audio buffer
Libavcodec external API header.
attribute_deprecated int mpeg_quant
void ff_h263_update_motion_val(MpegEncContext *s)
int h263_flv
use flv H.263 header
attribute_deprecated int scenechange_threshold
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
attribute_deprecated int prediction_method
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
const uint16_t ff_inv_aanscales[64]
attribute_deprecated int b_frame_strategy
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
uint8_t * intra_chroma_ac_vlc_last_length
void(* fdct)(int16_t *block)
main external API structure.
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
ScanTable intra_scantable
int qmin
minimum quantizer
int height
picture size. must be a multiple of 16
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
attribute_deprecated int header_bits
void ff_fix_long_p_mvs(MpegEncContext *s)
Picture * picture
main picture buffer
int data_partitioning
data partitioning flag from header
uint8_t * inter_ac_vlc_length
uint16_t * intra_matrix
custom intra quantization matrix
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Describe the class of an AVClass context structure.
int stuffing_bits
bits used for stuffing
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
int(* pix_norm1)(uint8_t *pix, int line_size)
#define FF_COMPLIANCE_NORMAL
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
#define CANDIDATE_MB_TYPE_DIRECT
#define FF_MB_DECISION_RD
rate distortion
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
const uint16_t ff_mpeg1_default_intra_matrix[256]
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
const uint8_t ff_zigzag_direct[64]
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
int mb_info
interval for outputting info about mb offsets as side data
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
attribute_deprecated int brd_scale
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
#define CANDIDATE_MB_TYPE_BIDIR_I
const int16_t ff_mpeg4_default_intra_matrix[64]
int f_code
forward MV resolution
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
#define CANDIDATE_MB_TYPE_DIRECT0
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
attribute_deprecated int p_tex_bits
static int weight(int i, int blen, int offset)
uint16_t * inter_matrix
custom inter quantization matrix
int max_b_frames
max number of B-frames for encoding
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
static enum AVPixelFormat pix_fmts[]
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int last_mv_dir
last mv_dir, used for B-frame encoding
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
int h263_pred
use MPEG-4/H.263 ac/dc predictions
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
float dark_masking
darkness masking (0-> disabled)
float temporal_cplx_masking
temporal complexity masking (0-> disabled)
int ff_init_me(MpegEncContext *s)
uint8_t *[2] p_field_select_table
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
int me_penalty_compensation
int64_t mc_mb_var_sum_temp
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
PutBitContext pb
bit output
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
GLint GLenum GLboolean GLsizei stride
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
int quantizer_noise_shaping
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
static const int32_t qmat16[MAT_SIZE]
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static int ref[MAX_W *MAX_W]
#define CANDIDATE_MB_TYPE_FORWARD
attribute_deprecated int rtp_payload_size
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int adaptive_quant
use adaptive quantization
static int16_t basis[64][64]
Picture last_picture
copy of the previous picture structure.
Picture * last_picture_ptr
pointer to the previous picture.
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
int ff_vbv_update(MpegEncContext *s, int frame_size)
#define H263_GOB_HEIGHT(h)
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
int trellis
trellis RD quantization
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
#define CANDIDATE_MB_TYPE_INTRA
int16_t(* blocks)[12][64]
int slices
Number of slices.
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
const AVOption ff_mpv_generic_options[]
int last_bits
temp var used for calculating the above vars
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int dia_size
ME diamond size & shape.
attribute_deprecated int frame_bits
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
#define FF_ENABLE_DEPRECATION_WARNINGS
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
attribute_deprecated int me_penalty_compensation
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
int avg_bitrate
Average bitrate of the stream, in bits per second.
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
int resync_mb_y
y position of last resync marker
struct AVCodecInternal * internal
Private context used for internal data.
int16_t(* block)[64]
points to one of the following blocks
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
int64_t bit_rate
wanted bit rate
This side data corresponds to the AVCPBProperties struct.
PutBitContext tex_pb
used for data partitioned VOPs
Picture next_picture
copy of the next picture structure.
attribute_deprecated int p_count
int key_frame
1 -> keyframe, 0-> not
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Picture ** reordered_input_picture
pointer to the next pictures in coded order for encoding
static const struct twinvq_data tab
unsigned int byte_buffer_size
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
#define LOCAL_ALIGNED_16(t, v,...)
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
#define AV_CODEC_FLAG_CLOSED_GOP
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
int inter_quant_bias
bias for the quantizer
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
#define CANDIDATE_MB_TYPE_BACKWARD
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
int b_code
backward MV resolution for B-frames (MPEG-4)
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
void ff_h261_encode_init(MpegEncContext *s)
int64_t mb_var_sum
sum of MB variance for current frame
static int encode_frame(AVCodecContext *c, AVFrame *frame)
AVPixelFormat
Pixel format.
This structure stores compressed data.
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
int ff_check_alignment(void)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
#define AV_NOPTS_VALUE
Undefined timestamp value.
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
unsigned int lambda
Lagrange multiplier used in rate distortion.
AVCodec ff_msmpeg4v2_encoder
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
int64_t rc_max_rate
maximum bitrate
uint16_t pb_time
time distance between the last b and p,s,i frame
enum idct_permutation_type perm_type
attribute_deprecated int pre_me
static const uint8_t sp5x_quant_table[20][64]
int next_lambda
next lambda used for retrying to encode a frame