   71 #define QUANT_BIAS_SHIFT 8
   73 #define QMAT_SHIFT_MMX 16
   92                        const uint16_t *quant_matrix,
   93                        int bias, int qmin, int qmax, int intra)
  104         else                 qscale2 = qscale << 1;
  111             for (i = 0; i < 64; i++) {
  112                 const int j = s->idsp.idct_permutation[i];
  113                 int64_t den = (int64_t) qscale2 * quant_matrix[j];
  123             for (i = 0; i < 64; i++) {
  124                 const int j = s->idsp.idct_permutation[i];
  125                 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
  135             for (i = 0; i < 64; i++) {
  136                 const int j = s->idsp.idct_permutation[i];
  137                 int64_t den = (int64_t) qscale2 * quant_matrix[j];
  157         for (i = intra; i < 64; i++) {
  169                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
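/*
 * Annotation (not part of the original listing): the loops above build
 * fixed-point reciprocal tables, so per-coefficient quantization becomes a
 * multiply and a shift instead of a division by qscale2 * quant_matrix[j].
 * A minimal sketch of that idea, assuming a table entry of roughly
 * (1 << QMAT_SHIFT) / den and a bias pre-scaled to the same shift:
 */
static inline int quantize_coeff_sketch(int level, int qmat_entry, int bias, int shift)
{
    /* equivalent, up to rounding, to (level + bias / qmat_entry) / den */
    return (level * qmat_entry + bias) >> shift;
}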
 
  176     if (s->q_scale_type == 1 && 0) {
  178         int bestdiff=INT_MAX;
  186             if (diff < bestdiff) {
  195         s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
  208         for (i = 0; i < 64; i++) {
  220     int8_t * const qscale_table = s->current_picture.qscale_table;
  223     for (i = 0; i < s->mb_num; i++) {
  224         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
  226         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
  234 #define COPY(a) dst->a= src->a
  259     for (i = -16; i < 16; i++) {
  265     s->input_picture_number  = 0;
  266     s->picture_in_gop_number = 0;
  274     if (CONFIG_H263_ENCODER)
  276     if (!s->dct_quantize)
  280     s->fast_dct_quantize = s->dct_quantize;
  281     if (s->avctx->trellis)
  292     int i, ret, format_supported;
  301                    "only YUV420 and YUV422 are supported\n");
  307         format_supported = 0;
  316             format_supported = 1;
  322             format_supported = 1;
  324         if (!format_supported) {
  354 #if FF_API_PRIVATE_OPT
  371                "keyframe interval too large!, reducing it from %d to %d\n",
  386     s->rtp_mode           = !!s->rtp_payload_size;
  390     if (s->intra_dc_precision < 0) {
  391         s->intra_dc_precision += 8;
  392     } else if (s->intra_dc_precision >= 8)
  393         s->intra_dc_precision -= 8;
  395     if (s->intra_dc_precision < 0) {
  397                 "intra dc precision must be positive, note some applications use"
  398                 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
  411     if (s->gop_size <= 1) {
  421     s->adaptive_quant = (s->avctx->lumi_masking ||
  422                          s->avctx->dark_masking ||
  423                          s->avctx->temporal_cplx_masking ||
  424                          s->avctx->spatial_cplx_masking  ||
  425                          s->avctx->p_masking      ||
  465                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
  482                "impossible bitrate constraints, this will fail\n");
  492     if (!s->fixed_qscale &&
  498         if (nbt <= INT_MAX) {
  504     if (s->avctx->rc_max_rate &&
  505         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
  509             s->avctx->rc_max_rate * 0xFFFFLL) {
  511                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
  512                "specified vbv buffer is too large for the given bitrate!\n");
  524                "OBMC is only supported with simple mb decision\n");
  533     if (s->max_b_frames                    &&
  540     if (s->max_b_frames < 0) {
  542                "max b frames must be 0 or positive for mpegvideo based encoders\n");
  552                "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
  614 #if FF_API_PRIVATE_OPT
  625                "mpeg2 style quantization not supported by codec\n");
  645                "QP RD is no longer compatible with MJPEG or AMV\n");
  649 #if FF_API_PRIVATE_OPT
  656     if (s->scenechange_threshold < 1000000000 &&
  659                "closed gop with scene change detection are not supported yet, "
  660                "set threshold to 1000000000\n");
  668                    "low delay forcing is only available for mpeg2, "
  669                    "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
  672         if (s->max_b_frames != 0) {
  674                    "B-frames cannot be used with low delay\n");
  679     if (s->q_scale_type == 1) {
  682                    "non linear quant only supports qmax <= 28 currently\n");
  693     if (s->avctx->thread_count > 1         &&
  700                "multi threaded encoding not supported by codec\n");
  704     if (s->avctx->thread_count < 1) {
  706                "automatic thread number detection not supported by codec, "
  716 #if FF_API_PRIVATE_OPT
  727                "notice: b_frame_strategy only affects the first pass\n");
  728         s->b_frame_strategy = 0;
  742         s->inter_quant_bias = 0;
  744         s->intra_quant_bias = 0;
  757         s->avctx->time_base.den > (1 << 16) - 1) {
  759                "timebase %d/%d not supported by MPEG 4 standard, "
  760                "the maximum admitted value for the timebase denominator "
  761                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
  765     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
  771         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
  776         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
  783         if (!CONFIG_MJPEG_ENCODER)
  791         if (!CONFIG_H261_ENCODER)
  795                    "The specified picture size of %dx%d is not valid for the "
  796                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
  797                     s->width, s->height);
  806         if (!CONFIG_H263_ENCODER)
  809                              s->width, s->height) == 8) {
  811                    "The specified picture size of %dx%d is not valid for "
  812                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
  813                    "352x288, 704x576, and 1408x1152. "
  814                    "Try H.263+.\n", s->width, s->height);
  826         s->modified_quant  = s->h263_aic;
  828         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
  838         s->unrestricted_mv = 1;
  852         s->modified_quant  = 1;
  856         s->unrestricted_mv = 0;
  861         s->unrestricted_mv = 1;
  862         s->low_delay       = s->max_b_frames ? 0 : 1;
  863         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
  868         s->unrestricted_mv = 1;
  869         s->msmpeg4_version = 2;
  876         s->unrestricted_mv   = 1;
  877         s->msmpeg4_version   = 3;
  878         s->flipflop_rounding = 1;
  885         s->unrestricted_mv   = 1;
  886         s->msmpeg4_version   = 4;
  887         s->flipflop_rounding = 1;
  894         s->unrestricted_mv   = 1;
  895         s->msmpeg4_version   = 5;
  896         s->flipflop_rounding = 1;
  904 #if FF_API_PRIVATE_OPT
  915     s->progressive_frame    =
  931     if (s->msmpeg4_version) {
  950     if (s->noise_reduction) {
  952                           2 * 64 * sizeof(uint16_t), fail);
  957     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
  960     if (s->slice_context_count > 1) {
  964             s->h263_slice_structured = 1;
  967     s->quant_precision = 5;
  969 #if FF_API_PRIVATE_OPT
  982     ff_set_cmp(&s->mecc, s->mecc.ildct_cmp,      s->avctx->ildct_cmp);
  983     ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
  985     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
  987     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
  992     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
  997     for (i = 0; i < 64; i++) {
  998         int j = s->idsp.idct_permutation[i];
 1004             s->intra_matrix[j] =
 1008             s->chroma_intra_matrix[j] =
 1012         if (s->avctx->intra_matrix)
 1013             s->intra_matrix[j] = s->avctx->intra_matrix[i];
 1014         if (s->avctx->inter_matrix)
 1015             s->inter_matrix[j] = s->avctx->inter_matrix[i];
 1022                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
 1025                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
 1032 #if FF_API_PRIVATE_OPT
 1042     if (s->b_frame_strategy == 2) {
 1043         for (i = 0; i < s->max_b_frames + 2; i++) {
 1045             if (!s->tmp_frames[i])
 1049             s->tmp_frames[i]->width  = s->width  >> s->brd_scale;
 1050             s->tmp_frames[i]->height = s->height >> s->brd_scale;
 1080     if (CONFIG_MJPEG_ENCODER &&
 1095     if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
 1096     if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
 1097     s->q_chroma_intra_matrix=   NULL;
 1098     s->q_chroma_intra_matrix16= NULL;
 1115     for (y = 0; y < 16; y++) {
 1116         for (x = 0; x < 16; x++) {
 1131     h = s->height & ~15;
 1133     for (y = 0; y < h; y += 16) {
 1134         for (x = 0; x < w; x += 16) {
 1141             acc += sae + 500 < sad;
 1150                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
 1151                             s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
 1152                             &s->linesize, &s->uvlinesize);
 1159     int i, display_picture_number = 0, ret;
 1160     int encoding_delay = s->max_b_frames ? s->max_b_frames
 1161                                          : (s->low_delay ? 0 : 1);
 1162     int flush_offset = 1;
 1167         display_picture_number = s->input_picture_number++;
 1171                 int64_t last = s->user_specified_pts;
 1175                            "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
 1180                 if (!s->low_delay && display_picture_number == 1)
 1181                     s->dts_delta = pts - last;
 1183             s->user_specified_pts = pts;
 1186                 s->user_specified_pts =
 1187                 pts = s->user_specified_pts + 1;
 1189                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
 1192                 pts = display_picture_number;
 1196         if (!pic_arg->buf[0] ||
 1198             pic_arg->linesize[1] != s->uvlinesize ||
 1201         if ((s->width & 15) || (s->height & 15))
 1209                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
 1215         pic = &s->picture[i];
 1232                 int h_chroma_shift, v_chroma_shift;
 1237                 for (i = 0; i < 3; i++) {
 1239                     int dst_stride = i ? s->uvlinesize : s->linesize;
 1240                     int h_shift = i ? h_chroma_shift : 0;
 1241                     int v_shift = i ? v_chroma_shift : 0;
 1242                     int w = s->width  >> h_shift;
 1243                     int h = s->height >> v_shift;
 1249                         && !s->progressive_sequence
 1250                         && FFALIGN(s->height, 32) - s->height > 16)
 1253                     if (!s->avctx->rc_buffer_size)
 1256                     if (src_stride == dst_stride)
 1257                         memcpy(dst, src, src_stride * h);
 1262                             memcpy(dst2, src, w);
 1267                     if ((s->width & 15) || (s->height & (vpad-1))) {
 1268                         s->mpvencdsp.draw_edges(dst, dst_stride,
 1287         for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
 1288             if (s->input_picture[flush_offset])
 1291         if (flush_offset <= 1)
 1294             encoding_delay = encoding_delay - flush_offset + 1;
 1299         s->input_picture[i - flush_offset] = s->input_picture[i];
 1301     s->input_picture[encoding_delay] = (Picture*) pic;
 1310     int64_t score64 = 0;
 1312     for (plane = 0; plane < 3; plane++) {
 1314         const int bw = plane ? 1 : 2;
 1315         for (y = 0; y < s->mb_height * bw; y++) {
 1316             for (x = 0; x < s->mb_width * bw; x++) {
 1317                 int off = p->shared ? 0 : 16;
 1320                 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
 1322                 switch (FFABS(s->frame_skip_exp)) {
 1323                 case 0: score    =  FFMAX(score, v);          break;
 1324                 case 1: score   += FFABS(v);                  break;
 1325                 case 2: score64 += v * (int64_t)v;                       break;
 1326                 case 3: score64 += FFABS(v * (int64_t)v * v);            break;
 1327                 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v);  break;
 1336     if (s->frame_skip_exp < 0)
 1337         score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
 1338                       -1.0/s->frame_skip_exp);
 1342     if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
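/*
 * Annotation (not part of the original listing): per the switch above,
 * FFABS(frame_skip_exp) selects how the per-block comparator results v are
 * aggregated -- 0 keeps the maximum, 1 sums |v|, and 2..4 sum |v|^2..|v|^4;
 * a negative frame_skip_exp additionally normalizes the total per macroblock
 * via the pow() call. The frame is then considered droppable when the
 * aggregate stays below (frame_skip_factor * lambda) >> 8.
 */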
 
 1374     const int scale = s->brd_scale;
 1375     int width  = s->width  >> scale;
 1376     int height = s->height >> scale;
 1378     int64_t best_rd  = INT64_MAX;
 1379     int best_b_count = -1;
 1390         b_lambda = p_lambda;
 1394     for (i = 0; i < s->max_b_frames + 2; i++) {
 1395         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
 1396                                                 s->next_picture_ptr;
 1399         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
 1400             pre_input = *pre_input_ptr;
 1409             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
 1410                                        s->tmp_frames[i]->linesize[0],
 1414             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
 1415                                        s->tmp_frames[i]->linesize[1],
 1419             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
 1420                                        s->tmp_frames[i]->linesize[2],
 1427     for (j = 0; j < s->max_b_frames + 1; j++) {
 1431         if (!s->input_picture[j])
 1442         c->mb_decision  = s->avctx->mb_decision;
 1443         c->me_cmp       = s->avctx->me_cmp;
 1444         c->mb_cmp       = s->avctx->mb_cmp;
 1445         c->me_sub_cmp   = s->avctx->me_sub_cmp;
 1447         c->time_base    = s->avctx->time_base;
 1448         c->max_b_frames = s->max_b_frames;
 1465         for (i = 0; i < s->max_b_frames + 1; i++) {
 1466             int is_p = i % (j + 1) == j || i == s->max_b_frames;
 1468             s->tmp_frames[i + 1]->pict_type = is_p ?
 1470             s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;
 1489         rd += c->error[0] + c->error[1] + c->error[2];
 1502     return best_b_count;
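/*
 * Annotation (not part of the original listing): b_frame_strategy 2 appears
 * to work by downscaling the queued input frames by brd_scale (the shrink[]
 * calls above), encoding each candidate arrangement of j B-frames with a
 * small helper encoder context c, summing its reported error[] values as a
 * rate-distortion proxy in rd, and returning the candidate count with the
 * lowest score.
 */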
 
 1510         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
 1514     if (!s->reordered_input_picture[0] && s->input_picture[0]) {
 1515         if (s->frame_skip_threshold || s->frame_skip_factor) {
 1516             if (s->picture_in_gop_number < s->gop_size &&
 1517                 s->next_picture_ptr &&
 1529             !s->next_picture_ptr || s->intra_only) {
 1530             s->reordered_input_picture[0] = s->input_picture[0];
 1532             s->reordered_input_picture[0]->f->coded_picture_number =
 1533                 s->coded_picture_number++;
 1538                 for (i = 0; i < s->max_b_frames + 1; i++) {
 1539                     int pict_num = s->input_picture[0]->f->display_picture_number + i;
 1541                     if (pict_num >= s->rc_context.num_entries)
 1543                     if (!s->input_picture[i]) {
 1548                     s->input_picture[i]->f->pict_type =
 1549                         s->rc_context.entry[pict_num].new_pict_type;
 1553             if (s->b_frame_strategy == 0) {
 1554                 b_frames = s->max_b_frames;
 1555                 while (b_frames && !s->input_picture[b_frames])
 1557             } else if (s->b_frame_strategy == 1) {
 1558                 for (i = 1; i < s->max_b_frames + 1; i++) {
 1559                     if (s->input_picture[i] &&
 1560                         s->input_picture[i]->b_frame_score == 0) {
 1561                         s->input_picture[i]->b_frame_score =
 1563                                             s->input_picture[i    ]->f->data[0],
 1564                                             s->input_picture[i - 1]->f->data[0],
 1568                 for (i = 0; i < s->max_b_frames + 1; i++) {
 1569                     if (!s->input_picture[i] ||
 1570                         s->input_picture[i]->b_frame_score - 1 >
 1571                             s->mb_num / s->b_sensitivity)
 1575                 b_frames = FFMAX(0, i - 1);
 1578                 for (i = 0; i < b_frames + 1; i++) {
 1579                     s->input_picture[i]->b_frame_score = 0;
 1581             } else if (s->b_frame_strategy == 2) {
 1589             for (i = b_frames - 1; i >= 0; i--) {
 1590                 int type = s->input_picture[i]->f->pict_type;
 1595                 b_frames == s->max_b_frames) {
 1597                        "warning, too many B-frames in a row\n");
 1600             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
 1602                     s->gop_size > s->picture_in_gop_number) {
 1603                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
 1615             s->reordered_input_picture[0] = s->input_picture[b_frames];
 1618             s->reordered_input_picture[0]->f->coded_picture_number =
 1619                 s->coded_picture_number++;
 1620             for (i = 0; i < b_frames; i++) {
 1621                 s->reordered_input_picture[i + 1] = s->input_picture[i];
 1622                 s->reordered_input_picture[i + 1]->f->pict_type =
 1624                 s->reordered_input_picture[i + 1]->f->coded_picture_number =
 1625                     s->coded_picture_number++;
 1632     if (s->reordered_input_picture[0]) {
 1633         s->reordered_input_picture[0]->reference =
 1634            s->reordered_input_picture[0]->f->pict_type !=
 1640         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
 1648             pic = &s->picture[i];
 1650             pic->reference = s->reordered_input_picture[0]->reference;
 1661             s->reordered_input_picture[0]->shared = 0;
 1663             s->current_picture_ptr = pic;
 1666             s->current_picture_ptr = s->reordered_input_picture[0];
 1667             for (i = 0; i < 4; i++) {
 1673                                        s->current_picture_ptr)) < 0)
 1676         s->picture_number = s->new_picture.f->display_picture_number;
 1683     if (s->unrestricted_mv &&
 1684         s->current_picture.reference &&
 1687         int hshift = desc->log2_chroma_w;
 1688         int vshift = desc->log2_chroma_h;
 1689         s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
 1690                                 s->current_picture.f->linesize[0],
 1691                                 s->h_edge_pos, s->v_edge_pos,
 1694         s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
 1695                                 s->current_picture.f->linesize[1],
 1696                                 s->h_edge_pos >> hshift,
 1697                                 s->v_edge_pos >> vshift,
 1701         s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
 1702                                 s->current_picture.f->linesize[2],
 1703                                 s->h_edge_pos >> hshift,
 1704                                 s->v_edge_pos >> vshift,
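/*
 * Annotation (not part of the original listing): when unrestricted motion
 * vectors are enabled and the picture will be used as a reference, the
 * draw_edges() calls above replicate the border pixels of all three planes
 * (chroma shifted by hshift/vshift) so that later motion compensation can
 * safely read outside the visible picture area.
 */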
 
 1712     s->last_pict_type                 = s->pict_type;
 1713     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
 1715         s->last_non_b_pict_type = s->pict_type;
 1717 #if FF_API_CODED_FRAME
 1723 #if FF_API_ERROR_FRAME
 1725     memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
 1726            sizeof(s->current_picture.encoding_error));
 1735     for (intra = 0; intra < 2; intra++) {
 1736         if (s->dct_count[intra] > (1 << 16)) {
 1737             for (i = 0; i < 64; i++) {
 1738                 s->dct_error_sum[intra][i] >>= 1;
 1740             s->dct_count[intra] >>= 1;
 1743         for (i = 0; i < 64; i++) {
 1744             s->dct_offset[intra][i] = (s->noise_reduction *
 1745                                        s->dct_count[intra] +
 1746                                        s->dct_error_sum[intra][i] / 2) /
 1747                                       (s->dct_error_sum[intra][i] + 1);
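/*
 * Annotation (not part of the original listing): the update above keeps the
 * running statistics bounded (halving counts and error sums once dct_count
 * exceeds 2^16) and then derives, per coefficient, roughly
 *   dct_offset[intra][i] = (noise_reduction * dct_count[intra] + err/2) / (err + 1)
 * with err = dct_error_sum[intra][i], presumably used as the per-coefficient
 * noise-reduction offset applied before quantization.
 */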
 
 1758         s->last_picture_ptr != s->next_picture_ptr &&
 1759         s->last_picture_ptr->f->buf[0]) {
 1763     s->current_picture_ptr->f->pict_type = s->pict_type;
 1768                                    s->current_picture_ptr)) < 0)
 1772         s->last_picture_ptr = s->next_picture_ptr;
 1774             s->next_picture_ptr = s->current_picture_ptr;
 1777     if (s->last_picture_ptr) {
 1779         if (s->last_picture_ptr->f->buf[0] &&
 1781                                        s->last_picture_ptr)) < 0)
 1784     if (s->next_picture_ptr) {
 1786         if (s->next_picture_ptr->f->buf[0] &&
 1788                                        s->next_picture_ptr)) < 0)
 1794         for (i = 0; i < 4; i++) {
 1796                 s->current_picture.f->data[i] +=
 1797                     s->current_picture.f->linesize[i];
 1799             s->current_picture.f->linesize[i] *= 2;
 1800             s->last_picture.f->linesize[i]    *= 2;
 1801             s->next_picture.f->linesize[i]    *= 2;
 1806         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
 1807         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
 1809         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
 1810         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
 1812         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
 1813         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
 1816     if (s->dct_error_sum) {
 1825                           const AVFrame *pic_arg, int *got_packet)
 1828     int i, stuffing_count, ret;
 1829     int context_count = s->slice_context_count;
 1831     s->vbv_ignore_qmax = 0;
 1833     s->picture_in_gop_number++;
 1843     if (s->new_picture.f->data[0]) {
 1844         int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
 1853                                  s->mb_width*s->mb_height*12);
 1854         s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
 1857         for (i = 0; i < context_count; i++) {
 1858             int start_y = s->thread_context[i]->start_mb_y;
 1860             int h       = s->mb_height;
 1867         s->pict_type = s->new_picture.f->pict_type;
 1874         if (growing_buffer) {
 1882 #if FF_API_STAT_BITS
 1898         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
 1908                 s->lambda < s->lmax) {
 1909                 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
 1910                                        (s->qscale + 1) / s->qscale);
 1911                 if (s->adaptive_quant) {
 1913                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
 1914                         s->lambda_table[i] =
 1915                             FFMAX(s->lambda_table[i] + min_step,
 1916                                   s->lambda_table[i] * (s->qscale + 1) /
 1922                     if (s->flipflop_rounding          ||
 1925                         s->no_rounding ^= 1;
 1928                     s->time_base       = s->last_time_base;
 1929                     s->last_non_b_time = s->time - s->pp_time;
 1931                 for (i = 0; i < context_count; i++) {
 1935                 s->vbv_ignore_qmax = 1;
 1946         for (i = 0; i < 4; i++) {
 1947             s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
 1948             avctx->error[i] += s->current_picture_ptr->encoding_error[i];
 1951                                        s->current_picture_ptr->encoding_error,
 1957                                              s->misc_bits + s->i_tex_bits +
 1963         s->stuffing_bits = 8*stuffing_count;
 1964         if (stuffing_count) {
 1966                     stuffing_count + 50) {
 1971             switch (s->codec_id) {
 1974                 while (stuffing_count--) {
 1981                 stuffing_count -= 4;
 1982                 while (stuffing_count--) {
 1994         if (s->avctx->rc_max_rate                          &&
 1995             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
 1998                 s->avctx->rc_max_rate * 0xFFFFLL) {
 2003             double inbits  = s->avctx->rc_max_rate *
 2005             int    minbits = s->frame_bits - 8 *
 2006                              (s->vbv_delay_ptr - s->pb.buf - 1);
 2007             double bits    = s->rc_context.buffer_index + minbits - inbits;
 2011                        "Internal error, negative bits\n");
 2016             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
 2017                         s->avctx->rc_max_rate;
 2023             s->vbv_delay_ptr[0] &= 0xF8;
 2026             s->vbv_delay_ptr[2] &= 0x07;
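/*
 * Annotation (not part of the original listing): for CBR operation
 * (rc_min_rate equal to rc_max_rate) the code above recomputes vbv_delay once
 * the frame size is known: min_delay = ceil(minbits * 90000 / rc_max_rate)
 * expresses the delay in 90 kHz ticks, and the masked writes through
 * vbv_delay_ptr appear to patch the already-written picture header bits in
 * place.
 */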
 
 2041 #if FF_API_VBV_DELAY
 2047         s->total_bits     += s->frame_bits;
 2048 #if FF_API_STAT_BITS
 2055         pkt->pts = s->current_picture.f->pts;
 2057             if (!s->current_picture.f->coded_picture_number)
 2064         if (s->current_picture.f->key_frame)
 2074         if (!s->picture[i].reference)
 2086                                                 int n, int threshold)
 2088     static const char tab[64] = {
 2089         3, 2, 2, 1, 1, 1, 1, 1,
 2090         1, 1, 1, 1, 1, 1, 1, 1,
 2091         1, 1, 1, 1, 1, 1, 1, 1,
 2092         0, 0, 0, 0, 0, 0, 0, 0,
 2093         0, 0, 0, 0, 0, 0, 0, 0,
 2094         0, 0, 0, 0, 0, 0, 0, 0,
 2095         0, 0, 0, 0, 0, 0, 0, 0,
 2096         0, 0, 0, 0, 0, 0, 0, 0
 2101     int16_t *block = s->block[n];
 2102     const int last_index = s->block_last_index[n];
 2105     if (threshold < 0) {
 2107         threshold = -threshold;
 2112     if (last_index <= skip_dc - 1)
 2115     for (i = 0; i <= last_index; i++) {
 2116         const int j = s->intra_scantable.permutated[i];
 2119             if (skip_dc && i == 0)
 2123         } else if (level > 1) {
 2129     if (score >= threshold)
 2131     for (i = skip_dc; i <= last_index; i++) {
 2132         const int j = s->intra_scantable.permutated[i];
 2136         s->block_last_index[n] = 0;
 2138         s->block_last_index[n] = -1;
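/*
 * Annotation (not part of the original listing): this single-coefficient
 * elimination pass appears to weight each surviving level by tab[] (early
 * scan positions cost more) and, when the accumulated score stays below the
 * caller's threshold, clears the block and sets block_last_index to 0 (DC
 * kept) or -1 (nothing coded), trading a tiny distortion increase for
 * cheaper entropy coding.
 */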
 
 2145     const int maxlevel = s->max_qcoeff;
 2146     const int minlevel = s->min_qcoeff;
 2154     for (; i <= last_index; i++) {
 2155         const int j = s->intra_scantable.permutated[i];
 2158         if (level > maxlevel) {
 2161         } else if (level < minlevel) {
 2171                "warning, clipping %d dct coefficients to %d..%d\n",
 2179     for (y = 0; y < 8; y++) {
 2180         for (x = 0; x < 8; x++) {
 2186             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
 2187                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
 2188                     int v = ptr[x2 + y2 * stride];
 2194             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
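/*
 * Annotation (not part of the original listing): the weight[] computed above
 * is a local activity measure -- for each pixel it gathers the sum and sum of
 * squares over its 3x3 neighbourhood (clamped to the 8x8 block) and stores
 * 36*sqrt(count*sqr - sum*sum)/count, i.e. a scaled local standard deviation,
 * used to shape where quantization noise is least visible.
 */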
 
 2200                                                 int motion_x, int motion_y,
 2201                                                 int mb_block_height,
 2206     int16_t orig[12][64];
 2207     const int mb_x = s->mb_x;
 2208     const int mb_y = s->mb_y;
 2212     int uv_dct_offset = s->uvlinesize * 8;
 2213     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
 2214     ptrdiff_t wrap_y, wrap_c;
 2216     for (i = 0; i < mb_block_count; i++)
 2217         skip_dct[i] = s->skipdct;
 2219     if (s->adaptive_quant) {
 2220         const int last_qp = s->qscale;
 2221         const int mb_xy = mb_x + mb_y * s->mb_stride;
 2223         s->lambda = s->lambda_table[mb_xy];
 2227             s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
 2228             s->dquant = s->qscale - last_qp;
 2231                 s->dquant = av_clip(s->dquant, -2, 2);
 2249     wrap_y = s->linesize;
 2250     wrap_c = s->uvlinesize;
 2251     ptr_y  = s->new_picture.f->data[0] +
 2253     ptr_cb = s->new_picture.f->data[1] +
 2254              (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
 2255     ptr_cr = s->new_picture.f->data[2] +
 2256              (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
 2259         uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
 2260         int cw = (s->width  + s->chroma_x_shift) >> s->chroma_x_shift;
 2261         int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
 2262         s->vdsp.emulated_edge_mc(ebuf, ptr_y,
 2265                                  s->width, s->height);
 2267         s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
 2269                                  mb_block_width, mb_block_height,
 2270                                  mb_x * mb_block_width, mb_y * mb_block_height,
 2272         ptr_cb = ebuf + 16 * wrap_y;
 2273         s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
 2275                                  mb_block_width, mb_block_height,
 2276                                  mb_x * mb_block_width, mb_y * mb_block_height,
 2278         ptr_cr = ebuf + 16 * wrap_y + 16;
 2283             int progressive_score, interlaced_score;
 2285             s->interlaced_dct = 0;
 2286             progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
 2287                                 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
 2288                                                      NULL, wrap_y, 8) - 400;
 2290             if (progressive_score > 0) {
 2291                 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
 2292                                                         NULL, wrap_y * 2, 8) +
 2293                                    s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
 2294                                                         NULL, wrap_y * 2, 8);
 2295                 if (progressive_score > interlaced_score) {
 2296                     s->interlaced_dct = 1;
 2299                     uv_dct_offset = wrap_c;
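/*
 * Annotation (not part of the original listing): the intra interlaced-DCT
 * decision above scores the luma macroblock once with frame (progressive) row
 * ordering and once with field ordering (stride doubled, two field offsets);
 * if the field arrangement scores lower, interlaced_dct is set and the chroma
 * DCT row offset collapses to a single line (uv_dct_offset = wrap_c). The
 * -400 term biases the choice toward the progressive path.
 */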
 
 2308         s->pdsp.get_pixels(s->block[0], ptr_y,                  wrap_y);
 2309         s->pdsp.get_pixels(s->block[1], ptr_y + 8,              wrap_y);
 2310         s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset,     wrap_y);
 2311         s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
 2317             s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
 2318             s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
 2319             if (!s->chroma_y_shift && s->chroma_x_shift) {
 2320                 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
 2321                 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
 2322             } else if (!s->chroma_y_shift && !s->chroma_x_shift) {
 2323                 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
 2324                 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
 2325                 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
 2326                 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
 2327                 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
 2328                 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
 2334         uint8_t *dest_y, *dest_cb, *dest_cr;
 2336         dest_y  = s->dest[0];
 2337         dest_cb = s->dest[1];
 2338         dest_cr = s->dest[2];
 2341             op_pix  = s->hdsp.put_pixels_tab;
 2342             op_qpix = s->qdsp.put_qpel_pixels_tab;
 2344             op_pix  = s->hdsp.put_no_rnd_pixels_tab;
 2345             op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
 2350                           s->last_picture.f->data,
 2352             op_pix  = s->hdsp.avg_pixels_tab;
 2353             op_qpix = s->qdsp.avg_qpel_pixels_tab;
 2357                           s->next_picture.f->data,
 2362             int progressive_score, interlaced_score;
 2364             s->interlaced_dct = 0;
 2365             progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
 2366                                 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
 2371                 progressive_score -= 400;
 2373             if (progressive_score > 0) {
 2374                 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
 2376                                    s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
 2380                 if (progressive_score > interlaced_score) {
 2381                     s->interlaced_dct = 1;
 2384                     uv_dct_offset = wrap_c;
 2392         s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
 2393         s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
 2396         s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
 2403             s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
 2404             s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
 2405             if (!s->chroma_y_shift) {
 2406                 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
 2407                                     dest_cb + uv_dct_offset, wrap_c);
 2408                 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
 2409                                     dest_cr + uv_dct_offset, wrap_c);
 2413         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
 2414                 2 * s->qscale * s->qscale) {
 2416             if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
 2418             if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
 2421                                wrap_y, 8) < 20 * s->qscale)
 2424                                wrap_y, 8) < 20 * s->qscale)
 2426             if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
 2428             if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
 2430             if (!s->chroma_y_shift) {
 2431                 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
 2432                                    dest_cb + uv_dct_offset,
 2433                                    wrap_c, 8) < 20 * s->qscale)
 2435                 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
 2436                                    dest_cr + uv_dct_offset,
 2437                                    wrap_c, 8) < 20 * s->qscale)
 2443     if (s->quantizer_noise_shaping) {
 2456         if (!s->chroma_y_shift) {
 2464         memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
 2470         for (i = 0; i < mb_block_count; i++) {
 2473                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
 2482                 s->block_last_index[i] = -1;
 2484         if (s->quantizer_noise_shaping) {
 2485             for (i = 0; i < mb_block_count; i++) {
 2487                     s->block_last_index[i] =
 2489                                             orig[i], i, s->qscale);
 2494         if (s->luma_elim_threshold && !s->mb_intra)
 2495             for (i = 0; i < 4; i++)
 2497         if (s->chroma_elim_threshold && !s->mb_intra)
 2498             for (i = 4; i < mb_block_count; i++)
 2502             for (i = 0; i < mb_block_count; i++) {
 2503                 if (s->block_last_index[i] == -1)
 2504                     s->coded_score[i] = INT_MAX / 256;
 2510         s->block_last_index[4] =
 2511         s->block_last_index[5] = 0;
 2513         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
 2514         if (!s->chroma_y_shift) {
 2515             for (i=6; i<12; i++) {
 2516                 s->block_last_index[i] = 0;
 2517                 s->block[i][0] = s->block[4][0];
 2524         for (i = 0; i < mb_block_count; i++) {
 2526             if (s->block_last_index[i] > 0) {
 2527                 for (j = 63; j > 0; j--) {
 2528                     if (s->block[i][s->intra_scantable.permutated[j]])
 2531                 s->block_last_index[i] = j;
 2537     switch(s->codec_id){
 2540         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
 2544         if (CONFIG_MPEG4_ENCODER)
 2554         if (CONFIG_WMV2_ENCODER)
 2558         if (CONFIG_H261_ENCODER)
 2566         if (CONFIG_H263_ENCODER)
 2571         if (CONFIG_MJPEG_ENCODER)
 2589     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
 2617     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
 2618     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
 2640     if(s->data_partitioning){
 2655                            int *dmin, int *next_block, int motion_x, int motion_y)
 2662     s->block= s->blocks[*next_block];
 2663     s->pb= pb[*next_block];
 2664     if(s->data_partitioning){
 2665         s->pb2   = pb2   [*next_block];
 2666         s->tex_pb= tex_pb[*next_block];
 2670         memcpy(dest_backup, s->dest, sizeof(s->dest));
 2671         s->dest[0] = s->sc.rd_scratchpad;
 2672         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
 2673         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
 2680     if(s->data_partitioning){
 2688         score *= s->lambda2;
 2693         memcpy(s->dest, dest_backup, sizeof(s->dest));
 2711     else if(w==8 && h==8)
 2729     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
 2730     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
 2734         return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
 2735                s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
 2736                s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
 2738         return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
 2739                s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
 2740                s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
 2743         return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
 2744                +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
 2745                +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
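/*
 * Annotation (not part of the original listing): the per-macroblock
 * distortion above compares the reconstruction against the source frame --
 * using the NSSE or SSE comparators for full 16x16/8x8 blocks, and the plain
 * sse() helper with the clipped w/h (set above) for partial blocks at the
 * right and bottom picture edges.
 */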
 
 2753     s->me.dia_size= s->avctx->pre_dia_size;
 2754     s->first_slice_line=1;
 2755     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
 2756         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
 2759         s->first_slice_line=0;
 2772     s->me.dia_size= s->avctx->dia_size;
 2773     s->first_slice_line=1;
 2774     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
 2777         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
 2778             s->block_index[0]+=2;
 2779             s->block_index[1]+=2;
 2780             s->block_index[2]+=2;
 2781             s->block_index[3]+=2;
 2789         s->first_slice_line=0;
 2804             uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
 2806             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
 2808             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
 2809                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
 2811             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
 2812             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
 2813             s->me.mb_var_sum_temp    += varc;
 2821         if(s->partitioned_frame){
 2826     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
 2839     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
 2841     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
 2842     int gobn = s->mb_y / s->gob_index;
 2844     if (CONFIG_H263_ENCODER)
 2846     bytestream_put_le32(&ptr, offset);
 2847     bytestream_put_byte(&ptr, s->qscale);
 2848     bytestream_put_byte(&ptr, gobn);
 2849     bytestream_put_le16(&ptr, mba);
 2850     bytestream_put_byte(&ptr, pred_x);
 2851     bytestream_put_byte(&ptr, pred_y);
 2853     bytestream_put_byte(&ptr, 0);
 2854     bytestream_put_byte(&ptr, 0);
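/*
 * Annotation (not part of the original listing): each 12-byte record written
 * above (bit offset, qscale, GOB number, macroblock address, the two motion
 * vector predictors, plus two zero bytes) describes one resynchronization
 * point; this is presumably the payload later exported as H.263
 * macroblock-info packet side data for RFC 2190-style RTP packetization.
 */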
 
 2862         s->mb_info_size += 12;
 2863         s->prev_mb_info = s->last_mb_info;
 2875     if (!s->mb_info_size)
 2876         s->mb_info_size += 12;
 2883         && s->slice_context_count == 1
 2884         && s->pb.buf == s->avctx->internal->byte_buffer) {
 2885         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
 2886         int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;
 2889         int new_buffer_size = 0;
 2891         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
 2899                               s->avctx->internal->byte_buffer_size + size_increase);
 2903         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
 2904         av_free(s->avctx->internal->byte_buffer);
 2905         s->avctx->internal->byte_buffer      = new_buffer;
 2906         s->avctx->internal->byte_buffer_size = new_buffer_size;
 2908         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
 2909         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
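/*
 * Annotation (not part of the original listing): because ptr_lastgob and
 * vbv_delay_ptr point into the bitstream buffer being enlarged, they are
 * saved as byte offsets before the reallocation and rebased onto the new
 * pb.buf afterwards; the growth is also capped so the total buffer size stays
 * below INT_MAX/8.
 */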
 
 2919     int chr_h= 16>>
s->chroma_y_shift;
 
 2948         s->last_dc[
i] = 128 << 
s->intra_dc_precision;
 
 2950         s->current_picture.encoding_error[
i] = 0;
 
 2953         s->last_dc[0] = 128*8/13;
 
 2954         s->last_dc[1] = 128*8/14;
 
 2955         s->last_dc[2] = 128*8/14;
 
 2958     memset(
s->last_mv, 0, 
sizeof(
s->last_mv));
 
 2962     switch(
s->codec_id){
 
 2966         if (CONFIG_H263_ENCODER)
 
 2970         if(CONFIG_MPEG4_ENCODER && 
s->partitioned_frame)
 
 2977     s->first_slice_line = 1;
 
 2978     s->ptr_lastgob = 
s->pb.buf;
 
 2992             int size_increase =  
s->avctx->internal->byte_buffer_size/4
 
 3000             if(
s->data_partitioning){
 
 3014                 xy= 
s->mb_y*
s->mb_stride + 
s->mb_x;
 
 3020                 int current_packet_size, is_gob_start;
 
 3022                 current_packet_size= ((
put_bits_count(&
s->pb)+7)>>3) - (
s->ptr_lastgob - 
s->pb.buf);
 
 3024                 is_gob_start = 
s->rtp_payload_size &&
 
 3025                                current_packet_size >= 
s->rtp_payload_size &&
 
 3028                 if(
s->start_mb_y == 
mb_y && 
mb_y > 0 && 
mb_x==0) is_gob_start=1;
 
 3030                 switch(
s->codec_id){
 
 3033                     if(!
s->h263_slice_structured)
 
 3034                         if(
s->mb_x || 
s->mb_y%
s->gob_index) is_gob_start=0;
 
 3037                     if(
s->mb_x==0 && 
s->mb_y!=0) is_gob_start=1;
 
 3039                     if(
s->mb_skip_run) is_gob_start=0;
 
 3042                     if(
s->mb_x==0 && 
s->mb_y!=0) is_gob_start=1;
 
 3058                     if (
s->error_rate && 
s->resync_mb_x + 
s->resync_mb_y > 0) {
 
 3060                         int d = 100 / 
s->error_rate;
 
 3062                             current_packet_size=0;
 
 3063                             s->pb.buf_ptr= 
s->ptr_lastgob;
 
 3068 #if FF_API_RTP_CALLBACK 
 3070                     if (
s->avctx->rtp_callback){
 
 3071                         int number_mb = (
mb_y - 
s->resync_mb_y)*
s->mb_width + 
mb_x - 
s->resync_mb_x;
 
 3072                         s->avctx->rtp_callback(
s->avctx, 
s->ptr_lastgob, current_packet_size, number_mb);
 
 3078                     switch(
s->codec_id){
 
 3080                         if (CONFIG_MPEG4_ENCODER) {
 
 3087                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
 
 3094                         if (CONFIG_H263_ENCODER)
 
 3101                         s->misc_bits+= 
bits - 
s->last_bits;
 
 3105                     s->ptr_lastgob += current_packet_size;
 
 3106                     s->first_slice_line=1;
 
 3107                     s->resync_mb_x=
mb_x;
 
 3108                     s->resync_mb_y=
mb_y;
 
 3112             if(  (
s->resync_mb_x   == 
s->mb_x)
 
 3113                && 
s->resync_mb_y+1 == 
s->mb_y){
 
 3114                 s->first_slice_line=0;
 
 3124                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
 
 3130                 if(
s->data_partitioning){
 
 3131                     backup_s.pb2= 
s->pb2;
 
 3132                     backup_s.tex_pb= 
s->tex_pb;
 
 3139                     s->mv[0][0][0] = 
s->p_mv_table[xy][0];
 
 3140                     s->mv[0][0][1] = 
s->p_mv_table[xy][1];
 
 3142                                  &dmin, &next_block, 
s->mv[0][0][0], 
s->mv[0][0][1]);
 
 3149                         j= 
s->field_select[0][
i] = 
s->p_field_select_table[
i][xy];
 
 3150                         s->mv[0][
i][0] = 
s->p_field_mv_table[
i][j][xy][0];
 
 3151                         s->mv[0][
i][1] = 
s->p_field_mv_table[
i][j][xy][1];
 
 3154                                  &dmin, &next_block, 0, 0);
 
 3163                                  &dmin, &next_block, 
s->mv[0][0][0], 
s->mv[0][0][1]);
 
 3170                         s->mv[0][
i][0] = 
s->current_picture.motion_val[0][
s->block_index[
i]][0];
 
 3171                         s->mv[0][
i][1] = 
s->current_picture.motion_val[0][
s->block_index[
i]][1];
 
 3174                                  &dmin, &next_block, 0, 0);
 
 3180                     s->mv[0][0][0] = 
s->b_forw_mv_table[xy][0];
 
 3181                     s->mv[0][0][1] = 
s->b_forw_mv_table[xy][1];
 
 3183                                  &dmin, &next_block, 
s->mv[0][0][0], 
s->mv[0][0][1]);
 
 3189                     s->mv[1][0][0] = 
s->b_back_mv_table[xy][0];
 
 3190                     s->mv[1][0][1] = 
s->b_back_mv_table[xy][1];
 
 3192                                  &dmin, &next_block, 
s->mv[1][0][0], 
s->mv[1][0][1]);
 
 3198                     s->mv[0][0][0] = 
s->b_bidir_forw_mv_table[xy][0];
 
 3199                     s->mv[0][0][1] = 
s->b_bidir_forw_mv_table[xy][1];
 
 3200                     s->mv[1][0][0] = 
s->b_bidir_back_mv_table[xy][0];
 
 3201                     s->mv[1][0][1] = 
s->b_bidir_back_mv_table[xy][1];
 
 3203                                  &dmin, &next_block, 0, 0);
 
 3210                         j= 
s->field_select[0][
i] = 
s->b_field_select_table[0][
i][xy];
 
 3211                         s->mv[0][
i][0] = 
s->b_field_mv_table[0][
i][j][xy][0];
 
 3212                         s->mv[0][
i][1] = 
s->b_field_mv_table[0][
i][j][xy][1];
 
 3215                                  &dmin, &next_block, 0, 0);
 
 3222                         j= 
s->field_select[1][
i] = 
s->b_field_select_table[1][
i][xy];
 
 3223                         s->mv[1][
i][0] = 
s->b_field_mv_table[1][
i][j][xy][0];
 
 3224                         s->mv[1][
i][1] = 
s->b_field_mv_table[1][
i][j][xy][1];
 
 3227                                  &dmin, &next_block, 0, 0);
 
 3233                     for(dir=0; dir<2; dir++){
 
 3235                             j= 
s->field_select[dir][
i] = 
s->b_field_select_table[dir][
i][xy];
 
 3236                             s->mv[dir][
i][0] = 
s->b_field_mv_table[dir][
i][j][xy][0];
 
 3237                             s->mv[dir][
i][1] = 
s->b_field_mv_table[dir][
i][j][xy][1];
 
 3241                                  &dmin, &next_block, 0, 0);
 
 3250                                  &dmin, &next_block, 0, 0);
 
 3251                     if(
s->h263_pred || 
s->h263_aic){
 
 3253                             s->mbintra_table[
mb_x + 
mb_y*
s->mb_stride]=1;
 
 3261                         const int last_qp= backup_s.qscale;
 
 3265                         static const int dquant_tab[4]={-1,1,-2,2};
 
 3266                         int storecoefs = 
s->mb_intra && 
s->dc_val[0];
 
 3274                         s->mv[0][0][0] = best_s.
mv[0][0][0];
 
 3275                         s->mv[0][0][1] = best_s.
mv[0][0][1];
 
 3276                         s->mv[1][0][0] = best_s.
mv[1][0][0];
 
 3277                         s->mv[1][0][1] = best_s.
mv[1][0][1];
 
 3280                         for(; qpi<4; qpi++){
 
 3281                             int dquant= dquant_tab[qpi];
 
 3283                             if(qp < s->
avctx->
qmin || qp > 
s->avctx->qmax)
 
 3288                                     dc[
i]= 
s->dc_val[0][ 
s->block_index[
i] ];
 
 3289                                     memcpy(ac[
i], 
s->ac_val[0][
s->block_index[
i]], 
sizeof(int16_t)*16);
 
 3294                                          &dmin, &next_block, 
s->mv[mvdir][0][0], 
s->mv[mvdir][0][1]);
 
 3298                                         s->dc_val[0][ 
s->block_index[
i] ]= 
dc[
i];
 
 3299                                         memcpy(
s->ac_val[0][
s->block_index[
i]], ac[
i], 
sizeof(int16_t)*16);
 
 3307                     int mx= 
s->b_direct_mv_table[xy][0];
 
 3308                     int my= 
s->b_direct_mv_table[xy][1];
 
 3310                     backup_s.dquant = 0;
 
 3315                                  &dmin, &next_block, mx, my);
 
 3318                     backup_s.dquant = 0;
 
 3323                                  &dmin, &next_block, 0, 0);
 
 3328                         coded |= 
s->block_last_index[
i];
 
 3331                         memcpy(
s->mv, best_s.
mv, 
sizeof(
s->mv));
 
 3353                                         &dmin, &next_block, mx, my);
 
 3358                 s->current_picture.qscale_table[xy] = best_s.
qscale;
 
 3367                 if(
s->data_partitioning){
 
 3371                     s->pb2= backup_s.pb2;
 
 3375                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
 
 3376                     s->tex_pb= backup_s.tex_pb;
 
 3380                 if (CONFIG_H263_ENCODER &&
 
 3385                     s->hdsp.put_pixels_tab[0][0](
s->dest[0], 
s->sc.rd_scratchpad                     , 
s->linesize  ,16);
 
 3386                     s->hdsp.put_pixels_tab[1][0](
s->dest[1], 
s->sc.rd_scratchpad + 16*
s->linesize    , 
s->uvlinesize, 8);
 
 3387                     s->hdsp.put_pixels_tab[1][0](
s->dest[2], 
s->sc.rd_scratchpad + 16*
s->linesize + 8, 
s->uvlinesize, 8);
 
 3393                 int motion_x = 0, motion_y = 0;
 
 3401                     motion_x= 
s->mv[0][0][0] = 0;
 
 3402                     motion_y= 
s->mv[0][0][1] = 0;
 
 3407                     motion_x= 
s->mv[0][0][0] = 
s->p_mv_table[xy][0];
 
 3408                     motion_y= 
s->mv[0][0][1] = 
s->p_mv_table[xy][1];
 
 3415                         j= 
s->field_select[0][
i] = 
s->p_field_select_table[
i][xy];
 
 3416                         s->mv[0][
i][0] = 
s->p_field_mv_table[
i][j][xy][0];
 
 3417                         s->mv[0][
i][1] = 
s->p_field_mv_table[
i][j][xy][1];
 
 3425                         s->mv[0][
i][0] = 
s->current_picture.motion_val[0][
s->block_index[
i]][0];
 
 3426                         s->mv[0][
i][1] = 
s->current_picture.motion_val[0][
s->block_index[
i]][1];
 
 3430                     if (CONFIG_MPEG4_ENCODER) {
 
 3433                         motion_x=
s->b_direct_mv_table[xy][0];
 
 3434                         motion_y=
s->b_direct_mv_table[xy][1];
 
 3439                     if (CONFIG_MPEG4_ENCODER) {
 
 3448                     s->mv[0][0][0] = 
s->b_bidir_forw_mv_table[xy][0];
 
 3449                     s->mv[0][0][1] = 
s->b_bidir_forw_mv_table[xy][1];
 
 3450                     s->mv[1][0][0] = 
s->b_bidir_back_mv_table[xy][0];
 
 3451                     s->mv[1][0][1] = 
s->b_bidir_back_mv_table[xy][1];
 
 3456                     motion_x= 
s->mv[1][0][0] = 
s->b_back_mv_table[xy][0];
 
 3457                     motion_y= 
s->mv[1][0][1] = 
s->b_back_mv_table[xy][1];
 
 3462                     motion_x= 
s->mv[0][0][0] = 
s->b_forw_mv_table[xy][0];
 
 3463                     motion_y= 
s->mv[0][0][1] = 
s->b_forw_mv_table[xy][1];
 
 3470                         j= 
s->field_select[0][
i] = 
s->b_field_select_table[0][
i][xy];
 
 3471                         s->mv[0][
i][0] = 
s->b_field_mv_table[0][
i][j][xy][0];
 
 3472                         s->mv[0][
i][1] = 
s->b_field_mv_table[0][
i][j][xy][1];
 
 3480                         j= 
s->field_select[1][
i] = 
s->b_field_select_table[1][
i][xy];
 
 3481                         s->mv[1][
i][0] = 
s->b_field_mv_table[1][
i][j][xy][0];
 
 3482                         s->mv[1][
i][1] = 
s->b_field_mv_table[1][
i][j][xy][1];
 
 3489                     for(dir=0; dir<2; dir++){
 
 3491                             j= 
s->field_select[dir][
i] = 
s->b_field_select_table[dir][
i][xy];
 
 3492                             s->mv[dir][
i][0] = 
s->b_field_mv_table[dir][
i][j][xy][0];
 
 3493                             s->mv[dir][
i][1] = 
s->b_field_mv_table[dir][
i][j][xy][1];
 
 3504                 s->last_mv_dir = 
s->mv_dir;
 
 3506                 if (CONFIG_H263_ENCODER &&
 
 3515                 s->p_mv_table[xy][0]=0;
 
 3516                 s->p_mv_table[xy][1]=0;
 
 3523                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
 3524                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
 3526                 s->current_picture.encoding_error[0] += sse(
 3527                     s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
 3528                     s->dest[0], w, h, s->linesize);
 3529                 s->current_picture.encoding_error[1] += sse(
 3530                     s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
 3531                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
 3532                 s->current_picture.encoding_error[2] += sse(
 3533                     s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
 3534                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
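The three calls above accumulate the per-plane reconstruction error (used for PSNR reporting) over the just-encoded macroblock, clipped to the picture edge. As an illustrative, stand-alone sketch of what an sse()-style helper computes here (sum of squared pixel differences over a w x h region with a given stride); this is not the file's exact implementation, only the same arithmetic:

    #include <stddef.h>
    #include <stdint.h>

    static int64_t sse_region(const uint8_t *src1, const uint8_t *src2,
                              int w, int h, ptrdiff_t stride)
    {
        int64_t acc = 0;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                int d = src1[x] - src2[x];
                acc += d * d;              /* squared pixel difference */
            }
            src1 += stride;
            src2 += stride;
        }
        return acc;
    }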
 
 3537                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
 3540             ff_dlog(s->avctx, "MB %d %d bits\n",
 
 3551 #if FF_API_RTP_CALLBACK 
 3554     if (s->avctx->rtp_callback) {
 3555         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
 3559         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
 
 3567 #define MERGE(field) dst->field += src->field; src->field=0 
 3594         for(i=0; i<64; i++){
 
 3607     if (s->next_lambda){
 3608         s->current_picture_ptr->f->quality =
 3609         s->current_picture.f->quality = s->next_lambda;
 3610         if(!dry_run) s->next_lambda= 0;
 3611     } else if (!s->fixed_qscale) {
 3613         s->current_picture_ptr->f->quality =
 3614         s->current_picture.f->quality = quality;
 3615         if (s->current_picture.f->quality < 0)
 3619     if(s->adaptive_quant){
 3620         switch(s->codec_id){
 3622             if (CONFIG_MPEG4_ENCODER)
 3628             if (CONFIG_H263_ENCODER)
 3635         s->lambda= s->lambda_table[0];
 3638         s->lambda = s->current_picture.f->quality;
 3646     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
 3649         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
 3652         s->pp_time= s->time - s->last_non_b_time;
 3653         s->last_non_b_time= s->time;
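Roughly speaking, these distances feed the MPEG-4 direct-mode and B-frame scaling: pp_time is the spacing between the two surrounding reference (non-B) pictures, and pb_time is the spacing from the past reference to the current B picture. Since B pictures are coded after their future reference, last_non_b_time already holds the future reference's time when the B picture is processed. Worked example (in pts units): references at t = 0 and t = 3 give pp_time = 3; for a B picture at t = 1, pb_time = 3 - (3 - 1) = 1.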
 
 3662     int context_count = s->slice_context_count;
 
 3667     s->me.mb_var_sum_temp    =
 
 3668     s->me.mc_mb_var_sum_temp = 0;
 
 3677     s->me.scene_change_score=0;
 
 3682         if(s->msmpeg4_version >= 3) s->no_rounding=1;
 
 3683         else                        s->no_rounding=0;
 
 3686             s->no_rounding ^= 1;
 
 3695             s->lambda= s->last_lambda_for[s->pict_type];
 3697             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
 3702         if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
 3703         if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
 3704         s->q_chroma_intra_matrix   = s->q_intra_matrix;
 3705         s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
 
 3709     for(i=1; i<context_count; i++){
 3720         s->lambda  = (s->lambda  * s->me_penalty_compensation + 128) >> 8;
 3721         s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
 
 3732         for(i=0; i<s->mb_stride*s->mb_height; i++)
 3735         if(!s->fixed_qscale){
 3737             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
 3740     for(i=1; i<context_count; i++){
 3743     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
 3744     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
 3747     if (s->me.scene_change_score > s->scenechange_threshold &&
 3750         for(i=0; i<s->mb_stride*s->mb_height; i++)
 3752         if(s->msmpeg4_version >= 3)
 3754         ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
 3755                 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
 
 3798                 for(dir=0; dir<2; dir++){
 
 3804                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
 3815     if (s->qscale < 3 && s->max_qcoeff <= 128 &&
 3824         if (s->avctx->intra_matrix) {
 3826             luma_matrix = s->avctx->intra_matrix;
 3828         if (s->avctx->chroma_intra_matrix)
 3829             chroma_matrix = s->avctx->chroma_intra_matrix;
 3833             int j = s->idsp.idct_permutation[i];
 3835             s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
 3836             s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
 3838         s->y_dc_scale_table=
 3840         s->chroma_intra_matrix[0] =
 3843                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
 3845                        s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
 
 3849         static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
 
 3850         static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
 
 3857         s->y_dc_scale_table= y;
 
 3858         s->c_dc_scale_table= c;
 3859         s->intra_matrix[0] = 13;
 3860         s->chroma_intra_matrix[0] = 14;
 3862                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
 3864                        s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
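In the MJPEG/AMV-style setup above, the current qscale is folded directly into the 8-bit intra matrices before ff_convert_matrix() is called, with each scaled entry clamped back to the 8-bit range. A hedged, stand-alone sketch of that scaling step (clip_uint8 stands in for av_clip_uint8; the real loop also routes the destination index through s->idsp.idct_permutation[], and the DC entry is taken from the DC scale table instead):

    #include <stdint.h>

    static inline int clip_uint8(int v)          /* behaves like av_clip_uint8() */
    {
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    /* Fold qscale into an 8-bit base quantization matrix. Illustrative only. */
    static void scale_quant_matrix(uint16_t dst[64], const uint16_t base[64], int qscale)
    {
        for (int i = 1; i < 64; i++)             /* entry 0 (DC) is set separately */
            dst[i] = clip_uint8((base[i] * qscale) >> 3);
    }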
 
 3869     s->current_picture_ptr->f->key_frame =
 
 3871     s->current_picture_ptr->f->pict_type =
 
 3872     s->current_picture.f->pict_type = s->pict_type;
 3874     if (s->current_picture.f->key_frame)
 3875         s->picture_in_gop_number=0;
 3877     s->mb_x = s->mb_y = 0;
 3879     switch(s->out_format) {
 3883                                            s->pred, s->intra_matrix, s->chroma_intra_matrix);
 3886         if (CONFIG_H261_ENCODER)
 3894         else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
 
 3907         else if (CONFIG_H263_ENCODER)
 
 3911         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
 
 3918     s->header_bits= bits - s->last_bits;
 3920     for(i=1; i<context_count; i++){
 3923     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
 3924     for(i=1; i<context_count; i++){
 3925         if (s->pb.buf_end == s->thread_context[i]->pb.buf)
 3934     const int intra= s->mb_intra;
 
 3937     s->dct_count[intra]++;
 
 3939     for(i=0; i<64; i++){
 3944                 s->dct_error_sum[intra][i] += level;
 3945                 level -= s->dct_offset[intra][i];
 3948                 s->dct_error_sum[intra][i] -= level;
 3949                 level += s->dct_offset[intra][i];
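This loop is the noise-reduction bookkeeping: for each coefficient position it accumulates the coefficient magnitude in dct_error_sum and shrinks the coefficient toward zero by dct_offset (never crossing zero); the offsets themselves are derived elsewhere in this file from dct_error_sum, dct_count and the requested noise_reduction strength. A minimal sketch of the shrink step alone, under those assumptions:

    #include <stdint.h>

    /* Illustrative soft-threshold: shrink a coefficient toward zero by `offset`,
     * clamping at zero - the same shape as the positive/negative branches above. */
    static int16_t shrink_towards_zero(int16_t level, uint16_t offset)
    {
        if (level > 0)
            return level > offset ? (int16_t)(level - offset) : 0;
        if (level < 0)
            return -level > offset ? (int16_t)(level + offset) : 0;
        return 0;
    }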
 
 3958                                   int16_t *block, int n,
 3961     const uint16_t *matrix;
 3963     const uint8_t *perm_scantable;
 3965     unsigned int threshold1, threshold2;
 3977     int coeff_count[64];
 3978     int qmul, qadd, start_i, last_non_zero, i, dc;
 3979     const int esc_length= s->ac_esc_length;
 3987     if(s->dct_error_sum)
 3993     else                 mpeg2_qscale = qscale << 1;
 3997         scantable= s->intra_scantable.scantable;
 3998         perm_scantable= s->intra_scantable.permutated;
 4015         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
 
 4016         matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
 4020         if (n > 3 && s->intra_chroma_ac_vlc_length) {
 4021             length     = s->intra_chroma_ac_vlc_length;
 4022             last_length= s->intra_chroma_ac_vlc_last_length;
 4024             length     = s->intra_ac_vlc_length;
 4025             last_length= s->intra_ac_vlc_last_length;
 4028         scantable= s->inter_scantable.scantable;
 4029         perm_scantable= s->inter_scantable.permutated;
 4032         qmat = s->q_inter_matrix[qscale];
 4033         matrix = s->inter_matrix;
 4034         length     = s->inter_ac_vlc_length;
 4035         last_length= s->inter_ac_vlc_last_length;
 4040     threshold2= (threshold1<<1);
 4042     for(i=63; i>=start_i; i--) {
 4043         const int j = scantable[i];
 4046         if(((unsigned)(level+threshold1))>threshold2){
 4052     for(i=start_i; i<=last_non_zero; i++) {
 4053         const int j = scantable[i];
 4058         if(((unsigned)(level+threshold1))>threshold2){
 
 4081     if(last_non_zero < start_i){
 
 4082         memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
 
 4083         return last_non_zero;
 
 4086     score_tab[start_i]= 0;
 
 4087     survivor[0]= start_i;
 
 4090     for(i=start_i; i<=last_non_zero; i++){
 
 4091         int level_index, j, zero_distortion;
 
 4093         int best_score=256*256*256*120;
 
 4097         zero_distortion= dct_coeff*dct_coeff;
 
 4099         for(level_index=0; level_index < coeff_count[i]; level_index++){
 
 4108                 unquant_coeff= alevel*qmul + qadd;
 
 4110                 j = s->idsp.idct_permutation[scantable[i]];
 
 4111                 unquant_coeff = alevel * matrix[j] * 8;
 
 4113                 j = s->idsp.idct_permutation[scantable[i]];
 4115                         unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
 4116                         unquant_coeff =   (unquant_coeff - 1) | 1;
 4118                         unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
 
 4119                         unquant_coeff =   (unquant_coeff - 1) | 1;
 
 4124             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
 
 4126             if((level&(~127)) == 0){
 
 4127                 for(j=survivor_count-1; j>=0; j--){
 
 4128                     int run= i - survivor[j];
 4130                     score += score_tab[i-run];
 
 4132                     if(score < best_score){
 
 4135                         level_tab[i+1]= level-64;
 
 4140                     for(j=survivor_count-1; j>=0; j--){
 
 4141                         int run= i - survivor[j];
 4143                         score += score_tab[i-run];
 
 4144                         if(score < last_score){
 
 4147                             last_level= level-64;
 4153                 distortion += esc_length*lambda;
 
 4154                 for(j=survivor_count-1; j>=0; j--){
 
 4155                     int run= i - survivor[j];
 4156                     int score= distortion + score_tab[i-run];
 
 4158                     if(score < best_score){
 
 4161                         level_tab[i+1]= level-64;
 
 4166                   for(j=survivor_count-1; j>=0; j--){
 
 4167                         int run= i - survivor[j];
 4168                         int score= distortion + score_tab[i-run];
 
 4169                         if(score < last_score){
 
 4172                             last_level= level-64;
 4180         score_tab[i+1]= best_score;
 
 4183         if(last_non_zero <= 27){
 
 4184             for(; survivor_count; survivor_count--){
 
 4185                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
 
 4189             for(; survivor_count; survivor_count--){
 
 4190                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
 4195         survivor[ survivor_count++ ]= i+1;
 
 4199         last_score= 256*256*256*120;
 
 4200         for(i= survivor[0]; i<=last_non_zero + 1; i++){
 4201             int score= score_tab[i];
 
 4205             if(score < last_score){
 
 4208                 last_level= level_tab[i];
 4209                 last_run= run_tab[i];
 
 4214     s->coded_score[n] = last_score;
 
 4217     last_non_zero= last_i - 1;
 
 4218     memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
 
 4220     if(last_non_zero < start_i)
 
 4221         return last_non_zero;
 
 4223     if(last_non_zero == 0 && start_i == 0){
 
 4225         int best_score= dc * dc;
 4227         for(i=0; i<coeff_count[0]; i++){
 
 4230             int unquant_coeff, score, distortion;
 
 4233                     unquant_coeff= (alevel*qmul + qadd)>>3;
 
 4235                     unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
 
 4236                     unquant_coeff =   (unquant_coeff - 1) | 1;
 
 4238             unquant_coeff = (unquant_coeff + 4) >> 3;
 
 4239             unquant_coeff<<= 3 + 3;
 
 4241             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
 4244             else                    score= distortion + esc_length*lambda;
 
 4246             if(score < best_score){
 
 4248                 best_level= level - 64;
 
 4251         block[0]= best_level;
 
 4252         s->coded_score[n] = best_score - dc*dc;
 4253         if(best_level == 0) return -1;
 
 4254         else                return last_non_zero;
 
 4260     block[ perm_scantable[last_non_zero] ]= last_level;
 
 4263     for(; i>start_i; i -= run_tab[i] + 1){
 4264         block[ perm_scantable[i-1] ]= level_tab[i];
 
 4267     return last_non_zero;
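The quantizers in this file share the branch-free dead-zone test seen in the loops above: with threshold2 == 2*threshold1, the expression ((unsigned)(level + threshold1)) > threshold2 is true exactly when level > threshold1 or level < -threshold1, i.e. when the scaled coefficient lies outside the dead zone (negative sums wrap around to large unsigned values). A small self-contained check of that equivalence, with illustrative names:

    #include <assert.h>

    static int outside_deadzone(int level, unsigned threshold1)
    {
        unsigned threshold2 = threshold1 << 1;
        return ((unsigned)(level + threshold1)) > threshold2;
    }

    int main(void)
    {
        unsigned t1 = 100;
        for (int level = -1000; level <= 1000; level++)
            assert(outside_deadzone(level, t1) ==
                   (level > (int)t1 || level < -(int)t1));
        return 0;
    }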
 
 4282                     if(i==0) s*= sqrt(0.5);
 4283                     if(j==0) s*= sqrt(0.5);
 
 4297     const uint8_t *perm_scantable;
 
 4303     int qmul, qadd, start_i, last_non_zero, i, dc;
 4307     int rle_index, run, q = 1, sum;
 4309     if(basis[0][0] == 0)
 4315         scantable= s->intra_scantable.scantable;
 4316         perm_scantable= s->intra_scantable.permutated;
 
 4334         if (n > 3 && s->intra_chroma_ac_vlc_length) {
 4335             length     = s->intra_chroma_ac_vlc_length;
 4336             last_length= s->intra_chroma_ac_vlc_last_length;
 4338             length     = s->intra_ac_vlc_length;
 4339             last_length= s->intra_ac_vlc_last_length;
 4342         scantable= s->inter_scantable.scantable;
 4343         perm_scantable= s->inter_scantable.permutated;
 4346         length     = s->inter_ac_vlc_length;
 4347         last_length= s->inter_ac_vlc_last_length;
 4349     last_non_zero = s->block_last_index[n];
 
 4352     for(i=0; i<64; i++){
 4357     for(i=0; i<64; i++){
 4363         w= 15 + (48*qns*one + w/2)/w;
 
 4376     for(i=start_i; i<=last_non_zero; i++){
 4377         int j= perm_scantable[i];
 4384             run_tab[rle_index++]=run;
 4394         int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
 4397         int run2, best_unquant_change=0, analyze_gradient;
 4398         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
 
 4400         if(analyze_gradient){
 
 4401             for(i=0; i<64; i++){
 
 4411             int change, old_coeff;
 
 4417             for(change=-1; change<=1; change+=2){
 
 4418                 int new_level= level + change;
 
 4419                 int score, new_coeff;
 
 4421                 new_coeff= q*new_level;
 
 4422                 if(new_coeff >= 2048 || new_coeff < 0)
 
 4425                 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
 
 4426                                                   new_coeff - old_coeff);
 
 4427                 if(score<best_score){
 
 4430                     best_change= change;
 
 4431                     best_unquant_change= new_coeff - old_coeff;
 
 4438         run2= run_tab[rle_index++];
 
 4442         for(i=start_i; i<64; i++){
 4443             int j= perm_scantable[i];
 
 4445             int change, old_coeff;
 
 4447             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
 4452                 else        old_coeff= qmul*level + qadd;
 
 4453                 run2= run_tab[rle_index++]; 
 
 4460             for(change=-1; change<=1; change+=2){
 
 4461                 int new_level= level + change;
 
 4462                 int score, new_coeff, unquant_change;
 
 4469                     if(new_level<0) new_coeff= qmul*new_level - qadd;
 
 4470                     else            new_coeff= qmul*new_level + qadd;
 
 4471                     if(new_coeff >= 2048 || new_coeff <= -2048)
 
 4476                         if(level < 63 && level > -63){
 
 4477                             if(i < last_non_zero)
 
 4487                         if(analyze_gradient){
 
 4488                             int g= d1[ scantable[i] ];
 4489                             if(g && (g^new_level) >= 0)
 4493                         if(i < last_non_zero){
 4494                             int next_i= i + run2 + 1;
 4495                             int next_level= block[ perm_scantable[next_i] ] + 64;
 
 4497                             if(next_level&(~127))
 
 4500                             if(next_i < last_non_zero)
 
 4520                     if(i < last_non_zero){
 4521                         int next_i= i + run2 + 1;
 4522                         int next_level= block[ perm_scantable[next_i] ] + 64;
 
 4524                         if(next_level&(~127))
 
 4527                         if(next_i < last_non_zero)
 
 4546                 unquant_change= new_coeff - old_coeff;
 
 4549                 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
 
 4551                 if(score<best_score){
 
 4554                     best_change= change;
 
 4555                     best_unquant_change= unquant_change;
 
 4559                 prev_level= level + 64;
 
 4560                 if(prev_level&(~127))
 
 4570             int j= perm_scantable[ best_coeff ];
 
 4572             block[j] += best_change;
 
 4574             if(best_coeff > last_non_zero){
 
 4575                 last_non_zero= best_coeff;
 
 4578                 for(; last_non_zero>=start_i; last_non_zero--){
 
 4579                     if(block[perm_scantable[last_non_zero]])
 
 4586             for(i=start_i; i<=last_non_zero; i++){
 4587                 int j= perm_scantable[i];
 4591                      run_tab[rle_index++]=run;
 4598             s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
 
 4604     return last_non_zero;
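The refinement loop above repeatedly tries changing a single quantized coefficient by +/-1 and keeps whichever change lowers a combined distortion-plus-rate score (try_8x8basis() estimates the distortion effect against the DCT basis, and the VLC length tables supply the rate term). As a much-simplified illustration of that greedy pattern only, here is a toy refinement over an arbitrary cost function; the names and the cost callback are hypothetical, and only the +/-1 search structure mirrors the code above:

    typedef long long (*cost_fn)(const int *coeffs, int n);

    /* Greedily try +/-1 on every coefficient, apply the single best change per
     * pass, and stop when no change improves the cost. Illustrative only. */
    static void refine_coeffs(int *coeffs, int n, cost_fn cost)
    {
        for (;;) {
            long long best = cost(coeffs, n);
            int best_i = -1, best_change = 0;
            for (int i = 0; i < n; i++) {
                for (int change = -1; change <= 1; change += 2) {
                    coeffs[i] += change;
                    long long c = cost(coeffs, n);
                    coeffs[i] -= change;
                    if (c < best) {
                        best = c;
                        best_i = i;
                        best_change = change;
                    }
                }
            }
            if (best_i < 0)
                break;
            coeffs[best_i] += best_change;
        }
    }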
 
 4619                       const uint8_t *scantable, int last)
 4630     for (i = 0; i <= last; i++) {
 4631         const int j = scantable[i];
 
 4636     for (i = 0; i <= last; i++) {
 4637         const int j = scantable[i];
 
 4638         const int perm_j = permutation[j];
 
 4644                         int16_t *block, int n,
 4647     int i, j, level, last_non_zero, q, start_i;
 4652     unsigned int threshold1, threshold2;
 4656     if(s->dct_error_sum)
 4660         scantable= s->intra_scantable.scantable;
 4675         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
 4678         scantable= s->inter_scantable.scantable;
 4681         qmat = s->q_inter_matrix[qscale];
 
 4685     threshold2= (threshold1<<1);
 
 4686     for(i=63;i>=start_i;i--) {
 4690         if(((unsigned)(level+threshold1))>threshold2){
 4697     for(i=start_i; i<=last_non_zero; i++) {
 4703         if(((unsigned)(level+threshold1))>threshold2){
 
 4721                       scantable, last_non_zero);
 
 4723     return last_non_zero;
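The C quantizer above follows the classic pattern: multiply each transform coefficient by the precomputed factor in qmat, test it against the dead zone, apply the rounding bias, shift down, and remember the last nonzero index in scan order (the permutation fix-up and overflow handling are done separately). A hedged, simplified scalar version of that core step, assuming qmat and bias were prepared in the style of ff_convert_matrix() and threshold1 encodes the dead zone; this is a sketch, not the exact function above:

    #include <stdint.h>

    /* Simplified scalar quantization of one 8x8 block in scan order.
     * Returns the index of the last kept coefficient, or -1 if none. */
    static int quantize_block(int16_t block[64], const int *qmat,
                              const uint8_t *scantable, int bias,
                              unsigned threshold1, int qmat_shift)
    {
        int last_non_zero = -1;
        unsigned threshold2 = threshold1 << 1;

        for (int i = 0; i < 64; i++) {
            const int j = scantable[i];
            int level = block[j] * qmat[j];

            if (((unsigned)(level + threshold1)) > threshold2) {
                if (level > 0)
                    block[j] =  (int16_t)(( bias + level) >> qmat_shift);
                else
                    block[j] = (int16_t)-((bias - level) >> qmat_shift);
                last_non_zero = i;
            } else {
                block[j] = 0;      /* inside the dead zone: drop the coefficient */
            }
        }
        return last_non_zero;
    }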
 
 4726 #define OFFSET(x) offsetof(MpegEncContext, x) 
 4727 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM 
 4730     { "mb_info",      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
 
 4792     .name           = "msmpeg4v2",
 
  