mpeg4videoenc.c
1 /*
2  * MPEG-4 encoder
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "libavutil/attributes.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/thread.h"
28 #include "codec_internal.h"
29 #include "mpegvideo.h"
30 #include "h263.h"
31 #include "h263enc.h"
32 #include "mpeg4video.h"
33 #include "mpeg4videodata.h"
34 #include "mpeg4videodefs.h"
35 #include "mpeg4videoenc.h"
36 #include "mpegvideoenc.h"
37 #include "profiles.h"
38 #include "put_bits.h"
39 #include "version.h"
40 
41 /**
42  * Minimal fcode that a motion vector component would need.
43  */
44 static uint8_t fcode_tab[MAX_MV*2+1];
45 
46 /* The uni_DCtab_* tables below contain unified bits+length tables to encode DC
47  * differences in MPEG-4. Unified in the sense that the specification specifies
48  * this encoding in several steps. */
49 static uint8_t uni_DCtab_lum_len[512];
50 static uint8_t uni_DCtab_chrom_len[512];
51 static uint16_t uni_DCtab_lum_bits[512];
52 static uint16_t uni_DCtab_chrom_bits[512];
53 
54 /* Unified encoding tables for run length encoding of coefficients.
55  * Unified in the sense that the specification specifies the encoding in several steps. */
56 static uint32_t uni_mpeg4_intra_rl_bits[64 * 64 * 2 * 2];
57 static uint8_t uni_mpeg4_intra_rl_len[64 * 64 * 2 * 2];
58 static uint32_t uni_mpeg4_inter_rl_bits[64 * 64 * 2 * 2];
59 static uint8_t uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];
60 
61 //#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 + (run) * 256 + (level))
62 //#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) + (level) * 64)
63 #define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))
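/* Worked example: the macro flattens (last, run, level) into one index, with
 * "last" selecting the upper or lower 64*128 half of the table, "run" a
 * 128-entry row and the (already +64 biased) level the column, e.g.
 * UNI_MPEG4_ENC_INDEX(1, 3, 70) = 1*8192 + 3*128 + 70 = 8646. */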
64 
65 /* MPEG-4
66  * inter
67  * max level: 24/6
68  * max run: 53/63
69  *
70  * intra
71  * max level: 53/16
72  * max run: 29/41
73  */
74 
75 typedef struct Mpeg4EncContext {
76     MPVMainEncContext m;
77     /// number of bits to represent the fractional part of time
78     int time_increment_bits;
79 } Mpeg4EncContext;
80 
81 static Mpeg4EncContext *mainctx_to_mpeg4(MPVMainEncContext *m)
82 {
83     return (Mpeg4EncContext*)m;
84 }
85 
86 /**
87  * Return the number of bits that encoding the 8x8 block in block would need.
88  * @param[in] block_last_index last index in scantable order that refers to a non zero element in block.
89  */
90 static inline int get_block_rate(MPVEncContext *const s, int16_t block[64],
91  int block_last_index, const uint8_t scantable[64])
92 {
93  int last = 0;
94  int j;
95  int rate = 0;
96 
97  for (j = 1; j <= block_last_index; j++) {
98  const int index = scantable[j];
99  int level = block[index];
100  if (level) {
101  level += 64;
102  if ((level & (~127)) == 0) {
103  if (j < block_last_index)
104  rate += s->intra_ac_vlc_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
105  else
106  rate += s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
107  } else
108  rate += s->ac_esc_length;
109 
110  last = j;
111  }
112  }
113 
114  return rate;
115 }
116 
117 /**
118  * Restore the ac coefficients in block that have been changed by decide_ac_pred().
119  * This function also restores s->c.block_last_index.
120  * @param[in,out] block MB coefficients, these will be restored
121  * @param[in] dir ac prediction direction for each 8x8 block
122  * @param[out] st scantable for each 8x8 block
123  * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
124  */
125 static inline void restore_ac_coeffs(MPVEncContext *const s, int16_t block[6][64],
126  const int dir[6], const uint8_t *st[6],
127  const int zigzag_last_index[6])
128 {
129  int i, n;
130  memcpy(s->c.block_last_index, zigzag_last_index, sizeof(int) * 6);
131 
132  for (n = 0; n < 6; n++) {
133  int16_t *ac_val = &s->c.ac_val[0][0] + s->c.block_index[n] * 16;
134 
135  st[n] = s->c.intra_scantable.permutated;
136  if (dir[n]) {
137  /* top prediction */
138  for (i = 1; i < 8; i++)
139  block[n][s->c.idsp.idct_permutation[i]] = ac_val[i + 8];
140  } else {
141  /* left prediction */
142  for (i = 1; i < 8; i++)
143  block[n][s->c.idsp.idct_permutation[i << 3]] = ac_val[i];
144  }
145  }
146 }
147 
148 /**
149  * Predict the dc.
150  * @param n block index (0-3 are luma, 4-5 are chroma)
151  * @param dir_ptr pointer to an integer where the prediction direction will be stored
152  */
153 static int mpeg4_pred_dc(MpegEncContext *s, int n, int *dir_ptr)
154 {
155  const int16_t *const dc_val = s->dc_val + s->block_index[n];
156  const int wrap = s->block_wrap[n];
157 
158  /* B C
159  * A X
160  */
161  const int a = dc_val[-1];
162  const int b = dc_val[-1 - wrap];
163  const int c = dc_val[-wrap];
164  int pred;
165 
166  // There is no need for out-of-slice handling here, as all values are set
167  // appropriately when a new slice is opened.
168  if (abs(a - b) < abs(b - c)) {
169  pred = c;
170  *dir_ptr = 1; /* top */
171  } else {
172  pred = a;
173  *dir_ptr = 0; /* left */
174  }
175  return pred;
176 }
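/* Worked example of the gradient rule above: with A = 10, B = 12, C = 30 we
 * get |A - B| = 2 < |B - C| = 18, so the predictor is C (the block above) and
 * *dir_ptr is set to 1 (top); otherwise the left neighbour A would be used. */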
177 
178 /**
179  * Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
180  * This function will also update s->c.block_last_index and s->c.ac_val.
181  * @param[in,out] block MB coefficients, these will be updated if 1 is returned
182  * @param[in] dir ac prediction direction for each 8x8 block
183  * @param[out] st scantable for each 8x8 block
184  * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
185  */
186 static inline int decide_ac_pred(MPVEncContext *const s, int16_t block[6][64],
187  const int dir[6], const uint8_t *st[6],
188  int zigzag_last_index[6])
189 {
190  int score = 0;
191  int i, n;
192  const int8_t *const qscale_table = s->c.cur_pic.qscale_table;
193 
194  memcpy(zigzag_last_index, s->c.block_last_index, sizeof(int) * 6);
195 
196  for (n = 0; n < 6; n++) {
197  int16_t *ac_val, *ac_val1;
198 
199  score -= get_block_rate(s, block[n], s->c.block_last_index[n],
200  s->c.intra_scantable.permutated);
201 
202  ac_val = &s->c.ac_val[0][0] + s->c.block_index[n] * 16;
203  ac_val1 = ac_val;
204  if (dir[n]) {
205  const int xy = s->c.mb_x + s->c.mb_y * s->c.mb_stride - s->c.mb_stride;
206  /* top prediction */
207  ac_val -= s->c.block_wrap[n] * 16;
208  if (s->c.first_slice_line || s->c.qscale == qscale_table[xy] || n == 2 || n == 3) {
209  /* same qscale */
210  for (i = 1; i < 8; i++) {
211  const int level = block[n][s->c.idsp.idct_permutation[i]];
212  block[n][s->c.idsp.idct_permutation[i]] = level - ac_val[i + 8];
213  ac_val1[i] = block[n][s->c.idsp.idct_permutation[i << 3]];
214  ac_val1[i + 8] = level;
215  }
216  } else {
217  /* different qscale, we must rescale */
218  for (i = 1; i < 8; i++) {
219  const int level = block[n][s->c.idsp.idct_permutation[i]];
220  block[n][s->c.idsp.idct_permutation[i]] = level - ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->c.qscale);
221  ac_val1[i] = block[n][s->c.idsp.idct_permutation[i << 3]];
222  ac_val1[i + 8] = level;
223  }
224  }
225  st[n] = s->c.permutated_intra_h_scantable;
226  } else {
227  const int xy = s->c.mb_x - 1 + s->c.mb_y * s->c.mb_stride;
228  /* left prediction */
229  ac_val -= 16;
230  if (s->c.mb_x == 0 || s->c.qscale == qscale_table[xy] || n == 1 || n == 3) {
231  /* same qscale */
232  for (i = 1; i < 8; i++) {
233  const int level = block[n][s->c.idsp.idct_permutation[i << 3]];
234  block[n][s->c.idsp.idct_permutation[i << 3]] = level - ac_val[i];
235  ac_val1[i] = level;
236  ac_val1[i + 8] = block[n][s->c.idsp.idct_permutation[i]];
237  }
238  } else {
239  /* different qscale, we must rescale */
240  for (i = 1; i < 8; i++) {
241  const int level = block[n][s->c.idsp.idct_permutation[i << 3]];
242  block[n][s->c.idsp.idct_permutation[i << 3]] = level - ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->c.qscale);
243  ac_val1[i] = level;
244  ac_val1[i + 8] = block[n][s->c.idsp.idct_permutation[i]];
245  }
246  }
247  st[n] = s->c.permutated_intra_v_scantable;
248  }
249 
250  for (i = 63; i > 0; i--) // FIXME optimize
251  if (block[n][st[n][i]])
252  break;
253  s->c.block_last_index[n] = i;
254 
255  score += get_block_rate(s, block[n], s->c.block_last_index[n], st[n]);
256  }
257 
258  if (score < 0) {
259  return 1;
260  } else {
261  restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
262  return 0;
263  }
264 }
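/* The score accumulated above is (bits with AC prediction) minus (bits
 * without): a negative total means the predicted coefficients are cheaper, so
 * they are kept and 1 is returned; otherwise the original coefficients and
 * block_last_index values are restored and 0 is returned. */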
265 
266 /**
267  * modify mb_type & qscale so that encoding is actually possible in MPEG-4
268  */
269 void ff_clean_mpeg4_qscales(MPVEncContext *const s)
270 {
271     ff_clean_h263_qscales(s);
272 
273  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
274  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
275  int odd = 0;
276  /* ok, come on, this isn't funny anymore, there's more code for
277  * handling this MPEG-4 mess than for the actual adaptive quantization */
278 
279  for (int i = 0; i < s->c.mb_num; i++) {
280  int mb_xy = s->c.mb_index2xy[i];
281  odd += qscale_table[mb_xy] & 1;
282  }
283 
284  if (2 * odd > s->c.mb_num)
285  odd = 1;
286  else
287  odd = 0;
288 
289  for (int i = 0; i < s->c.mb_num; i++) {
290  int mb_xy = s->c.mb_index2xy[i];
291  if ((qscale_table[mb_xy] & 1) != odd)
292  qscale_table[mb_xy]++;
293  if (qscale_table[mb_xy] > 31)
294  qscale_table[mb_xy] = 31;
295  }
296 
297  for (int i = 1; i < s->c.mb_num; i++) {
298  int mb_xy = s->c.mb_index2xy[i];
299  if (qscale_table[mb_xy] != qscale_table[s->c.mb_index2xy[i - 1]] &&
300  (s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_DIRECT)) {
301  s->mb_type[mb_xy] |= CANDIDATE_MB_TYPE_BIDIR;
302  }
303  }
304  }
305 }
306 
307 /**
308  * Encode the dc value.
309  * @param n block index (0-3 are luma, 4-5 are chroma)
310  */
311 static inline void mpeg4_encode_dc(PutBitContext *s, int level, int n)
312 {
313  /* DC will overflow if level is outside the [-255,255] range. */
314  level += 256;
315     if (n < 4) {
316         /* luminance */
317         put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
318     } else {
319         /* chrominance */
320         put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
321     }
322 }
323 
324 /**
325  * Encode the AC coefficients of an 8x8 block.
326  */
327 static inline void mpeg4_encode_ac_coeffs(const int16_t block[64],
328  const int last_index, int i,
329  const uint8_t *const scan_table,
330  PutBitContext *const ac_pb,
331  const uint32_t *const bits_tab,
332  const uint8_t *const len_tab)
333 {
334  int last_non_zero = i - 1;
335 
336  /* AC coefs */
337  for (; i < last_index; i++) {
338  int level = block[scan_table[i]];
339  if (level) {
340  int run = i - last_non_zero - 1;
341  level += 64;
342  if ((level & (~127)) == 0) {
343  const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
344  put_bits(ac_pb, len_tab[index], bits_tab[index]);
345  } else { // ESC3
346  put_bits(ac_pb,
347  7 + 2 + 1 + 6 + 1 + 12 + 1,
348  (3 << 23) + (3 << 21) + (0 << 20) + (run << 14) +
349  (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
350  }
351  last_non_zero = i;
352  }
353  }
354  /* if (i <= last_index) */ {
355  int level = block[scan_table[i]];
356  int run = i - last_non_zero - 1;
357  level += 64;
358  if ((level & (~127)) == 0) {
359  const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
360  put_bits(ac_pb, len_tab[index], bits_tab[index]);
361  } else { // ESC3
362  put_bits(ac_pb,
363  7 + 2 + 1 + 6 + 1 + 12 + 1,
364  (3 << 23) + (3 << 21) + (1 << 20) + (run << 14) +
365  (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
366  }
367  }
368 }
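/* Layout of the 30-bit ESC3 codeword written above:
 *   7 bits  escape VLC "0000011"
 *   2 bits  "11" (selects escape type 3, full run/level)
 *   1 bit   LAST
 *   6 bits  RUN
 *   1 bit   marker "1"
 *  12 bits  LEVEL ((level - 64) & 0xfff, i.e. two's complement)
 *   1 bit   marker "1"
 * which matches the 7 + 2 + 1 + 6 + 1 + 12 + 1 bit count passed to put_bits(). */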
369 
370 static void mpeg4_encode_blocks_inter(MPVEncContext *const s,
371                                       const int16_t block[6][64],
372                                       PutBitContext *ac_pb)
373 {
374  /* encode each block */
375  for (int n = 0; n < 6; ++n) {
376  const int last_index = s->c.block_last_index[n];
377  if (last_index < 0)
378  continue;
379 
380  mpeg4_encode_ac_coeffs(block[n], last_index, 0,
381                                s->c.intra_scantable.permutated, ac_pb,
382                                uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
383     }
384 }
385 
386 static void mpeg4_encode_blocks_intra(MPVEncContext *const s,
387                                       const int16_t block[6][64],
388  const int intra_dc[6],
389  const uint8_t * const *scan_table,
390  PutBitContext *dc_pb,
391  PutBitContext *ac_pb)
392 {
393  /* encode each block */
394  for (int n = 0; n < 6; ++n) {
395  mpeg4_encode_dc(dc_pb, intra_dc[n], n);
396 
397  const int last_index = s->c.block_last_index[n];
398  if (last_index <= 0)
399  continue;
400 
401  mpeg4_encode_ac_coeffs(block[n], last_index, 1,
402                                scan_table[n], ac_pb,
403                                uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
404     }
405 }
406 
407 static inline int get_b_cbp(MPVEncContext *const s, int16_t block[6][64],
408  int motion_x, int motion_y, int mb_type)
409 {
410  int cbp = 0, i;
411 
412  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
413  int score = 0;
414  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
415 
416  for (i = 0; i < 6; i++) {
417  if (s->coded_score[i] < 0) {
418  score += s->coded_score[i];
419  cbp |= 1 << (5 - i);
420  }
421  }
422 
423  if (cbp) {
424  int zero_score = -6;
425  if ((motion_x | motion_y | s->dquant | mb_type) == 0)
426  zero_score -= 4; // 2 * MV + mb_type + cbp bit
427 
428  zero_score *= lambda;
429  if (zero_score <= score)
430  cbp = 0;
431  }
432 
433  for (i = 0; i < 6; i++) {
434  if (s->c.block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
435  s->c.block_last_index[i] = -1;
436  s->c.bdsp.clear_block(s->c.block[i]);
437  }
438  }
439  } else {
440  for (i = 0; i < 6; i++) {
441  if (s->c.block_last_index[i] >= 0)
442  cbp |= 1 << (5 - i);
443  }
444  }
445  return cbp;
446 }
447 
448 // FIXME this is duplicated to h263.c
449 static const int dquant_code[5] = { 1, 0, 9, 2, 3 };
450 
451 static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
452  int motion_x, int motion_y)
453 {
454  int cbpc, cbpy, pred_x, pred_y;
455  PutBitContext *const pb2 = s->c.data_partitioning ? &s->pb2 : &s->pb;
456  PutBitContext *const tex_pb = s->c.data_partitioning && s->c.pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
457  PutBitContext *const dc_pb = s->c.data_partitioning && s->c.pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
458  const int interleaved_stats = (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->c.data_partitioning ? 1 : 0;
459 
460  if (!s->c.mb_intra) {
461  int i, cbp;
462 
463  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
464  /* convert from mv_dir to type */
465  static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
466  int mb_type = mb_type_table[s->c.mv_dir];
467 
468  if (s->c.mb_x == 0) {
469  for (i = 0; i < 2; i++)
470  s->c.last_mv[i][0][0] =
471  s->c.last_mv[i][0][1] =
472  s->c.last_mv[i][1][0] =
473  s->c.last_mv[i][1][1] = 0;
474  }
475 
476  av_assert2(s->dquant >= -2 && s->dquant <= 2);
477  av_assert2((s->dquant & 1) == 0);
478  av_assert2(mb_type >= 0);
479 
480  /* nothing to do if this MB was skipped in the next P-frame */
481  if (s->c.next_pic.mbskip_table[s->c.mb_y * s->c.mb_stride + s->c.mb_x]) { // FIXME avoid DCT & ...
482  s->c.mv[0][0][0] =
483  s->c.mv[0][0][1] =
484  s->c.mv[1][0][0] =
485  s->c.mv[1][0][1] = 0;
486  s->c.mv_dir = MV_DIR_FORWARD; // doesn't matter
487  s->c.qscale -= s->dquant;
488 // s->c.mb_skipped = 1;
489 
490  return;
491  }
492 
493  cbp = get_b_cbp(s, block, motion_x, motion_y, mb_type);
494 
495  if ((cbp | motion_x | motion_y | mb_type) == 0) {
496  /* direct MB with MV={0,0} */
497  av_assert2(s->dquant == 0);
498 
499  put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */
500 
501  if (interleaved_stats) {
502  s->misc_bits++;
503  s->last_bits++;
504  }
505  return;
506  }
507 
508  put_bits(&s->pb, 1, 0); /* mb coded modb1=0 */
509  put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ // FIXME merge
510  put_bits(&s->pb, mb_type + 1, 1); // this table is so simple that we don't need it :)
511  if (cbp)
512  put_bits(&s->pb, 6, cbp);
513 
514  if (cbp && mb_type) {
515  if (s->dquant)
516  put_bits(&s->pb, 2, (s->dquant >> 2) + 3);
517  else
518  put_bits(&s->pb, 1, 0);
519  } else
520  s->c.qscale -= s->dquant;
521 
522  if (!s->c.progressive_sequence) {
523  if (cbp)
524  put_bits(&s->pb, 1, s->c.interlaced_dct);
525  if (mb_type) // not direct mode
526  put_bits(&s->pb, 1, s->c.mv_type == MV_TYPE_FIELD);
527  }
528 
529  if (interleaved_stats)
530  s->misc_bits += get_bits_diff(s);
531 
532  if (!mb_type) {
533  av_assert2(s->c.mv_dir & MV_DIRECT);
534  ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
535  } else {
536  av_assert2(mb_type > 0 && mb_type < 4);
537  if (s->c.mv_type != MV_TYPE_FIELD) {
538  if (s->c.mv_dir & MV_DIR_FORWARD) {
539                     ff_h263_encode_motion_vector(s,
540                                                  s->c.mv[0][0][0] - s->c.last_mv[0][0][0],
541  s->c.mv[0][0][1] - s->c.last_mv[0][0][1],
542  s->f_code);
543  s->c.last_mv[0][0][0] =
544  s->c.last_mv[0][1][0] = s->c.mv[0][0][0];
545  s->c.last_mv[0][0][1] =
546  s->c.last_mv[0][1][1] = s->c.mv[0][0][1];
547  }
548  if (s->c.mv_dir & MV_DIR_BACKWARD) {
549                     ff_h263_encode_motion_vector(s,
550                                                  s->c.mv[1][0][0] - s->c.last_mv[1][0][0],
551  s->c.mv[1][0][1] - s->c.last_mv[1][0][1],
552  s->b_code);
553  s->c.last_mv[1][0][0] =
554  s->c.last_mv[1][1][0] = s->c.mv[1][0][0];
555  s->c.last_mv[1][0][1] =
556  s->c.last_mv[1][1][1] = s->c.mv[1][0][1];
557  }
558  } else {
559  if (s->c.mv_dir & MV_DIR_FORWARD) {
560  put_bits(&s->pb, 1, s->c.field_select[0][0]);
561  put_bits(&s->pb, 1, s->c.field_select[0][1]);
562  }
563  if (s->c.mv_dir & MV_DIR_BACKWARD) {
564  put_bits(&s->pb, 1, s->c.field_select[1][0]);
565  put_bits(&s->pb, 1, s->c.field_select[1][1]);
566  }
567  if (s->c.mv_dir & MV_DIR_FORWARD) {
568  for (i = 0; i < 2; i++) {
569                         ff_h263_encode_motion_vector(s,
570                                                      s->c.mv[0][i][0] - s->c.last_mv[0][i][0],
571  s->c.mv[0][i][1] - s->c.last_mv[0][i][1] / 2,
572  s->f_code);
573  s->c.last_mv[0][i][0] = s->c.mv[0][i][0];
574  s->c.last_mv[0][i][1] = s->c.mv[0][i][1] * 2;
575  }
576  }
577  if (s->c.mv_dir & MV_DIR_BACKWARD) {
578  for (i = 0; i < 2; i++) {
579                         ff_h263_encode_motion_vector(s,
580                                                      s->c.mv[1][i][0] - s->c.last_mv[1][i][0],
581  s->c.mv[1][i][1] - s->c.last_mv[1][i][1] / 2,
582  s->b_code);
583  s->c.last_mv[1][i][0] = s->c.mv[1][i][0];
584  s->c.last_mv[1][i][1] = s->c.mv[1][i][1] * 2;
585  }
586  }
587  }
588  }
589 
590  if (interleaved_stats)
591  s->mv_bits += get_bits_diff(s);
592 
593         mpeg4_encode_blocks_inter(s, block, &s->pb);
594 
595  if (interleaved_stats)
596  s->p_tex_bits += get_bits_diff(s);
597  } else { /* s->c.pict_type == AV_PICTURE_TYPE_B */
598  cbp = get_p_cbp(s, block, motion_x, motion_y);
599 
600  if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
601  s->c.mv_type == MV_TYPE_16X16) {
602  const MPVMainEncContext *const m = slice_to_mainenc(s);
603  /* Check if the B-frames can skip it too, as we must skip it
604  * if we skip here why didn't they just compress
605  * the skip-mb bits instead of reusing them ?! */
606  if (m->max_b_frames > 0) {
607  int x, y, offset;
608  const uint8_t *p_pic;
609 
610  x = s->c.mb_x * 16;
611  y = s->c.mb_y * 16;
612 
613  offset = x + y * s->c.linesize;
614  p_pic = s->new_pic->data[0] + offset;
615 
616  s->c.mb_skipped = 1;
617  for (int i = 0; i < m->max_b_frames; i++) {
618  const uint8_t *b_pic;
619  int diff;
620  const MPVPicture *pic = m->reordered_input_picture[i + 1];
621 
622  if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
623  break;
624 
625  b_pic = pic->f->data[0] + offset;
626  if (!pic->shared)
627  b_pic += INPLACE_OFFSET;
628 
629  if (x + 16 > s->c.width || y + 16 > s->c.height) {
630  int x1, y1;
631  int xe = FFMIN(16, s->c.width - x);
632  int ye = FFMIN(16, s->c.height - y);
633  diff = 0;
634  for (y1 = 0; y1 < ye; y1++) {
635  for (x1 = 0; x1 < xe; x1++) {
636  diff += FFABS(p_pic[x1 + y1 * s->c.linesize] - b_pic[x1 + y1 * s->c.linesize]);
637  }
638  }
639  diff = diff * 256 / (xe * ye);
640  } else {
641  diff = s->sad_cmp[0](NULL, p_pic, b_pic, s->c.linesize, 16);
642  }
643  if (diff > s->c.qscale * 70) { // FIXME check that 70 is optimal
644  s->c.mb_skipped = 0;
645  break;
646  }
647  }
648  } else
649  s->c.mb_skipped = 1;
650 
651  if (s->c.mb_skipped == 1) {
652  /* skip macroblock */
653  put_bits(&s->pb, 1, 1);
654 
655  if (interleaved_stats) {
656  s->misc_bits++;
657  s->last_bits++;
658  }
659 
660  return;
661  }
662  }
663 
664  put_bits(&s->pb, 1, 0); /* mb coded */
665  cbpc = cbp & 3;
666  cbpy = cbp >> 2;
667  cbpy ^= 0xf;
668  if (s->c.mv_type == MV_TYPE_16X16) {
669  if (s->dquant)
670  cbpc += 8;
671                 put_bits(&s->pb,
672                          ff_h263_inter_MCBPC_bits[cbpc],
673                          ff_h263_inter_MCBPC_code[cbpc]);
674 
675  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
676  if (s->dquant)
677  put_bits(pb2, 2, dquant_code[s->dquant + 2]);
678 
679  if (!s->c.progressive_sequence) {
680  if (cbp)
681  put_bits(pb2, 1, s->c.interlaced_dct);
682  put_bits(pb2, 1, 0);
683  }
684 
685  if (interleaved_stats)
686  s->misc_bits += get_bits_diff(s);
687 
688  /* motion vectors: 16x16 mode */
689  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
690 
691                 ff_h263_encode_motion_vector(s,
692                                              motion_x - pred_x,
693  motion_y - pred_y,
694  s->f_code);
695  } else if (s->c.mv_type == MV_TYPE_FIELD) {
696  if (s->dquant)
697  cbpc += 8;
698                 put_bits(&s->pb,
699                          ff_h263_inter_MCBPC_bits[cbpc],
700                          ff_h263_inter_MCBPC_code[cbpc]);
701 
702  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
703  if (s->dquant)
704  put_bits(pb2, 2, dquant_code[s->dquant + 2]);
705 
706  av_assert2(!s->c.progressive_sequence);
707  if (cbp)
708  put_bits(pb2, 1, s->c.interlaced_dct);
709  put_bits(pb2, 1, 1);
710 
711  if (interleaved_stats)
712  s->misc_bits += get_bits_diff(s);
713 
714  /* motion vectors: 16x8 interlaced mode */
715  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
716  pred_y /= 2;
717 
718  put_bits(&s->pb, 1, s->c.field_select[0][0]);
719  put_bits(&s->pb, 1, s->c.field_select[0][1]);
720 
721                 ff_h263_encode_motion_vector(s,
722                                              s->c.mv[0][0][0] - pred_x,
723  s->c.mv[0][0][1] - pred_y,
724  s->f_code);
725                 ff_h263_encode_motion_vector(s,
726                                              s->c.mv[0][1][0] - pred_x,
727  s->c.mv[0][1][1] - pred_y,
728  s->f_code);
729  } else {
730  av_assert2(s->c.mv_type == MV_TYPE_8X8);
731  put_bits(&s->pb,
732  ff_h263_inter_MCBPC_bits[cbpc + 16],
733  ff_h263_inter_MCBPC_code[cbpc + 16]);
734  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
735 
736  if (!s->c.progressive_sequence && cbp)
737  put_bits(pb2, 1, s->c.interlaced_dct);
738 
739  if (interleaved_stats)
740  s->misc_bits += get_bits_diff(s);
741 
742  for (i = 0; i < 4; i++) {
743  /* motion vectors: 8x8 mode*/
744  ff_h263_pred_motion(&s->c, i, 0, &pred_x, &pred_y);
745 
746                     ff_h263_encode_motion_vector(s,
747                                                  s->c.cur_pic.motion_val[0][s->c.block_index[i]][0] - pred_x,
748  s->c.cur_pic.motion_val[0][s->c.block_index[i]][1] - pred_y,
749  s->f_code);
750  }
751  }
752 
753  if (interleaved_stats)
754  s->mv_bits += get_bits_diff(s);
755 
756             mpeg4_encode_blocks_inter(s, block, tex_pb);
757 
758  if (interleaved_stats)
759  s->p_tex_bits += get_bits_diff(s);
760  }
761  } else {
762  int cbp;
763  int dc_diff[6]; // dc values with the dc prediction subtracted
764  int dir[6]; // prediction direction
765  int zigzag_last_index[6];
766  const uint8_t *scan_table[6];
767  int i;
768 
769  for (int i = 0; i < 6; i++) {
770  int pred = mpeg4_pred_dc(&s->c, i, &dir[i]);
771  int scale = i < 4 ? s->c.y_dc_scale : s->c.c_dc_scale;
772 
773  pred = FASTDIV((pred + (scale >> 1)), scale);
774  dc_diff[i] = block[i][0] - pred;
775  s->c.dc_val[s->c.block_index[i]] = av_clip_uintp2(block[i][0] * scale, 11);
776  }
777 
778  if (s->c.avctx->flags & AV_CODEC_FLAG_AC_PRED) {
779  s->c.ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
780  } else {
781  for (i = 0; i < 6; i++)
782  scan_table[i] = s->c.intra_scantable.permutated;
783  }
784 
785  /* compute cbp */
786  cbp = 0;
787  for (i = 0; i < 6; i++)
788  if (s->c.block_last_index[i] >= 1)
789  cbp |= 1 << (5 - i);
790 
791  cbpc = cbp & 3;
792  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
793  if (s->dquant)
794  cbpc += 4;
795             put_bits(&s->pb,
796                      ff_h263_intra_MCBPC_bits[cbpc],
797                      ff_h263_intra_MCBPC_code[cbpc]);
798         } else {
799  if (s->dquant)
800  cbpc += 8;
801  put_bits(&s->pb, 1, 0); /* mb coded */
802  put_bits(&s->pb,
803  ff_h263_inter_MCBPC_bits[cbpc + 4],
804  ff_h263_inter_MCBPC_code[cbpc + 4]);
805  }
806  put_bits(pb2, 1, s->c.ac_pred);
807  cbpy = cbp >> 2;
808  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
809  if (s->dquant)
810  put_bits(dc_pb, 2, dquant_code[s->dquant + 2]);
811 
812  if (!s->c.progressive_sequence)
813  put_bits(dc_pb, 1, s->c.interlaced_dct);
814 
815  if (interleaved_stats)
816  s->misc_bits += get_bits_diff(s);
817 
818  mpeg4_encode_blocks_intra(s, block, dc_diff, scan_table, dc_pb, tex_pb);
819 
820  if (interleaved_stats)
821  s->i_tex_bits += get_bits_diff(s);
822  s->i_count++;
823 
824  /* restore ac coeffs & last_index stuff
825  * if we messed them up with the prediction */
826  if (s->c.ac_pred)
827  restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
828  }
829 }
830 
831 /**
832  * add MPEG-4 stuffing bits (01...1)
833  */
834 void ff_mpeg4_stuffing(PutBitContext *pbc)
835 {
836  int length = 8 - (put_bits_count(pbc) & 7);
837 
838  put_bits(pbc, length, (1 << (length - 1)) - 1);
839 }
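/* Worked example: with 5 bits already used in the current byte, length is 3
 * and the value written is (1 << 2) - 1 = 3, i.e. the bit pattern "011"; on a
 * byte-aligned position a full stuffing byte "01111111" is emitted instead. */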
840 
841 /* must be called before writing the header */
842 void ff_set_mpeg4_time(MPVEncContext *const s)
843 {
844     if (s->c.pict_type == AV_PICTURE_TYPE_B) {
845         ff_mpeg4_init_direct_mv(&s->c);
846     } else {
847  s->c.last_time_base = s->c.time_base;
848  s->c.time_base = FFUDIV(s->c.time, s->c.avctx->time_base.den);
849  }
850 }
851 
852 static void mpeg4_encode_gop_header(MPVMainEncContext *const m)
853 {
854  MPVEncContext *const s = &m->s;
855  int64_t hours, minutes, seconds;
856  int64_t time;
857 
858  put_bits32(&s->pb, GOP_STARTCODE);
859 
860  time = s->c.cur_pic.ptr->f->pts;
861  if (m->reordered_input_picture[1])
862  time = FFMIN(time, m->reordered_input_picture[1]->f->pts);
863  time = time * s->c.avctx->time_base.num;
864  s->c.last_time_base = FFUDIV(time, s->c.avctx->time_base.den);
865 
866  seconds = FFUDIV(time, s->c.avctx->time_base.den);
867  minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60);
868  hours = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60);
869  hours = FFUMOD(hours , 24);
870 
871  put_bits(&s->pb, 5, hours);
872  put_bits(&s->pb, 6, minutes);
873  put_bits(&s->pb, 1, 1);
874  put_bits(&s->pb, 6, seconds);
875 
876  put_bits(&s->pb, 1, !!(s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
877  put_bits(&s->pb, 1, 0); // broken link == NO
878 
879  ff_mpeg4_stuffing(&s->pb);
880 }
881 
882 static void mpeg4_encode_visual_object_header(MPVMainEncContext *const m)
883 {
884  MPVEncContext *const s = &m->s;
885  int profile_and_level_indication;
886  int vo_ver_id;
887 
888  if (s->c.avctx->profile != AV_PROFILE_UNKNOWN) {
889  profile_and_level_indication = s->c.avctx->profile << 4;
890  } else if (m->max_b_frames || s->c.quarter_sample) {
891  profile_and_level_indication = 0xF0; // adv simple
892  } else {
893  profile_and_level_indication = 0x00; // simple
894  }
895 
896  if (s->c.avctx->level != AV_LEVEL_UNKNOWN)
897  profile_and_level_indication |= s->c.avctx->level;
898  else
899  profile_and_level_indication |= 1; // level 1
900 
901  if (profile_and_level_indication >> 4 == 0xF)
902  vo_ver_id = 5;
903  else
904  vo_ver_id = 1;
905 
906  // FIXME levels
907 
908  put_bits32(&s->pb, VOS_STARTCODE);
909 
910  put_bits(&s->pb, 8, profile_and_level_indication);
911 
912     put_bits32(&s->pb, VISUAL_OBJ_STARTCODE);
913 
914  put_bits(&s->pb, 1, 1);
915  put_bits(&s->pb, 4, vo_ver_id);
916  put_bits(&s->pb, 3, 1); // priority
917 
918  put_bits(&s->pb, 4, 1); // visual obj type== video obj
919 
920  put_bits(&s->pb, 1, 0); // video signal type == no clue // FIXME
921 
922  ff_mpeg4_stuffing(&s->pb);
923 }
924 
925 static void mpeg4_encode_vol_header(Mpeg4EncContext *const m4,
926                                     int vo_number,
927  int vol_number)
928 {
929  MPVEncContext *const s = &m4->m.s;
930  int vo_ver_id, vo_type, aspect_ratio_info;
931 
932  if (m4->m.max_b_frames || s->c.quarter_sample) {
933  vo_ver_id = 5;
934  vo_type = ADV_SIMPLE_VO_TYPE;
935  } else {
936  vo_ver_id = 1;
937  vo_type = SIMPLE_VO_TYPE;
938  }
939 
940  put_bits32(&s->pb, 0x100 + vo_number); /* video obj */
941  put_bits32(&s->pb, 0x120 + vol_number); /* video obj layer */
942 
943  put_bits(&s->pb, 1, 0); /* random access vol */
944  put_bits(&s->pb, 8, vo_type); /* video obj type indication */
945  put_bits(&s->pb, 1, 1); /* is obj layer id= yes */
946  put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */
947  put_bits(&s->pb, 3, 1); /* is obj layer priority */
948 
949  aspect_ratio_info = ff_h263_aspect_to_info(s->c.avctx->sample_aspect_ratio);
950 
951  put_bits(&s->pb, 4, aspect_ratio_info); /* aspect ratio info */
952  if (aspect_ratio_info == FF_ASPECT_EXTENDED) {
953  av_reduce(&s->c.avctx->sample_aspect_ratio.num, &s->c.avctx->sample_aspect_ratio.den,
954  s->c.avctx->sample_aspect_ratio.num, s->c.avctx->sample_aspect_ratio.den, 255);
955  put_bits(&s->pb, 8, s->c.avctx->sample_aspect_ratio.num);
956  put_bits(&s->pb, 8, s->c.avctx->sample_aspect_ratio.den);
957  }
958 
959  put_bits(&s->pb, 1, 1); /* vol control parameters= yes */
960  put_bits(&s->pb, 2, 1); /* chroma format YUV 420/YV12 */
961  put_bits(&s->pb, 1, s->c.low_delay);
962  put_bits(&s->pb, 1, 0); /* vbv parameters= no */
963 
964  put_bits(&s->pb, 2, RECT_SHAPE); /* vol shape= rectangle */
965  put_bits(&s->pb, 1, 1); /* marker bit */
966 
967  put_bits(&s->pb, 16, s->c.avctx->time_base.den);
968  if (m4->time_increment_bits < 1)
969  m4->time_increment_bits = 1;
970  put_bits(&s->pb, 1, 1); /* marker bit */
971  put_bits(&s->pb, 1, 0); /* fixed vop rate=no */
972  put_bits(&s->pb, 1, 1); /* marker bit */
973  put_bits(&s->pb, 13, s->c.width); /* vol width */
974  put_bits(&s->pb, 1, 1); /* marker bit */
975  put_bits(&s->pb, 13, s->c.height); /* vol height */
976  put_bits(&s->pb, 1, 1); /* marker bit */
977  put_bits(&s->pb, 1, s->c.progressive_sequence ? 0 : 1);
978  put_bits(&s->pb, 1, 1); /* obmc disable */
979  if (vo_ver_id == 1)
980  put_bits(&s->pb, 1, 0); /* sprite enable */
981  else
982  put_bits(&s->pb, 2, 0); /* sprite enable */
983 
984  put_bits(&s->pb, 1, 0); /* not 8 bit == false */
985  put_bits(&s->pb, 1, s->mpeg_quant); /* quant type = (0 = H.263 style) */
986 
987  if (s->mpeg_quant) {
988  ff_write_quant_matrix(&s->pb, s->c.avctx->intra_matrix);
989  ff_write_quant_matrix(&s->pb, s->c.avctx->inter_matrix);
990  }
991 
992  if (vo_ver_id != 1)
993  put_bits(&s->pb, 1, s->c.quarter_sample);
994  put_bits(&s->pb, 1, 1); /* complexity estimation disable */
995  put_bits(&s->pb, 1, s->rtp_mode ? 0 : 1); /* resync marker disable */
996  put_bits(&s->pb, 1, s->c.data_partitioning ? 1 : 0);
997  if (s->c.data_partitioning)
998  put_bits(&s->pb, 1, 0); /* no rvlc */
999 
1000  if (vo_ver_id != 1) {
1001  put_bits(&s->pb, 1, 0); /* newpred */
1002  put_bits(&s->pb, 1, 0); /* reduced res vop */
1003  }
1004  put_bits(&s->pb, 1, 0); /* scalability */
1005 
1006  ff_mpeg4_stuffing(&s->pb);
1007 
1008  /* user data */
1009  if (!(s->c.avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
1010         put_bits32(&s->pb, USER_DATA_STARTCODE);
1011         ff_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
1012  }
1013 }
1014 
1015 /* write MPEG-4 VOP header */
1016 static int mpeg4_encode_picture_header(MPVMainEncContext *const m)
1017 {
1018  Mpeg4EncContext *const m4 = mainctx_to_mpeg4(m);
1019  MPVEncContext *const s = &m->s;
1020  uint64_t time_incr;
1021  int64_t time_div, time_mod;
1022 
1023  put_bits_assume_flushed(&s->pb);
1024 
1025  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
1026  if (!(s->c.avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
1027             if (s->c.avctx->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) // HACK, the reference sw is buggy
1028                 mpeg4_encode_visual_object_header(m);
1029             if (s->c.avctx->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || s->c.picture_number == 0) // HACK, the reference sw is buggy
1030  mpeg4_encode_vol_header(m4, 0, 0);
1031         }
1032         mpeg4_encode_gop_header(m);
1033     }
1034 
1035  s->c.partitioned_frame = s->c.data_partitioning && s->c.pict_type != AV_PICTURE_TYPE_B;
1036 
1037  put_bits32(&s->pb, VOP_STARTCODE); /* vop header */
1038  put_bits(&s->pb, 2, s->c.pict_type - 1); /* pict type: I = 0 , P = 1 */
1039 
1040  time_div = FFUDIV(s->c.time, s->c.avctx->time_base.den);
1041  time_mod = FFUMOD(s->c.time, s->c.avctx->time_base.den);
1042  time_incr = time_div - s->c.last_time_base;
1043 
1044  // This limits the frame duration to max 1 day
1045  if (time_incr > 3600*24) {
1046  av_log(s->c.avctx, AV_LOG_ERROR, "time_incr %"PRIu64" too large\n", time_incr);
1047  return AVERROR(EINVAL);
1048  }
1049  while (time_incr--)
1050  put_bits(&s->pb, 1, 1);
1051 
1052  put_bits(&s->pb, 1, 0);
1053 
1054  put_bits(&s->pb, 1, 1); /* marker */
1055  put_bits(&s->pb, m4->time_increment_bits, time_mod); /* time increment */
1056  put_bits(&s->pb, 1, 1); /* marker */
1057  put_bits(&s->pb, 1, 1); /* vop coded */
1058  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
1059  put_bits(&s->pb, 1, s->c.no_rounding); /* rounding type */
1060  }
1061  put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
1062  if (!s->c.progressive_sequence) {
1063  put_bits(&s->pb, 1, !!(s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST));
1064  put_bits(&s->pb, 1, s->c.alternate_scan);
1065  }
1066  // FIXME sprite stuff
1067 
1068  put_bits(&s->pb, 5, s->c.qscale);
1069 
1070  if (s->c.pict_type != AV_PICTURE_TYPE_I)
1071  put_bits(&s->pb, 3, s->f_code); /* fcode_for */
1072  if (s->c.pict_type == AV_PICTURE_TYPE_B)
1073  put_bits(&s->pb, 3, s->b_code); /* fcode_back */
1074 
1075  return 0;
1076 }
1077 
1078 static av_cold void init_uni_dc_tab(void)
1079 {
1080  int level, uni_code, uni_len;
1081 
1082  for (level = -256; level < 256; level++) {
1083  int size, v, l;
1084  /* find number of bits */
1085  size = 0;
1086  v = abs(level);
1087  while (v) {
1088  v >>= 1;
1089  size++;
1090  }
1091 
1092  if (level < 0)
1093  l = (-level) ^ ((1 << size) - 1);
1094  else
1095  l = level;
1096 
1097  /* luminance */
1098  uni_code = ff_mpeg4_DCtab_lum[size][0];
1099  uni_len = ff_mpeg4_DCtab_lum[size][1];
1100 
1101  if (size > 0) {
1102  uni_code <<= size;
1103  uni_code |= l;
1104  uni_len += size;
1105  if (size > 8) {
1106  uni_code <<= 1;
1107  uni_code |= 1;
1108  uni_len++;
1109  }
1110  }
1111  uni_DCtab_lum_bits[level + 256] = uni_code;
1112  uni_DCtab_lum_len[level + 256] = uni_len;
1113 
1114  /* chrominance */
1115  uni_code = ff_mpeg4_DCtab_chrom[size][0];
1116  uni_len = ff_mpeg4_DCtab_chrom[size][1];
1117 
1118  if (size > 0) {
1119  uni_code <<= size;
1120  uni_code |= l;
1121  uni_len += size;
1122  if (size > 8) {
1123  uni_code <<= 1;
1124  uni_code |= 1;
1125  uni_len++;
1126  }
1127  }
1128  uni_DCtab_chrom_bits[level + 256] = uni_code;
1129  uni_DCtab_chrom_len[level + 256] = uni_len;
1130  }
1131 }
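/* Worked example: for level = -5 the magnitude 5 needs size = 3 bits and the
 * negative value is coded as l = 5 ^ ((1 << 3) - 1) = 2 ("010"), appended to
 * the size-3 prefix from ff_mpeg4_DCtab_lum; the extra marker bit is only
 * added for size > 8. */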
1132 
1133 static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab,
1134  uint8_t *len_tab)
1135 {
1136  // Type 3 escape method. The escape code is the same for both VLCs
1137  // (0x3, seven bits), so it is hardcoded.
1138  memset(len_tab, 30, 2 * 2 * 64 * 64);
1139  len_tab += 64;
1140  bits_tab += 64;
1141  for (int run = 0; run < 64; ++run) {
1142  for (int level = 1;; ++level) {
1143  // Escape code type 3 not last run (6 bits) marker marker
1144  unsigned code = (3 << 23) | (3 << 21) | (0 << 20) | (run << 14) | (1 << 13) | 1;
1145  // first the negative levels
1146  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, -level)] = code | (-level & 0xfff) << 1;
1147  bits_tab[UNI_MPEG4_ENC_INDEX(1, run, -level)] =
1148  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, -level)] | (1 << 20) /* last */;
1149 
1150  if (level == 64) // positive levels have a range of 1..63
1151  break;
1152  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, level)] = code | level << 1;
1153  bits_tab[UNI_MPEG4_ENC_INDEX(1, run, level)] =
1154  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, level)] | (1 << 20) /* last */;
1155  }
1156  // Is this needed at all?
1157  len_tab[UNI_MPEG4_ENC_INDEX(0, run, 0)] =
1158  len_tab[UNI_MPEG4_ENC_INDEX(1, run, 0)] = 0;
1159  }
1160 
1161  uint8_t max_run[2][32] = { 0 };
1162 
1163 #define VLC_NUM_CODES 102 // excluding the escape
1164  av_assert2(rl->n == VLC_NUM_CODES);
1165  for (int i = VLC_NUM_CODES - 1, max_level, cur_run = 0; i >= 0; --i) {
1166  int run = rl->table_run[i], level = rl->table_level[i];
1167  int last = i >= rl->last;
1168  unsigned code = rl->table_vlc[i][0] << 1;
1169  int len = rl->table_vlc[i][1] + 1;
1170 
1171  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, level)] = code;
1172  len_tab [UNI_MPEG4_ENC_INDEX(last, run, level)] = len;
1173  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, -level)] = code | 1;
1174  len_tab [UNI_MPEG4_ENC_INDEX(last, run, -level)] = len;
1175 
1176  if (!max_run[last][level])
1177  max_run[last][level] = run + 1;
1178  av_assert2(run + 1 <= max_run[last][level]);
1179 
1180  int run3 = run + max_run[last][level];
1181  int len3 = len + 7 + 2;
1182 
1183  if (run3 < 64 && len3 < len_tab[UNI_MPEG4_ENC_INDEX(last, run3, level)]) {
1184  unsigned code3 = code | (0x3 << 2 | 0x2) << len;
1185  bits_tab[UNI_MPEG4_ENC_INDEX(last, run3, level)] = code3;
1186  len_tab [UNI_MPEG4_ENC_INDEX(last, run3, level)] = len3;
1187  bits_tab[UNI_MPEG4_ENC_INDEX(last, run3, -level)] = code3 | 1;
1188  len_tab [UNI_MPEG4_ENC_INDEX(last, run3, -level)] = len3;
1189  }
1190  // table_run and table_level are ordered so that all the entries
1191  // with the same last and run are consecutive and level is ascending
1192  // among these entries. By traversing downwards we therefore automatically
1193  // encounter max_level of a given run first, needed for escape method 1.
1194  if (run != cur_run) {
1195  max_level = level;
1196  cur_run = run;
1197  } else
1198  av_assert2(max_level > level);
1199 
1200  code |= 0x3 << (len + 1);
1201  len += 7 + 1;
1202  level += max_level;
1203  av_assert2(len_tab [UNI_MPEG4_ENC_INDEX(last, run, level)] >= len);
1204  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, level)] = code;
1205  len_tab [UNI_MPEG4_ENC_INDEX(last, run, level)] = len;
1206  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, -level)] = code | 1;
1207  len_tab [UNI_MPEG4_ENC_INDEX(last, run, -level)] = len;
1208  }
1209 }
1210 
1211 static av_cold void mpeg4_encode_init_static(void)
1212 {
1213  init_uni_dc_tab();
1214 
1215     init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
1216     init_uni_mpeg4_rl_tab(&ff_h263_rl_inter,  uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
1217 
1218  for (int f_code = MAX_FCODE; f_code > 0; f_code--) {
1219  for (int mv = -(16 << f_code); mv < (16 << f_code); mv++)
1220  fcode_tab[mv + MAX_MV] = f_code;
1221  }
1222 }
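/* Because the loop fills the table from MAX_FCODE down to 1, narrower ranges
 * overwrite wider ones, so fcode_tab[mv + MAX_MV] ends up holding the minimal
 * f_code whose range [-(16 << f_code), 16 << f_code) still covers mv; e.g.
 * mv = 40 is outside [-32, 32) but inside [-64, 64), giving f_code = 2. */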
1223 
1224 static av_cold int encode_init(AVCodecContext *avctx)
1225 {
1226  static AVOnce init_static_once = AV_ONCE_INIT;
1227  Mpeg4EncContext *const m4 = avctx->priv_data;
1228  MPVMainEncContext *const m = &m4->m;
1229  MPVEncContext *const s = &m->s;
1230  int ret;
1231 
1232  if (avctx->width >= (1<<13) || avctx->height >= (1<<13)) {
1233  av_log(avctx, AV_LOG_ERROR, "dimensions too large for MPEG-4\n");
1234  return AVERROR(EINVAL);
1235  }
1236 
1237     m->encode_picture_header = mpeg4_encode_picture_header;
1238     s->encode_mb             = mpeg4_encode_mb;
1239 
1240  m->fcode_tab = fcode_tab + MAX_MV;
1241 
1242  s->min_qcoeff = -2048;
1243  s->max_qcoeff = 2047;
1244  s->intra_ac_vlc_length = uni_mpeg4_intra_rl_len;
1245  s->intra_ac_vlc_last_length = uni_mpeg4_intra_rl_len + 128 * 64;
1246  s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len;
1247  s->inter_ac_vlc_last_length = uni_mpeg4_inter_rl_len + 128 * 64;
1248  s->luma_dc_vlc_length = uni_DCtab_lum_len;
1249  s->ac_esc_length = 7 + 2 + 1 + 6 + 1 + 12 + 1;
1250  s->c.y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
1251  s->c.c_dc_scale_table = ff_mpeg4_c_dc_scale_table;
1252 
1253  ff_qpeldsp_init(&s->c.qdsp);
1254  if ((ret = ff_mpv_encode_init(avctx)) < 0)
1255  return ret;
1256 
1257  ff_thread_once(&init_static_once, mpeg4_encode_init_static);
1258 
1259  if (avctx->time_base.den > (1 << 16) - 1) {
1260  av_log(avctx, AV_LOG_ERROR,
1261  "timebase %d/%d not supported by MPEG 4 standard, "
1262  "the maximum admitted value for the timebase denominator "
1263  "is %d\n", avctx->time_base.num, avctx->time_base.den,
1264  (1 << 16) - 1);
1265  return AVERROR(EINVAL);
1266  }
1267 
1268  m4->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
1269 
1270  if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1271  avctx->extradata = av_malloc(1024);
1272  if (!avctx->extradata)
1273  return AVERROR(ENOMEM);
1274  init_put_bits(&s->pb, avctx->extradata, 1024);
1275 
1276         mpeg4_encode_visual_object_header(m);
1277         mpeg4_encode_vol_header(m4, 0, 0);
1278 
1279 // ff_mpeg4_stuffing(&s->pb); ?
1280  flush_put_bits(&s->pb);
1281  avctx->extradata_size = put_bytes_output(&s->pb);
1282  }
1283  return 0;
1284 }
1285 
1286 void ff_mpeg4_init_partitions(MPVEncContext *const s)
1287 {
1288  uint8_t *start = put_bits_ptr(&s->pb);
1289  uint8_t *end = s->pb.buf_end;
1290  int size = end - start;
1291  int pb_size = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start;
1292  int tex_size = (size - 2 * pb_size) & (~3);
1293 
1294  set_put_bits_buffer_size(&s->pb, pb_size);
1295  init_put_bits(&s->tex_pb, start + pb_size, tex_size);
1296  init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size);
1297 }
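/* The remaining output buffer is split into three independent bit writers:
 * roughly the first third for s->pb, the middle part for s->tex_pb and the
 * last third for s->pb2, so the partitions can be written separately and
 * concatenated later by the merge function below. */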
1298 
1299 void ff_mpeg4_merge_partitions(MPVEncContext *const s)
1300 {
1301  const int pb2_len = put_bits_count(&s->pb2);
1302  const int tex_pb_len = put_bits_count(&s->tex_pb);
1303  const int bits = put_bits_count(&s->pb);
1304 
1305  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
1306  put_bits(&s->pb, 19, DC_MARKER);
1307  s->misc_bits += 19 + pb2_len + bits - s->last_bits;
1308  s->i_tex_bits += tex_pb_len;
1309  } else {
1310  put_bits(&s->pb, 17, MOTION_MARKER);
1311  s->misc_bits += 17 + pb2_len;
1312  s->mv_bits += bits - s->last_bits;
1313  s->p_tex_bits += tex_pb_len;
1314  }
1315 
1316  flush_put_bits(&s->pb2);
1317  flush_put_bits(&s->tex_pb);
1318 
1319  set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
1320  ff_copy_bits(&s->pb, s->pb2.buf, pb2_len);
1321  ff_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
1322  s->last_bits = put_bits_count(&s->pb);
1323 }
1324 
1325 void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
1326 {
1327  int mb_num_bits = av_log2(s->c.mb_num - 1) + 1;
1328 
1329  put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s->c.pict_type, s->f_code, s->b_code), 0);
1330  put_bits(&s->pb, 1, 1);
1331 
1332  put_bits(&s->pb, mb_num_bits, s->c.mb_x + s->c.mb_y * s->c.mb_width);
1333  put_bits(&s->pb, 5 /* quant_precision */, s->c.qscale);
1334  put_bits(&s->pb, 1, 0); /* no HEC */
1335 }
1336 
1337 #define OFFSET(x) offsetof(MPVEncContext, x)
1338 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1339 static const AVOption options[] = {
1340  { "data_partitioning", "Use data partitioning.", OFFSET(c.data_partitioning), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
1341  { "alternate_scan", "Enable alternate scantable.", OFFSET(c.alternate_scan), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
1342  { "mpeg_quant", "Use MPEG quantizers instead of H.263",
1343       OFFSET(mpeg_quant), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, VE },
1344     FF_MPV_COMMON_BFRAME_OPTS
1345     FF_MPV_COMMON_OPTS
1346     FF_MPV_COMMON_MOTION_EST_OPTS
1347     FF_MPEG4_PROFILE_OPTS
1348     { NULL },
1349 };
1350 
1351 static const AVClass mpeg4enc_class = {
1352  .class_name = "MPEG4 encoder",
1353  .item_name = av_default_item_name,
1354  .option = options,
1355  .version = LIBAVUTIL_VERSION_INT,
1356 };
1357 
1358 const FFCodec ff_mpeg4_encoder = {
1359     .p.name         = "mpeg4",
1360  CODEC_LONG_NAME("MPEG-4 part 2"),
1361  .p.type = AVMEDIA_TYPE_VIDEO,
1362  .p.id = AV_CODEC_ID_MPEG4,
1363  .priv_data_size = sizeof(Mpeg4EncContext),
1364     .init           = encode_init,
1365     FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
1366     .close          = ff_mpv_encode_end,
1367     CODEC_PIXFMTS(AV_PIX_FMT_YUV420P),
1368     .color_ranges   = AVCOL_RANGE_MPEG,
1369     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
1370                       AV_CODEC_CAP_SLICE_THREADS |
1371                       AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
1372     .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
1373  .p.priv_class = &mpeg4enc_class,
1374 };
SIMPLE_VO_TYPE
#define SIMPLE_VO_TYPE
Definition: mpeg4videodefs.h:32
mpeg4_encode_init_static
static av_cold void mpeg4_encode_init_static(void)
Definition: mpeg4videoenc.c:1211
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:218
CODEC_PIXFMTS
#define CODEC_PIXFMTS(...)
Definition: codec_internal.h:386
FFUMOD
#define FFUMOD(a, b)
Definition: common.h:66
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:277
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:185
mpeg4_encode_ac_coeffs
static void mpeg4_encode_ac_coeffs(const int16_t block[64], const int last_index, int i, const uint8_t *const scan_table, PutBitContext *const ac_pb, const uint32_t *const bits_tab, const uint8_t *const len_tab)
Encode the AC coefficients of an 8x8 block.
Definition: mpeg4videoenc.c:327
FF_ASPECT_EXTENDED
#define FF_ASPECT_EXTENDED
Definition: h263.h:26
level
uint8_t level
Definition: svq3.c:208
MPVEncContext
Definition: mpegvideoenc.h:45
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
put_bits32
static void av_unused put_bits32(PutBitContext *s, uint32_t value)
Write exactly 32 bits into a bitstream.
Definition: put_bits.h:301
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:43
put_bytes_output
static int put_bytes_output(const PutBitContext *s)
Definition: put_bits.h:99
MAX_FCODE
#define MAX_FCODE
Definition: mpegvideoenc.h:264
thread.h
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
mv
static const int8_t mv[256][2]
Definition: 4xm.c:81
mpeg4_encode_gop_header
static void mpeg4_encode_gop_header(MPVMainEncContext *const m)
Definition: mpeg4videoenc.c:852
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_qpeldsp_init
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:784
h263enc.h
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:183
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
OFFSET
#define OFFSET(x)
Definition: mpeg4videoenc.c:1337
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:512
VOS_STARTCODE
#define VOS_STARTCODE
Definition: mpeg4videodefs.h:55
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
init_uni_dc_tab
static av_cold void init_uni_dc_tab(void)
Definition: mpeg4videoenc.c:1078
FFCodec
Definition: codec_internal.h:127
version.h
mpegvideo.h
Mpeg4EncContext::time_increment_bits
int time_increment_bits
number of bits to represent the fractional part of time
Definition: mpeg4videoenc.c:78
ff_mpeg4_get_video_packet_prefix_length
int ff_mpeg4_get_video_packet_prefix_length(enum AVPictureType pict_type, int f_code, int b_code)
Definition: mpeg4video.c:28
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
mpeg4_encode_mb
static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:451
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:227
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:182
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AV_CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:318
uni_mpeg4_intra_rl_bits
static uint32_t uni_mpeg4_intra_rl_bits[64 *64 *2 *2]
Definition: mpeg4videoenc.c:56
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:638
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:356
mpeg4videoenc.h
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1941
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:315
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
uni_mpeg4_intra_rl_len
static uint8_t uni_mpeg4_intra_rl_len[64 *64 *2 *2]
Definition: mpeg4videoenc.c:57
ff_mpeg4_DCtab_chrom
const uint8_t ff_mpeg4_DCtab_chrom[13][2]
Definition: mpeg4data.h:40
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
mainctx_to_mpeg4
static Mpeg4EncContext * mainctx_to_mpeg4(MPVMainEncContext *m)
Definition: mpeg4videoenc.c:81
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
wrap
#define wrap(func)
Definition: neontest.h:65
VOP_STARTCODE
#define VOP_STARTCODE
Definition: mpeg4videodefs.h:59
RLTable
RLTable.
Definition: rl.h:39
mpeg4_encode_visual_object_header
static void mpeg4_encode_visual_object_header(MPVMainEncContext *const m)
Definition: mpeg4videoenc.c:882
uni_mpeg4_inter_rl_bits
static uint32_t uni_mpeg4_inter_rl_bits[64 *64 *2 *2]
Definition: mpeg4videoenc.c:58
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
uni_DCtab_chrom_len
static uint8_t uni_DCtab_chrom_len[512]
Definition: mpeg4videoenc.c:50
FFUDIV
#define FFUDIV(a, b)
Definition: common.h:65
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:290
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1286
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:353
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
dquant_code
static const int dquant_code[5]
Definition: mpeg4videoenc.c:449
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:274
RLTable::n
int n
number of entries of table_vlc minus 1
Definition: rl.h:40
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:90
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
AV_PROFILE_UNKNOWN
#define AV_PROFILE_UNKNOWN
Definition: defs.h:65
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
s
#define s(width, name)
Definition: cbs_vp9.c:198
uni_mpeg4_inter_rl_len
static uint8_t uni_mpeg4_inter_rl_len[64 *64 *2 *2]
Definition: mpeg4videoenc.c:59
VLC_NUM_CODES
#define VLC_NUM_CODES
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:189
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:834
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:144
ff_mpeg4_rl_intra
RLTable ff_mpeg4_rl_intra
Definition: mpeg4data.h:108
uni_DCtab_chrom_bits
static uint16_t uni_DCtab_chrom_bits[512]
Definition: mpeg4videoenc.c:52
bits
uint8_t bits
Definition: vp3data.h:128
UNI_MPEG4_ENC_INDEX
#define UNI_MPEG4_ENC_INDEX(last, run, level)
Definition: mpeg4videoenc.c:63
uni_DCtab_lum_bits
static uint16_t uni_DCtab_lum_bits[512]
Definition: mpeg4videoenc.c:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:228
DC_MARKER
#define DC_MARKER
Definition: mpeg4videodefs.h:53
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:183
ff_put_string
void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
Put the string string in the bitstream.
Definition: bitstream.c:39
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:269
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
mpeg4_encode_vol_header
static void mpeg4_encode_vol_header(Mpeg4EncContext *const m4, int vo_number, int vol_number)
Definition: mpeg4videoenc.c:925
init_uni_mpeg4_rl_tab
static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab)
Definition: mpeg4videoenc.c:1133
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpeg4_DCtab_lum
const uint8_t ff_mpeg4_DCtab_lum[13][2]
Definition: mpeg4data.h:34
get_block_rate
static int get_block_rate(MPVEncContext *const s, int16_t block[64], int block_last_index, const uint8_t scantable[64])
Return the number of bits that encoding the 8x8 block in block would need.
Definition: mpeg4videoenc.c:90
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
NULL
#define NULL
Definition: coverity.c:32
FF_COMPLIANCE_VERY_STRICT
#define FF_COMPLIANCE_VERY_STRICT
Strictly conform to an older more strict version of the spec or reference software.
Definition: defs.h:58
run
uint8_t run
Definition: svq3.c:207
RLTable::table_vlc
const uint16_t(* table_vlc)[2]
Definition: rl.h:42
AV_LEVEL_UNKNOWN
#define AV_LEVEL_UNKNOWN
Definition: defs.h:206
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:240
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
options
Definition: swscale.c:43
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
Mpeg4EncContext
Definition: mpeg4videoenc.c:75
MOTION_MARKER
#define MOTION_MARKER
Definition: mpeg4videodefs.h:52
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1121
abs
#define abs(x)
Definition: cuda_runtime.h:35
FASTDIV
#define FASTDIV(a, b)
Definition: mathops.h:213
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1325
get_p_cbp
static int get_p_cbp(MPVEncContext *const s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h263enc.h:46
mpeg4_encode_picture_header
static int mpeg4_encode_picture_header(MPVMainEncContext *const m)
Definition: mpeg4videoenc.c:1016
MPVMainEncContext
Definition: mpegvideoenc.h:178
VISUAL_OBJ_STARTCODE
#define VISUAL_OBJ_STARTCODE
Definition: mpeg4videodefs.h:58
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:186
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:842
ADV_SIMPLE_VO_TYPE
#define ADV_SIMPLE_VO_TYPE
Definition: mpeg4videodefs.h:40
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
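A short sketch of setting the encoder time base for a fixed-rate stream; the 25 fps value is only illustrative.
#include "libavcodec/avcodec.h"
#include "libavutil/rational.h"

/* one tick per frame at 25 fps, so frame pts values are simply 0, 1, 2, ... */
static void setup_timing(AVCodecContext *enc)
{
    enc->time_base = (AVRational){ 1, 25 };
    enc->framerate = (AVRational){ 25, 1 };
}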
RLTable::table_level
const int8_t * table_level
Definition: rl.h:44
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:502
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ff_h263_rl_inter
RLTable ff_h263_rl_inter
Definition: h263data.c:159
ff_mpeg4_y_dc_scale_table
const uint8_t ff_mpeg4_y_dc_scale_table[32]
Definition: mpeg4data.h:356
codec_internal.h
Mpeg4EncContext::m
MPVMainEncContext m
Definition: mpeg4videoenc.c:76
put_bits_assume_flushed
static void put_bits_assume_flushed(const PutBitContext *s)
Inform the compiler that a PutBitContext is flushed (i.e. ...
Definition: put_bits.h:82
ff_h263_cbpy_tab
const uint8_t ff_h263_cbpy_tab[16][2]
Definition: h263data.c:82
size
int size
Definition: twinvq_data.h:10344
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
a
The reader does not expect b to be semantically negative here, and if the code is changed, maybe by adding a division or something else, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a ...
Definition: undefined.txt:41
fcode_tab
static uint8_t fcode_tab[MAX_MV *2+1]
Minimal fcode that a motion vector component would need.
Definition: mpeg4videoenc.c:44
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
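A hedged sketch of requesting slice threading from an encoder that advertises this capability; whether and how it is honored depends on the codec and build, and the counts below are illustrative.
#include "libavcodec/avcodec.h"

/* set before avcodec_open2() */
static void request_slice_threads(AVCodecContext *enc)
{
    if (enc->codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) {
        enc->thread_type  = FF_THREAD_SLICE;
        enc->thread_count = 4;   /* 0 would let FFmpeg pick */
        enc->slices       = 4;   /* slices per picture for the encoder */
    }
}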
offset
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it ...
Definition: writing_filters.txt:86
attributes.h
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:188
ff_h263_inter_MCBPC_bits
const uint8_t ff_h263_inter_MCBPC_bits[28]
Definition: h263data.c:47
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:265
FF_MPEG4_PROFILE_OPTS
#define FF_MPEG4_PROFILE_OPTS
Definition: profiles.h:42
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:388
VE
#define VE
Definition: mpeg4videoenc.c:1338
av_assert2
#define av_assert2(cond)
assert() equivalent that does lie in speed-critical code.
Definition: avassert.h:68
RECT_SHAPE
#define RECT_SHAPE
Definition: mpeg4videodefs.h:27
log.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
... and forward: test the status of outputs and forward it to the corresponding inputs, or return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some input, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code: ...
Definition: filter_design.txt:178
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
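A sketch, assuming the common global-header workflow: with AV_CODEC_FLAG_GLOBAL_HEADER set, the encoder's configuration headers end up in extradata after avcodec_open2() instead of being repeated in the bitstream. The function name is illustrative.
#include "libavcodec/avcodec.h"

static int open_with_global_header(AVCodecContext *enc, const AVCodec *codec)
{
    int ret;

    enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    ret = avcodec_open2(enc, codec, NULL);
    if (ret < 0)
        return ret;
    /* enc->extradata / enc->extradata_size now hold the out-of-band headers */
    return 0;
}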
uni_DCtab_lum_len
static uint8_t uni_DCtab_lum_len[512]
Definition: mpeg4videoenc.c:49
restore_ac_coeffs
static void restore_ac_coeffs(MPVEncContext *const s, int16_t block[6][64], const int dir[6], const uint8_t *st[6], const int zigzag_last_index[6])
Restore the ac coefficients in block that have been changed by decide_ac_pred().
Definition: mpeg4videoenc.c:125
mpeg4enc_class
static const AVClass mpeg4enc_class
Definition: mpeg4videoenc.c:1351
options
static const AVOption options[]
Definition: mpeg4videoenc.c:1339
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_mpeg4_encoder
const FFCodec ff_mpeg4_encoder
Definition: mpeg4videoenc.c:1358
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
ff_h263_aspect_to_info
av_const int ff_h263_aspect_to_info(AVRational aspect)
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:733
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
mpeg4videodefs.h
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: mpeg4videoenc.c:1224
ff_mpeg4_init_direct_mv
void ff_mpeg4_init_direct_mv(MpegEncContext *s)
Definition: mpeg4video.c:73
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
ff_h263_intra_MCBPC_bits
const uint8_t ff_h263_intra_MCBPC_bits[9]
Definition: h263data.c:33
AVCodecContext
main external API structure.
Definition: avcodec.h:431
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
ff_h263_intra_MCBPC_code
const uint8_t ff_h263_intra_MCBPC_code[9]
Definition: h263data.c:32
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
mpeg4video.h
AVRational::den
int den
Denominator.
Definition: rational.h:60
mpeg4_encode_dc
static void mpeg4_encode_dc(PutBitContext *s, int level, int n)
Encode the dc value.
Definition: mpeg4videoenc.c:311
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
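A hedged sketch of how AV_OPT_TYPE_INT and AV_OPT_TYPE_BOOL entries typically appear in an encoder's options[] table; the context struct, option names, and limits below are purely illustrative, not the ones defined in this file.
#include <stddef.h>
#include "libavutil/opt.h"

typedef struct DemoEncContext {   /* hypothetical private context */
    int demo_level;
    int demo_flag;
} DemoEncContext;

#define OFFSET(x) offsetof(DemoEncContext, x)
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)

static const AVOption demo_options[] = {
    { "demo_level", "illustrative integer option", OFFSET(demo_level),
      AV_OPT_TYPE_INT,  { .i64 = 0 }, 0, 10, FLAGS },
    { "demo_flag",  "illustrative boolean option", OFFSET(demo_flag),
      AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1,  FLAGS },
    { NULL },
};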
RLTable::last
int last
number of values for last = 0
Definition: rl.h:41
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
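A short sketch of the flush (drain) sequence that AV_CODEC_CAP_DELAY implies: send a NULL frame, then receive packets until AVERROR_EOF. The consume() callback is a hypothetical packet sink.
#include "libavcodec/avcodec.h"

static int drain_encoder(AVCodecContext *enc, AVPacket *pkt,
                         int (*consume)(const AVPacket *pkt))
{
    int ret = avcodec_send_frame(enc, NULL);    /* enter draining mode */
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        consume(pkt);
        av_packet_unref(pkt);
    }
    return ret == AVERROR_EOF ? 0 : ret;
}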
FF_MPV_COMMON_BFRAME_OPTS
#define FF_MPV_COMMON_BFRAME_OPTS
Definition: mpegvideoenc.h:351
USER_DATA_STARTCODE
#define USER_DATA_STARTCODE
Definition: mpeg4videodefs.h:56
ff_h263_inter_MCBPC_code
const uint8_t ff_h263_inter_MCBPC_code[28]
Definition: h263data.c:38
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:266
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:557
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1299
ff_mpeg4_c_dc_scale_table
const uint8_t ff_mpeg4_c_dc_scale_table[32]
Definition: mpeg4data.h:360
decide_ac_pred
static int decide_ac_pred(MPVEncContext *const s, int16_t block[6][64], const int dir[6], const uint8_t *st[6], int zigzag_last_index[6])
Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
Definition: mpeg4videoenc.c:186
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:181
mpeg4_encode_blocks_intra
static void mpeg4_encode_blocks_intra(MPVEncContext *const s, const int16_t block[6][64], const int intra_dc[6], const uint8_t *const *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
Definition: mpeg4videoenc.c:386
slice_to_mainenc
static const MPVMainEncContext * slice_to_mainenc(const MPVEncContext *s)
Definition: mpegvideoenc.h:253
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
mpeg4videodata.h
GOP_STARTCODE
#define GOP_STARTCODE
Definition: mpeg4videodefs.h:57
ff_h263_encode_motion_vector
static void ff_h263_encode_motion_vector(MPVEncContext *s, int x, int y, int f_code)
Definition: h263enc.h:39
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
get_b_cbp
static int get_b_cbp(MPVEncContext *const s, int16_t block[6][64], int motion_x, int motion_y, int mb_type)
Definition: mpeg4videoenc.c:407
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
mpeg4_pred_dc
static int mpeg4_pred_dc(MpegEncContext *s, int n, int *dir_ptr)
Predict the dc.
Definition: mpeg4videoenc.c:153
put_bits.h
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
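A small worked example of av_log2(), which returns the index of the highest set bit; bits_for() is a hypothetical helper, and its connection to fcode selection is only the loose idea that larger motion-vector magnitudes need more bits.
#include "libavutil/common.h"   /* declares av_log2() */

/* av_log2(1) == 0, av_log2(16) == 4, av_log2(17) == 4. */
static int bits_for(unsigned v)
{
    return v ? av_log2(v) + 1 : 0;   /* bits needed to represent the magnitude of v */
}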
MPVMainEncContext::s
MPVEncContext s
The main slice context.
Definition: mpegvideoenc.h:179
mpeg4_encode_blocks_inter
static void mpeg4_encode_blocks_inter(MPVEncContext *const s, const int16_t block[6][64], PutBitContext *ac_pb)
Definition: mpeg4videoenc.c:370
RLTable::table_run
const int8_t * table_run
Definition: rl.h:43
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
h263.h