FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12codecs.h"
51 #include "mpeg12data.h"
52 #include "mpeg12enc.h"
53 #include "mpegvideo.h"
54 #include "mpegvideodata.h"
55 #include "mpegvideoenc.h"
56 #include "h261enc.h"
57 #include "h263.h"
58 #include "h263data.h"
59 #include "h263enc.h"
60 #include "mjpegenc_common.h"
61 #include "mathops.h"
62 #include "mpegutils.h"
63 #include "mpegvideo_unquantize.h"
64 #include "mjpegenc.h"
65 #include "speedhqenc.h"
66 #include "msmpeg4enc.h"
67 #include "pixblockdsp.h"
68 #include "qpeldsp.h"
69 #include "faandct.h"
70 #include "aandcttab.h"
71 #include "mpeg4video.h"
72 #include "mpeg4videodata.h"
73 #include "mpeg4videoenc.h"
74 #include "internal.h"
75 #include "bytestream.h"
76 #include "rv10enc.h"
77 #include "libavutil/refstruct.h"
78 #include <limits.h>
79 #include "sp5x.h"
80 
81 #define QUANT_BIAS_SHIFT 8
82 
83 #define QMAT_SHIFT_MMX 16
84 #define QMAT_SHIFT 21
85 
86 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
87 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
88 static int sse_mb(MPVEncContext *const s);
89 static int dct_quantize_c(MPVEncContext *const s,
90  int16_t *block, int n,
91  int qscale, int *overflow);
92 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
93 
94 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
95 
96 static const AVOption mpv_generic_options[] = {
99  { NULL },
100 };
101 
103  .class_name = "generic mpegvideo encoder",
104  .item_name = av_default_item_name,
105  .option = mpv_generic_options,
106  .version = LIBAVUTIL_VERSION_INT,
107 };
108 
/**
 * Precompute quantization multiplier tables for every qscale in [qmin, qmax].
 *
 * The scaling applied depends on which forward DCT is in use: the islow/faan
 * DCTs and the ifast DCT need different fixed-point factors (the ifast path
 * additionally folds in ff_aanscales). The generic path also fills the
 * 16-bit table pair qmat16 used by SIMD quantizers.
 *
 * @param s            encoder context (provides fdsp and the IDCT permutation)
 * @param qmat         output: per-qscale 32-bit multiplier tables
 * @param qmat16       output: per-qscale {multiplier, bias} 16-bit table pairs;
 *                     only written on the generic (non-islow/ifast) path
 * @param quant_matrix input quantization matrix (64 entries, natural order)
 * @param bias         rounding bias folded into qmat16[..][1]
 * @param qmin         lowest qscale to precompute (inclusive)
 * @param qmax         highest qscale to precompute (inclusive)
 * @param intra        nonzero for intra tables; index 0 (DC) is then skipped
 *                     by the overflow check at the bottom
 */
void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
 uint16_t (*qmat16)[2][64],
 const uint16_t *quant_matrix,
 int bias, int qmin, int qmax, int intra)
{
 FDCTDSPContext *fdsp = &s->fdsp;
 int qscale;
 int shift = 0;

 for (qscale = qmin; qscale <= qmax; qscale++) {
 int i;
 int qscale2;

 /* q_scale_type selects the MPEG-2 non-linear qscale mapping;
  * otherwise the linear mapping qscale2 = 2 * qscale is used. */
 if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
 else qscale2 = qscale << 1;

 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
 fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
 fdsp->fdct == ff_jpeg_fdct_islow_10) {
 /* Accurate DCTs: plain reciprocal of qscale2 * matrix entry. */
 for (i = 0; i < 64; i++) {
 const int j = s->c.idsp.idct_permutation[i];
 int64_t den = (int64_t) qscale2 * quant_matrix[j];
 /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
  * Assume x = qscale2 * quant_matrix[j]
  * 1 <= x <= 28560
  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
  * 4194304 >= (1 << 22) / (x) >= 146 */

 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
 }
 } else if (fdsp->fdct == ff_fdct_ifast) {
 /* ifast DCT leaves the AAN scale factors in its output, so they are
  * divided out here (hence the extra << 14 and ff_aanscales term). */
 for (i = 0; i < 64; i++) {
 const int j = s->c.idsp.idct_permutation[i];
 int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
 /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
  * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
  * 1247 <= x <= 900239760
  * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
  * 55107840 >= (1 << 36) / (x) >= 76 */

 qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
 }
 } else {
 /* Generic path: also build the 16-bit multiplier/bias tables. */
 for (i = 0; i < 64; i++) {
 const int j = s->c.idsp.idct_permutation[i];
 int64_t den = (int64_t) qscale2 * quant_matrix[j];
 /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
  * Assume x = qscale2 * quant_matrix[j]
  * 1 <= x <= 28560
  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
  * 4194304 >= (1 << 22) / (x) >= 146
  *
  * 1 <= x <= 28560
  * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
  * 131072 >= (1 << 17) / (x) >= 4 */

 qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
 qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

 /* Clamp so the multiplier fits the 16-bit SIMD quantizer. */
 if (qmat16[qscale][0][i] == 0 ||
 qmat16[qscale][0][i] == 128 * 256)
 qmat16[qscale][0][i] = 128 * 256 - 1;
 qmat16[qscale][1][i] =
 ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
 qmat16[qscale][0][i]);
 }
 }

 /* Determine how far the worst-case product max * qmat exceeds INT_MAX;
  * a nonzero shift means QMAT_SHIFT is too large and overflow is possible. */
 for (i = intra; i < 64; i++) {
 int64_t max = 8191;
 if (fdsp->fdct == ff_fdct_ifast) {
 max = (8191LL * ff_aanscales[i]) >> 14;
 }
 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
 shift++;
 }
 }
 }
 if (shift) {
 av_log(s->c.avctx, AV_LOG_INFO,
 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
 QMAT_SHIFT - shift);
 }
}
195 
196 static inline void update_qscale(MPVMainEncContext *const m)
197 {
198  MPVEncContext *const s = &m->s;
199 
200  if (s->c.q_scale_type == 1 && 0) {
201  int i;
202  int bestdiff=INT_MAX;
203  int best = 1;
204 
205  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
206  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
207  if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
208  (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
209  continue;
210  if (diff < bestdiff) {
211  bestdiff = diff;
212  best = i;
213  }
214  }
215  s->c.qscale = best;
216  } else {
217  s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
218  (FF_LAMBDA_SHIFT + 7);
219  s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
220  }
221 
222  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
224 }
225 
227 {
228  int i;
229 
230  if (matrix) {
231  put_bits(pb, 1, 1);
232  for (i = 0; i < 64; i++) {
233  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
234  }
235  } else
236  put_bits(pb, 1, 0);
237 }
238 
239 /**
240  * init s->c.cur_pic.qscale_table from s->lambda_table
241  */
242 static void init_qscale_tab(MPVEncContext *const s)
243 {
244  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
245 
246  for (int i = 0; i < s->c.mb_num; i++) {
247  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
248  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
249  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
250  s->c.avctx->qmax);
251  }
252 }
253 
255  const MPVEncContext *const src)
256 {
257 #define COPY(a) dst->a = src->a
258  COPY(c.pict_type);
259  COPY(f_code);
260  COPY(b_code);
261  COPY(c.qscale);
262  COPY(lambda);
263  COPY(lambda2);
264  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
265  COPY(c.progressive_frame); // FIXME don't set in encode_header
266  COPY(partitioned_frame); // FIXME don't set in encode_header
267 #undef COPY
268 }
269 
271 {
272  for (int i = -16; i < 16; i++)
273  default_fcode_tab[i + MAX_MV] = 1;
274 }
275 
276 /**
277  * Set the given MPVEncContext to defaults for encoding.
278  */
280 {
281  MPVEncContext *const s = &m->s;
282  static AVOnce init_static_once = AV_ONCE_INIT;
283 
285 
286  s->f_code = 1;
287  s->b_code = 1;
288 
289  if (!m->fcode_tab) {
291  ff_thread_once(&init_static_once, mpv_encode_init_static);
292  }
293  if (!s->c.y_dc_scale_table) {
294  s->c.y_dc_scale_table =
295  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
296  }
297 }
298 
300 {
301  s->dct_quantize = dct_quantize_c;
302 
303 #if ARCH_X86
305 #endif
306 
307  if (s->c.avctx->trellis)
308  s->dct_quantize = dct_quantize_trellis_c;
309 }
310 
312 {
313  MpegEncContext *const s = &s2->c;
314  MPVUnquantDSPContext unquant_dsp_ctx;
315 
316  ff_mpv_unquantize_init(&unquant_dsp_ctx,
317  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
318 
319  if (s2->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
320  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
321  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
322  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
323  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
324  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
325  } else {
326  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
327  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
328  }
329 }
330 
332 {
333  MPVEncContext *const s = &m->s;
334  MECmpContext mecc;
335  me_cmp_func me_cmp[6];
336  int ret;
337 
338  ff_me_cmp_init(&mecc, avctx);
339  ret = ff_me_init(&s->me, avctx, &mecc, 1);
340  if (ret < 0)
341  return ret;
342  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
343  if (ret < 0)
344  return ret;
345  m->frame_skip_cmp_fn = me_cmp[1];
347  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
348  if (ret < 0)
349  return ret;
350  if (!me_cmp[0] || !me_cmp[4])
351  return AVERROR(EINVAL);
352  s->ildct_cmp[0] = me_cmp[0];
353  s->ildct_cmp[1] = me_cmp[4];
354  }
355 
356  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
357 
358  s->sse_cmp[0] = mecc.sse[0];
359  s->sse_cmp[1] = mecc.sse[1];
360  s->sad_cmp[0] = mecc.sad[0];
361  s->sad_cmp[1] = mecc.sad[1];
362  if (avctx->mb_cmp == FF_CMP_NSSE) {
363  s->n_sse_cmp[0] = mecc.nsse[0];
364  s->n_sse_cmp[1] = mecc.nsse[1];
365  } else {
366  s->n_sse_cmp[0] = mecc.sse[0];
367  s->n_sse_cmp[1] = mecc.sse[1];
368  }
369 
370  return 0;
371 }
372 
373 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
375 {
376  MPVEncContext *const s = &m->s;
377  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
378  const uint16_t *intra_matrix, *inter_matrix;
379  int ret;
380 
381  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
382  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
383  return AVERROR(ENOMEM);
384 
385  if (s->c.out_format == FMT_MJPEG) {
386  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
387  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
388  // No need to set q_inter_matrix
390  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
391  return 0;
392  } else {
393  s->q_chroma_intra_matrix = s->q_intra_matrix;
394  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
395  }
396  if (!m->intra_only) {
397  s->q_inter_matrix = s->q_intra_matrix + 32;
398  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
399  }
400 
401  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
402  s->mpeg_quant) {
405  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
406  intra_matrix =
408  } else {
409  /* MPEG-1/2, SpeedHQ */
412  }
413  if (avctx->intra_matrix)
415  if (avctx->inter_matrix)
417 
418  /* init q matrix */
419  for (int i = 0; i < 64; i++) {
420  int j = s->c.idsp.idct_permutation[i];
421 
422  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
423  s->c.inter_matrix[j] = inter_matrix[i];
424  }
425 
426  /* precompute matrix */
428  if (ret < 0)
429  return ret;
430 
431  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
432  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
433  31, 1);
434  if (s->q_inter_matrix)
435  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
436  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
437  31, 0);
438 
439  return 0;
440 }
441 
443 {
444  MPVEncContext *const s = &m->s;
445  int has_b_frames = !!m->max_b_frames;
446  int16_t (*mv_table)[2];
447 
448  /* Allocate MB type table */
449  unsigned mb_array_size = s->c.mb_stride * s->c.mb_height;
450  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
451  if (!s->mb_type)
452  return AVERROR(ENOMEM);
453  s->mc_mb_var = s->mb_type + mb_array_size;
454  s->mb_var = s->mc_mb_var + mb_array_size;
455  s->mb_mean = (uint8_t*)(s->mb_var + mb_array_size);
456 
457  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
458  return AVERROR(ENOMEM);
459 
460  unsigned mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
461  unsigned nb_mv_tables = 1 + 5 * has_b_frames;
462  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
463  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
464  nb_mv_tables += 8 * has_b_frames;
465  s->p_field_select_table[0] = av_calloc(mv_table_size, 2 * (2 + 4 * has_b_frames));
466  if (!s->p_field_select_table[0])
467  return AVERROR(ENOMEM);
468  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
469  }
470 
471  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
472  if (!mv_table)
473  return AVERROR(ENOMEM);
474  m->mv_table_base = mv_table;
475  mv_table += s->c.mb_stride + 1;
476 
477  s->p_mv_table = mv_table;
478  if (has_b_frames) {
479  s->b_forw_mv_table = mv_table += mv_table_size;
480  s->b_back_mv_table = mv_table += mv_table_size;
481  s->b_bidir_forw_mv_table = mv_table += mv_table_size;
482  s->b_bidir_back_mv_table = mv_table += mv_table_size;
483  s->b_direct_mv_table = mv_table += mv_table_size;
484 
485  if (s->p_field_select_table[1]) { // MPEG-4 or INTERLACED_ME above
486  uint8_t *field_select = s->p_field_select_table[1];
487  for (int j = 0; j < 2; j++) {
488  for (int k = 0; k < 2; k++) {
489  for (int l = 0; l < 2; l++)
490  s->b_field_mv_table[j][k][l] = mv_table += mv_table_size;
491  s->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
492  }
493  }
494  }
495  }
496 
497  return 0;
498 }
499 
501 {
502  MPVEncContext *const s = &m->s;
503  // Align the following per-thread buffers to avoid false sharing.
504  enum {
505 #ifndef _MSC_VER
506  /// The number is supposed to match/exceed the cache-line size.
507  ALIGN = FFMAX(128, _Alignof(max_align_t)),
508 #else
509  ALIGN = 128,
510 #endif
511  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
512  };
513  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
514  "Need checks for potential overflow.");
515  unsigned nb_slices = s->c.slice_context_count;
516  char *dct_error = NULL;
517 
518  if (m->noise_reduction) {
519  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
520  return AVERROR(ENOMEM);
521  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
522  if (!dct_error)
523  return AVERROR(ENOMEM);
525  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
526  }
527 
528  const int y_size = s->c.b8_stride * (2 * s->c.mb_height + 1);
529  const int c_size = s->c.mb_stride * (s->c.mb_height + 1);
530  const int yc_size = y_size + 2 * c_size;
531  ptrdiff_t offset = 0;
532 
533  for (unsigned i = 0; i < nb_slices; ++i) {
534  MPVEncContext *const s2 = s->c.enc_contexts[i];
535 
536  s2->block = s2->blocks[0];
537 
538  if (dct_error) {
539  s2->dct_offset = s->dct_offset;
540  s2->dct_error_sum = (void*)dct_error;
541  dct_error += DCT_ERROR_SIZE;
542  }
543 
544  if (s2->c.ac_val) {
545  s2->c.dc_val += offset + i;
546  s2->c.ac_val += offset;
547  offset += yc_size;
548  }
549  }
550  return 0;
551 }
552 
553 /* init video encoder */
555 {
556  MPVMainEncContext *const m = avctx->priv_data;
557  MPVEncContext *const s = &m->s;
558  AVCPBProperties *cpb_props;
559  int gcd, ret;
560 
562 
563  switch (avctx->pix_fmt) {
564  case AV_PIX_FMT_YUVJ444P:
565  case AV_PIX_FMT_YUV444P:
566  s->c.chroma_format = CHROMA_444;
567  break;
568  case AV_PIX_FMT_YUVJ422P:
569  case AV_PIX_FMT_YUV422P:
570  s->c.chroma_format = CHROMA_422;
571  break;
572  default:
573  av_unreachable("Already checked via CODEC_PIXFMTS");
574  case AV_PIX_FMT_YUVJ420P:
575  case AV_PIX_FMT_YUV420P:
576  s->c.chroma_format = CHROMA_420;
577  break;
578  }
579 
581 
582  m->bit_rate = avctx->bit_rate;
583  s->c.width = avctx->width;
584  s->c.height = avctx->height;
585  if (avctx->gop_size > 600 &&
588  "keyframe interval too large!, reducing it from %d to %d\n",
589  avctx->gop_size, 600);
590  avctx->gop_size = 600;
591  }
592  m->gop_size = avctx->gop_size;
593  s->c.avctx = avctx;
595  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
596  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
598  } else if (avctx->max_b_frames < 0) {
600  "max b frames must be 0 or positive for mpegvideo based encoders\n");
601  return AVERROR(EINVAL);
602  }
604  s->c.codec_id = avctx->codec->id;
606  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
607  return AVERROR(EINVAL);
608  }
609 
610  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
611  s->rtp_mode = !!s->rtp_payload_size;
612  s->c.intra_dc_precision = avctx->intra_dc_precision;
613 
614  // workaround some differences between how applications specify dc precision
615  if (s->c.intra_dc_precision < 0) {
616  s->c.intra_dc_precision += 8;
617  } else if (s->c.intra_dc_precision >= 8)
618  s->c.intra_dc_precision -= 8;
619 
620  if (s->c.intra_dc_precision < 0) {
622  "intra dc precision must be positive, note some applications use"
623  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
624  return AVERROR(EINVAL);
625  }
626 
627  if (s->c.intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
628  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
629  return AVERROR(EINVAL);
630  }
632 
633  if (m->gop_size <= 1) {
634  m->intra_only = 1;
635  m->gop_size = 12;
636  } else {
637  m->intra_only = 0;
638  }
639 
640  /* Fixed QSCALE */
642 
643  s->adaptive_quant = (avctx->lumi_masking ||
644  avctx->dark_masking ||
647  avctx->p_masking ||
648  m->border_masking ||
649  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
650  !m->fixed_qscale;
651 
652  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
653 
655  switch(avctx->codec_id) {
658  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
659  break;
660  case AV_CODEC_ID_MPEG4:
664  if (avctx->rc_max_rate >= 15000000) {
665  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
666  } else if(avctx->rc_max_rate >= 2000000) {
667  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
668  } else if(avctx->rc_max_rate >= 384000) {
669  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
670  } else
671  avctx->rc_buffer_size = 40;
672  avctx->rc_buffer_size *= 16384;
673  break;
674  }
675  if (avctx->rc_buffer_size) {
676  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
677  }
678  }
679 
680  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
681  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
682  return AVERROR(EINVAL);
683  }
684 
687  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
688  }
689 
691  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
692  return AVERROR(EINVAL);
693  }
694 
696  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
697  return AVERROR(EINVAL);
698  }
699 
700  if (avctx->rc_max_rate &&
704  "impossible bitrate constraints, this will fail\n");
705  }
706 
707  if (avctx->rc_buffer_size &&
710  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
711  return AVERROR(EINVAL);
712  }
713 
714  if (!m->fixed_qscale &&
717  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
719  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
720  if (nbt <= INT_MAX) {
721  avctx->bit_rate_tolerance = nbt;
722  } else
723  avctx->bit_rate_tolerance = INT_MAX;
724  }
725 
726  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
727  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
728  s->c.codec_id != AV_CODEC_ID_FLV1) {
729  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
730  return AVERROR(EINVAL);
731  }
732 
733  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
735  "OBMC is only supported with simple mb decision\n");
736  return AVERROR(EINVAL);
737  }
738 
739  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
740  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
741  return AVERROR(EINVAL);
742  }
743 
744  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
745  s->c.codec_id == AV_CODEC_ID_H263 ||
746  s->c.codec_id == AV_CODEC_ID_H263P) &&
747  (avctx->sample_aspect_ratio.num > 255 ||
748  avctx->sample_aspect_ratio.den > 255)) {
750  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
754  }
755 
756  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
757  s->c.codec_id == AV_CODEC_ID_H263P) &&
758  (avctx->width > 2048 ||
759  avctx->height > 1152 )) {
760  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
761  return AVERROR(EINVAL);
762  }
763  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
764  (avctx->width > 65535 ||
765  avctx->height > 65535 )) {
766  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
767  return AVERROR(EINVAL);
768  }
769  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
770  s->c.codec_id == AV_CODEC_ID_H263P ||
771  s->c.codec_id == AV_CODEC_ID_RV20) &&
772  ((avctx->width &3) ||
773  (avctx->height&3) )) {
774  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
775  return AVERROR(EINVAL);
776  }
777 
778  if (s->c.codec_id == AV_CODEC_ID_RV10 &&
779  (avctx->width &15 ||
780  avctx->height&15 )) {
781  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
782  return AVERROR(EINVAL);
783  }
784 
785  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
786  s->c.codec_id == AV_CODEC_ID_WMV2) &&
787  avctx->width & 1) {
788  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
789  return AVERROR(EINVAL);
790  }
791 
793  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
794  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
795  return AVERROR(EINVAL);
796  }
797 
798  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
799  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
800  return AVERROR(EINVAL);
801  }
802 
803  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
805  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
806  return AVERROR(EINVAL);
807  }
808 
809  if (m->scenechange_threshold < 1000000000 &&
812  "closed gop with scene change detection are not supported yet, "
813  "set threshold to 1000000000\n");
814  return AVERROR_PATCHWELCOME;
815  }
816 
818  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
821  "low delay forcing is only available for mpeg2, "
822  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
823  return AVERROR(EINVAL);
824  }
825  if (m->max_b_frames != 0) {
827  "B-frames cannot be used with low delay\n");
828  return AVERROR(EINVAL);
829  }
830  }
831 
832  if (avctx->slices > 1 &&
834  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
835  return AVERROR(EINVAL);
836  }
837 
840  "notice: b_frame_strategy only affects the first pass\n");
841  m->b_frame_strategy = 0;
842  }
843 
845  if (gcd > 1) {
846  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
847  avctx->time_base.den /= gcd;
848  avctx->time_base.num /= gcd;
849  //return -1;
850  }
851 
852  if (s->mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
853  // (a + x * 3 / 8) / x
854  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
855  s->inter_quant_bias = 0;
856  } else {
857  s->intra_quant_bias = 0;
858  // (a - x / 4) / x
859  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
860  }
861 
862  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
863  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
864  return AVERROR(EINVAL);
865  }
866 
867  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
868 
869  switch (avctx->codec->id) {
870 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
872  s->rtp_mode = 1;
873  /* fallthrough */
875  s->c.out_format = FMT_MPEG1;
876  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
877  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
879  break;
880 #endif
881 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
882  case AV_CODEC_ID_MJPEG:
883  case AV_CODEC_ID_AMV:
884  s->c.out_format = FMT_MJPEG;
885  m->intra_only = 1; /* force intra only for jpeg */
886  avctx->delay = 0;
887  s->c.low_delay = 1;
888  break;
889 #endif
890  case AV_CODEC_ID_SPEEDHQ:
891  s->c.out_format = FMT_SPEEDHQ;
892  m->intra_only = 1; /* force intra only for SHQ */
893  avctx->delay = 0;
894  s->c.low_delay = 1;
895  break;
896  case AV_CODEC_ID_H261:
897  s->c.out_format = FMT_H261;
898  avctx->delay = 0;
899  s->c.low_delay = 1;
900  s->rtp_mode = 0; /* Sliced encoding not supported */
901  break;
902  case AV_CODEC_ID_H263:
903  if (!CONFIG_H263_ENCODER)
906  s->c.width, s->c.height) == 8) {
908  "The specified picture size of %dx%d is not valid for "
909  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
910  "352x288, 704x576, and 1408x1152. "
911  "Try H.263+.\n", s->c.width, s->c.height);
912  return AVERROR(EINVAL);
913  }
914  s->c.out_format = FMT_H263;
915  avctx->delay = 0;
916  s->c.low_delay = 1;
917  break;
918  case AV_CODEC_ID_H263P:
919  s->c.out_format = FMT_H263;
920  /* Fx */
921  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
922  s->modified_quant = s->c.h263_aic;
923  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
924  s->me.unrestricted_mv = s->c.obmc || s->loop_filter || s->umvplus;
925  s->flipflop_rounding = 1;
926 
927  /* /Fx */
928  /* These are just to be sure */
929  avctx->delay = 0;
930  s->c.low_delay = 1;
931  break;
932  case AV_CODEC_ID_FLV1:
933  s->c.out_format = FMT_H263;
934  s->me.unrestricted_mv = 1;
935  s->rtp_mode = 0; /* don't allow GOB */
936  avctx->delay = 0;
937  s->c.low_delay = 1;
938  break;
939 #if CONFIG_RV10_ENCODER
940  case AV_CODEC_ID_RV10:
942  s->c.out_format = FMT_H263;
943  avctx->delay = 0;
944  s->c.low_delay = 1;
945  break;
946 #endif
947 #if CONFIG_RV20_ENCODER
948  case AV_CODEC_ID_RV20:
950  s->c.out_format = FMT_H263;
951  avctx->delay = 0;
952  s->c.low_delay = 1;
953  s->modified_quant = 1;
954  // Set here to force allocation of dc_val;
955  // will be set later on a per-frame basis.
956  s->c.h263_aic = 1;
957  s->loop_filter = 1;
958  s->me.unrestricted_mv = 0;
959  break;
960 #endif
961  case AV_CODEC_ID_MPEG4:
962  s->c.out_format = FMT_H263;
963  s->c.h263_pred = 1;
964  s->me.unrestricted_mv = 1;
965  s->flipflop_rounding = 1;
966  s->c.low_delay = m->max_b_frames ? 0 : 1;
967  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
968  break;
970  s->c.out_format = FMT_H263;
971  s->c.h263_pred = 1;
972  s->me.unrestricted_mv = 1;
973  s->c.msmpeg4_version = MSMP4_V2;
974  avctx->delay = 0;
975  s->c.low_delay = 1;
976  break;
978  s->c.out_format = FMT_H263;
979  s->c.h263_pred = 1;
980  s->me.unrestricted_mv = 1;
981  s->c.msmpeg4_version = MSMP4_V3;
982  s->flipflop_rounding = 1;
983  avctx->delay = 0;
984  s->c.low_delay = 1;
985  break;
986  case AV_CODEC_ID_WMV1:
987  s->c.out_format = FMT_H263;
988  s->c.h263_pred = 1;
989  s->me.unrestricted_mv = 1;
990  s->c.msmpeg4_version = MSMP4_WMV1;
991  s->flipflop_rounding = 1;
992  avctx->delay = 0;
993  s->c.low_delay = 1;
994  break;
995  case AV_CODEC_ID_WMV2:
996  s->c.out_format = FMT_H263;
997  s->c.h263_pred = 1;
998  s->me.unrestricted_mv = 1;
999  s->c.msmpeg4_version = MSMP4_WMV2;
1000  s->flipflop_rounding = 1;
1001  avctx->delay = 0;
1002  s->c.low_delay = 1;
1003  break;
1004  default:
1005  av_unreachable("List contains all codecs using ff_mpv_encode_init()");
1006  }
1007 
1008  avctx->has_b_frames = !s->c.low_delay;
1009 
1010  s->c.encoding = 1;
1011 
1012  s->c.progressive_frame =
1013  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
1015  s->c.alternate_scan);
1016 
1019  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1020  (1 << AV_PICTURE_TYPE_P) |
1021  (1 << AV_PICTURE_TYPE_B);
1022  } else if (!m->intra_only) {
1023  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1024  (1 << AV_PICTURE_TYPE_P);
1025  } else {
1026  s->frame_reconstruction_bitfield = 0;
1027  }
1028 
1029  if (m->lmin > m->lmax) {
1030  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1031  m->lmin = m->lmax;
1032  }
1033 
1034  /* ff_mpv_init_duplicate_contexts() will copy (memdup) the contents of the
1035  * main slice to the slice contexts, so we initialize various fields of it
1036  * before calling ff_mpv_init_duplicate_contexts(). */
1037  s->parent = m;
1038  ff_mpv_idct_init(&s->c);
1040  ff_fdctdsp_init(&s->fdsp, avctx);
1041  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1042  ff_pixblockdsp_init(&s->pdsp, 8);
1043  ret = me_cmp_init(m, avctx);
1044  if (ret < 0)
1045  return ret;
1046 
1047  if (!(avctx->stats_out = av_mallocz(256)) ||
1048  !(s->new_pic = av_frame_alloc()) ||
1049  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1050  return AVERROR(ENOMEM);
1051 
1052  ret = init_matrices(m, avctx);
1053  if (ret < 0)
1054  return ret;
1055 
1057 
1058  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1060 #if CONFIG_MSMPEG4ENC
1061  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1063 #endif
1064  }
1065 
1066  s->c.slice_ctx_size = sizeof(*s);
1067  ret = ff_mpv_common_init(&s->c);
1068  if (ret < 0)
1069  return ret;
1070  ret = init_buffers(m);
1071  if (ret < 0)
1072  return ret;
1073  if (s->c.slice_context_count > 1) {
1074  s->rtp_mode = 1;
1076  s->h263_slice_structured = 1;
1077  }
1079  if (ret < 0)
1080  return ret;
1081 
1082  ret = init_slice_buffers(m);
1083  if (ret < 0)
1084  return ret;
1085 
1087  if (ret < 0)
1088  return ret;
1089 
1090  if (m->b_frame_strategy == 2) {
1091  for (int i = 0; i < m->max_b_frames + 2; i++) {
1092  m->tmp_frames[i] = av_frame_alloc();
1093  if (!m->tmp_frames[i])
1094  return AVERROR(ENOMEM);
1095 
1097  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1098  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1099 
1100  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1101  if (ret < 0)
1102  return ret;
1103  }
1104  }
1105 
1106  cpb_props = ff_encode_add_cpb_side_data(avctx);
1107  if (!cpb_props)
1108  return AVERROR(ENOMEM);
1109  cpb_props->max_bitrate = avctx->rc_max_rate;
1110  cpb_props->min_bitrate = avctx->rc_min_rate;
1111  cpb_props->avg_bitrate = avctx->bit_rate;
1112  cpb_props->buffer_size = avctx->rc_buffer_size;
1113 
1114  return 0;
1115 }
1116 
1118 {
1119  MPVMainEncContext *const m = avctx->priv_data;
1120  MPVEncContext *const s = &m->s;
1121 
1123 
1124  ff_mpv_common_end(&s->c);
1125  av_refstruct_pool_uninit(&s->c.picture_pool);
1126 
1127  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
1130  }
1131  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1132  av_frame_free(&m->tmp_frames[i]);
1133 
1134  av_frame_free(&s->new_pic);
1135 
1137 
1138  av_freep(&m->mv_table_base);
1139  av_freep(&s->p_field_select_table[0]);
1141 
1142  av_freep(&s->mb_type);
1143  av_freep(&s->lambda_table);
1144 
1145  av_freep(&s->q_intra_matrix);
1146  av_freep(&s->q_intra_matrix16);
1147  av_freep(&s->dct_offset);
1148 
1149  return 0;
1150 }
1151 
/* put block[] to dest[] */
static inline void put_dct(MPVEncContext *const s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    /* Dequantize intra block i in place with the given qscale, then
     * overwrite dest[] with the IDCT result (idct_put writes, it does
     * not accumulate — compare add_dequant_dct() for the inter case). */
    s->c.dct_unquantize_intra(&s->c, block, i, qscale);
    s->c.idsp.idct_put(dest, line_size, block);
}
1159 
/* Dequantize inter block i and accumulate its IDCT into dest[]
 * (on top of the already motion-compensated prediction). */
static inline void add_dequant_dct(MPVEncContext *const s,
                                   int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    /* block_last_index < 0 means the block has no nonzero coefficients,
     * so the dequantize/IDCT/add can be skipped entirely. */
    if (s->c.block_last_index[i] >= 0) {
        s->c.dct_unquantize_inter(&s->c, block, i, qscale);

        s->c.idsp.idct_add(dest, line_size, block);
    }
}
1169 
/**
 * Performs dequantization and IDCT (if necessary)
 *
 * Reconstructs the current macroblock into s->c.dest[], but only for
 * picture types enabled in s->frame_reconstruction_bitfield.  For inter
 * macroblocks the motion-compensated prediction is assumed to already be
 * in dest[], so the residual is added; intra macroblocks are written out
 * directly.
 */
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
{
    if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                /* undo the IDCT permutation so coefficients print in scan order */
                av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->c.idsp.idct_permutation[j]]);
            }
            av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* skip reconstruction for picture types nothing will predict from */
    if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
        uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
        int dct_linesize, dct_offset;
        const int linesize   = s->c.cur_pic.linesize[0];
        const int uvlinesize = s->c.cur_pic.linesize[1];
        const int block_size = 8;

        /* With interlaced DCT the two 8x8 luma blocks of a column come from
         * alternating field lines: double the stride and offset the second
         * block by one line instead of eight. */
        dct_linesize = linesize << s->c.interlaced_dct;
        dct_offset   = s->c.interlaced_dct ? linesize : linesize * block_size;

        if (!s->c.mb_intra) {
            /* No MC, as that was already done otherwise */
            add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    /* 4:2:0 — one 8x8 block per chroma plane */
                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    /* 4:2:2 — two vertically stacked blocks per chroma plane */
                    dct_linesize >>= 1;
                    dct_offset   >>= 1;
                    add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        } else {
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->c.qscale);
            put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->c.qscale);
            put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->c.qscale);
            put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    dct_offset   >>= 1;
                    dct_linesize >>= 1;
                    put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        }
    }
}
1240 
/**
 * Sum of absolute errors of a 16x16 block against a constant value.
 *
 * @param src    top-left of the 16x16 block
 * @param ref    constant reference value subtracted from every pixel
 * @param stride distance in bytes between successive rows of src
 * @return the sum over all 256 pixels of |src[x,y] - ref|
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1254 
1255 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1256  const uint8_t *ref, int stride)
1257 {
1258  int x, y, w, h;
1259  int acc = 0;
1260 
1261  w = s->c.width & ~15;
1262  h = s->c.height & ~15;
1263 
1264  for (y = 0; y < h; y += 16) {
1265  for (x = 0; x < w; x += 16) {
1266  int offset = x + y * stride;
1267  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1268  stride, 16);
1269  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1270  int sae = get_sae(src + offset, mean, stride);
1271 
1272  acc += sae + 500 < sad;
1273  }
1274  }
1275  return acc;
1276 }
1277 
/**
 * Allocates new buffers for an AVFrame and copies the properties
 * from another AVFrame.
 *
 * The frame is over-allocated by EDGE_WIDTH on every side and the plane
 * pointers are advanced past the top-left padding, so callers see a frame
 * of the nominal coded size with writable edges around it.
 */
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
{
    AVCodecContext *avctx = s->c.avctx;
    int ret;

    /* allocate with room for EDGE_WIDTH padding on all four sides */
    f->width  = avctx->width  + 2 * EDGE_WIDTH;
    f->height = avctx->height + 2 * EDGE_WIDTH;

    /* NOTE(review): the buffer-allocation call that sets 'ret' appears to be
     * missing from this listing (as shown, 'ret' would be read uninitialized
     * below) — confirm against the upstream source. */
    if (ret < 0)
        return ret;

    ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
    if (ret < 0)
        return ret;

    /* advance each plane pointer past the top and left edge padding,
     * scaled by the chroma subsampling for the chroma planes */
    for (int i = 0; f->data[i]; i++) {
        int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
                     f->linesize[i] +
                     (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
        f->data[i] += offset;
    }
    /* restore the nominal (unpadded) dimensions */
    f->width  = avctx->width;
    f->height = avctx->height;

    ret = av_frame_copy_props(f, props_frame);
    if (ret < 0)
        return ret;

    return 0;
}
1313 
/**
 * Append the next input frame (or a flush marker) to m->input_picture[].
 *
 * When pic_arg is given, its data is either referenced directly ("direct"
 * path, when strides and alignment match the encoder's) or copied into a
 * freshly prepared, edge-padded picture.  Its pts is validated against the
 * previously seen pts, or guessed when absent.  When pic_arg is NULL the
 * FIFO is compacted for flushing.
 *
 * @param pic_arg next frame to encode, or NULL at end of stream
 * @return 0 on success, a negative AVERROR code on failure
 */
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
{
    MPVEncContext *const s = &m->s;
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    /* how many future frames must be buffered before encoding can start */
    int encoding_delay = m->max_b_frames ? m->max_b_frames
                                         : (s->c.low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!m->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = m->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = m->user_specified_pts;

                /* timestamps must be strictly increasing */
                if (pts <= last) {
                    av_log(s->c.avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                /* remember the first frame interval to derive dts later */
                if (!s->c.low_delay && display_picture_number == 1)
                    m->dts_delta = pts - last;
            }
            m->user_specified_pts = pts;
        } else {
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                /* no pts supplied: continue one tick after the last one */
                m->user_specified_pts =
                pts = m->user_specified_pts + 1;
                av_log(s->c.avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* The direct (zero-copy) path requires matching strides, macroblock-
         * aligned dimensions and sufficiently aligned data pointers. */
        if (pic_arg->linesize[0] != s->c.linesize ||
            pic_arg->linesize[1] != s->c.uvlinesize ||
            pic_arg->linesize[2] != s->c.uvlinesize)
            direct = 0;
        if ((s->c.width & 15) || (s->c.height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->c.linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->c.avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);

        pic = av_refstruct_pool_get(s->c.picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            /* reference the caller's buffers; mark them as not writable by us */
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* allocate padded buffers and copy plane by plane */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
                int h_shift = i ? s->c.chroma_x_shift : 0;
                int v_shift = i ? s->c.chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
                int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* interlaced MPEG-2 with 32-aligned coded height needs
                 * deeper bottom padding */
                if (   s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->c.progressive_sequence
                    && FFALIGN(s->c.height, 32) - s->c.height > 16)
                    vpad = 32;

                if (!s->c.avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    /* differing strides: copy row by row */
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* pad to macroblock-aligned dimensions */
                if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
            emms_c();
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!m->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure m->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (m->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i - flush_offset] = m->input_picture[i];
    for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i] = NULL;

    m->input_picture[encoding_delay] = pic;

    return 0;
fail:
    av_refstruct_unref(&pic);
    return ret;
}
1454 
1455 static int skip_check(MPVMainEncContext *const m,
1456  const MPVPicture *p, const MPVPicture *ref)
1457 {
1458  MPVEncContext *const s = &m->s;
1459  int score = 0;
1460  int64_t score64 = 0;
1461 
1462  for (int plane = 0; plane < 3; plane++) {
1463  const int stride = p->f->linesize[plane];
1464  const int bw = plane ? 1 : 2;
1465  for (int y = 0; y < s->c.mb_height * bw; y++) {
1466  for (int x = 0; x < s->c.mb_width * bw; x++) {
1467  int off = p->shared ? 0 : 16;
1468  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1469  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1470  int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1471 
1472  switch (FFABS(m->frame_skip_exp)) {
1473  case 0: score = FFMAX(score, v); break;
1474  case 1: score += FFABS(v); break;
1475  case 2: score64 += v * (int64_t)v; break;
1476  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1477  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1478  }
1479  }
1480  }
1481  }
1482  emms_c();
1483 
1484  if (score)
1485  score64 = score;
1486  if (m->frame_skip_exp < 0)
1487  score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
1488  -1.0/m->frame_skip_exp);
1489 
1490  if (score64 < m->frame_skip_threshold)
1491  return 1;
1492  if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
1493  return 1;
1494  return 0;
1495 }
1496 
1498 {
1499  int ret;
1500  int size = 0;
1501 
1503  if (ret < 0)
1504  return ret;
1505 
1506  do {
1508  if (ret >= 0) {
1509  size += pkt->size;
1511  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1512  return ret;
1513  } while (ret >= 0);
1514 
1515  return size;
1516 }
1517 
1519 {
1520  MPVEncContext *const s = &m->s;
1521  AVPacket *pkt;
1522  const int scale = m->brd_scale;
1523  int width = s->c.width >> scale;
1524  int height = s->c.height >> scale;
1525  int out_size, p_lambda, b_lambda, lambda2;
1526  int64_t best_rd = INT64_MAX;
1527  int best_b_count = -1;
1528  int ret = 0;
1529 
1530  av_assert0(scale >= 0 && scale <= 3);
1531 
1532  pkt = av_packet_alloc();
1533  if (!pkt)
1534  return AVERROR(ENOMEM);
1535 
1536  //emms_c();
1537  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1538  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1539  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1540  if (!b_lambda) // FIXME we should do this somewhere else
1541  b_lambda = p_lambda;
1542  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1544 
1545  for (int i = 0; i < m->max_b_frames + 2; i++) {
1546  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1547  s->c.next_pic.ptr;
1548 
1549  if (pre_input_ptr) {
1550  const uint8_t *data[4];
1551  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1552 
1553  if (!pre_input_ptr->shared && i) {
1554  data[0] += INPLACE_OFFSET;
1555  data[1] += INPLACE_OFFSET;
1556  data[2] += INPLACE_OFFSET;
1557  }
1558 
1559  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1560  m->tmp_frames[i]->linesize[0],
1561  data[0],
1562  pre_input_ptr->f->linesize[0],
1563  width, height);
1564  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1565  m->tmp_frames[i]->linesize[1],
1566  data[1],
1567  pre_input_ptr->f->linesize[1],
1568  width >> 1, height >> 1);
1569  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1570  m->tmp_frames[i]->linesize[2],
1571  data[2],
1572  pre_input_ptr->f->linesize[2],
1573  width >> 1, height >> 1);
1574  }
1575  }
1576 
1577  for (int j = 0; j < m->max_b_frames + 1; j++) {
1578  AVCodecContext *c;
1579  int64_t rd = 0;
1580 
1581  if (!m->input_picture[j])
1582  break;
1583 
1585  if (!c) {
1586  ret = AVERROR(ENOMEM);
1587  goto fail;
1588  }
1589 
1590  c->width = width;
1591  c->height = height;
1593  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1594  c->mb_decision = s->c.avctx->mb_decision;
1595  c->me_cmp = s->c.avctx->me_cmp;
1596  c->mb_cmp = s->c.avctx->mb_cmp;
1597  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1598  c->pix_fmt = AV_PIX_FMT_YUV420P;
1599  c->time_base = s->c.avctx->time_base;
1600  c->max_b_frames = m->max_b_frames;
1601 
1602  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1603  if (ret < 0)
1604  goto fail;
1605 
1606 
1608  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1609 
1610  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1611  if (out_size < 0) {
1612  ret = out_size;
1613  goto fail;
1614  }
1615 
1616  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1617 
1618  for (int i = 0; i < m->max_b_frames + 1; i++) {
1619  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1620 
1621  m->tmp_frames[i + 1]->pict_type = is_p ?
1623  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1624 
1625  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1626  if (out_size < 0) {
1627  ret = out_size;
1628  goto fail;
1629  }
1630 
1631  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1632  }
1633 
1634  /* get the delayed frames */
1636  if (out_size < 0) {
1637  ret = out_size;
1638  goto fail;
1639  }
1640  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1641 
1642  rd += c->error[0] + c->error[1] + c->error[2];
1643 
1644  if (rd < best_rd) {
1645  best_rd = rd;
1646  best_b_count = j;
1647  }
1648 
1649 fail:
1652  if (ret < 0) {
1653  best_b_count = ret;
1654  break;
1655  }
1656  }
1657 
1658  av_packet_free(&pkt);
1659 
1660  return best_b_count;
1661 }
1662 
1663 /**
1664  * Determines whether an input picture is discarded or not
1665  * and if not determines the length of the next chain of B frames
1666  * and moves these pictures (including the P frame) into
1667  * reordered_input_picture.
1668  * input_picture[0] is always NULL when exiting this function, even on error;
1669  * reordered_input_picture[0] is always NULL when exiting this function on error.
1670  */
1672 {
1673  MPVEncContext *const s = &m->s;
1674 
1675  /* Either nothing to do or can't do anything */
1676  if (m->reordered_input_picture[0] || !m->input_picture[0])
1677  return 0;
1678 
1679  /* set next picture type & ordering */
1680  if (m->frame_skip_threshold || m->frame_skip_factor) {
1681  if (m->picture_in_gop_number < m->gop_size &&
1682  s->c.next_pic.ptr &&
1683  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1684  // FIXME check that the gop check above is +-1 correct
1686 
1687  ff_vbv_update(m, 0);
1688 
1689  return 0;
1690  }
1691  }
1692 
1693  if (/* m->picture_in_gop_number >= m->gop_size || */
1694  !s->c.next_pic.ptr || m->intra_only) {
1695  m->reordered_input_picture[0] = m->input_picture[0];
1696  m->input_picture[0] = NULL;
1699  m->coded_picture_number++;
1700  } else {
1701  int b_frames = 0;
1702 
1703  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1704  for (int i = 0; i < m->max_b_frames + 1; i++) {
1705  int pict_num = m->input_picture[0]->display_picture_number + i;
1706 
1707  if (pict_num >= m->rc_context.num_entries)
1708  break;
1709  if (!m->input_picture[i]) {
1710  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1711  break;
1712  }
1713 
1714  m->input_picture[i]->f->pict_type =
1715  m->rc_context.entry[pict_num].new_pict_type;
1716  }
1717  }
1718 
1719  if (m->b_frame_strategy == 0) {
1720  b_frames = m->max_b_frames;
1721  while (b_frames && !m->input_picture[b_frames])
1722  b_frames--;
1723  } else if (m->b_frame_strategy == 1) {
1724  for (int i = 1; i < m->max_b_frames + 1; i++) {
1725  if (m->input_picture[i] &&
1726  m->input_picture[i]->b_frame_score == 0) {
1729  m->input_picture[i ]->f->data[0],
1730  m->input_picture[i - 1]->f->data[0],
1731  s->c.linesize) + 1;
1732  }
1733  }
1734  for (int i = 0;; i++) {
1735  if (i >= m->max_b_frames + 1 ||
1736  !m->input_picture[i] ||
1737  m->input_picture[i]->b_frame_score - 1 >
1738  s->c.mb_num / m->b_sensitivity) {
1739  b_frames = FFMAX(0, i - 1);
1740  break;
1741  }
1742  }
1743 
1744  /* reset scores */
1745  for (int i = 0; i < b_frames + 1; i++)
1746  m->input_picture[i]->b_frame_score = 0;
1747  } else if (m->b_frame_strategy == 2) {
1748  b_frames = estimate_best_b_count(m);
1749  if (b_frames < 0) {
1751  return b_frames;
1752  }
1753  }
1754 
1755  emms_c();
1756 
1757  for (int i = b_frames - 1; i >= 0; i--) {
1758  int type = m->input_picture[i]->f->pict_type;
1759  if (type && type != AV_PICTURE_TYPE_B)
1760  b_frames = i;
1761  }
1762  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1763  b_frames == m->max_b_frames) {
1764  av_log(s->c.avctx, AV_LOG_ERROR,
1765  "warning, too many B-frames in a row\n");
1766  }
1767 
1768  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1769  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1770  m->gop_size > m->picture_in_gop_number) {
1771  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1772  } else {
1773  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1774  b_frames = 0;
1775  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1776  }
1777  }
1778 
1779  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1780  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1781  b_frames--;
1782 
1783  m->reordered_input_picture[0] = m->input_picture[b_frames];
1784  m->input_picture[b_frames] = NULL;
1788  m->coded_picture_number++;
1789  for (int i = 0; i < b_frames; i++) {
1790  m->reordered_input_picture[i + 1] = m->input_picture[i];
1791  m->input_picture[i] = NULL;
1792  m->reordered_input_picture[i + 1]->f->pict_type =
1795  m->coded_picture_number++;
1796  }
1797  }
1798 
1799  return 0;
1800 }
1801 
1803 {
1804  MPVEncContext *const s = &m->s;
1805  int ret;
1806 
1808 
1809  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1812 
1814  av_assert1(!m->input_picture[0]);
1815  if (ret < 0)
1816  return ret;
1817 
1818  av_frame_unref(s->new_pic);
1819 
1820  if (m->reordered_input_picture[0]) {
1823 
1824  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1825  // input is a shared pix, so we can't modify it -> allocate a new
1826  // one & ensure that the shared one is reusable
1827  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1828 
1829  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1830  if (ret < 0)
1831  goto fail;
1832  } else {
1833  // input is not a shared pix -> reuse buffer for current_pix
1834  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1835  if (ret < 0)
1836  goto fail;
1837  for (int i = 0; i < MPV_MAX_PLANES; i++)
1838  s->new_pic->data[i] += INPLACE_OFFSET;
1839  }
1840  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1841  m->reordered_input_picture[0] = NULL;
1842  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1843  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1844  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1845  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1846  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1847  if (ret < 0) {
1848  ff_mpv_unref_picture(&s->c.cur_pic);
1849  return ret;
1850  }
1851  s->picture_number = s->c.cur_pic.ptr->display_picture_number;
1852 
1853  }
1854  return 0;
1855 fail:
1857  return ret;
1858 }
1859 
/**
 * Finish the current frame: pad the reconstructed picture's borders (so
 * unrestricted motion vectors may point outside it) and record per-type
 * statistics used by rate control for the next frame.
 */
static void frame_end(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    if (s->me.unrestricted_mv &&
        s->c.cur_pic.reference &&
        !m->intra_only) {
        int hshift = s->c.chroma_x_shift;
        int vshift = s->c.chroma_y_shift;
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
                                s->c.cur_pic.linesize[0],
                                s->c.h_edge_pos, s->c.v_edge_pos,
                                /* NOTE(review): the luma edge-width arguments
                                 * appear to be missing from this listing
                                 * (presumably EDGE_WIDTH, EDGE_WIDTH, matching
                                 * the chroma calls below) — confirm upstream. */
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
                                s->c.cur_pic.linesize[1],
                                s->c.h_edge_pos >> hshift,
                                s->c.v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
                                s->c.cur_pic.linesize[2],
                                s->c.h_edge_pos >> hshift,
                                s->c.v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember type and lambda of this frame for future rate-control decisions */
    m->last_pict_type                  = s->c.pict_type;
    m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
    if (s->c.pict_type != AV_PICTURE_TYPE_B)
        m->last_non_b_pict_type = s->c.pict_type;
}
1897 
1899 {
1900  MPVEncContext *const s = &m->s;
1901  int intra, i;
1902 
1903  for (intra = 0; intra < 2; intra++) {
1904  if (s->dct_count[intra] > (1 << 16)) {
1905  for (i = 0; i < 64; i++) {
1906  s->dct_error_sum[intra][i] >>= 1;
1907  }
1908  s->dct_count[intra] >>= 1;
1909  }
1910 
1911  for (i = 0; i < 64; i++) {
1912  s->dct_offset[intra][i] = (m->noise_reduction *
1913  s->dct_count[intra] +
1914  s->dct_error_sum[intra][i] / 2) /
1915  (s->dct_error_sum[intra][i] + 1);
1916  }
1917  }
1918 }
1919 
/**
 * Set up per-frame state before encoding: tag the current picture with its
 * type and rotate the last/next reference pictures for non-B frames.
 */
static void frame_start(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;

    /* B-frames are not references: only I/P frames advance last/next */
    if (s->c.pict_type != AV_PICTURE_TYPE_B) {
        ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
        ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
    }

    av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
    if (s->dct_error_sum) {
        /* NOTE(review): the body of this branch (presumably the
         * noise-reduction update call) is missing from this listing —
         * confirm against the upstream source. */
    }
}
1936 
1938  const AVFrame *pic_arg, int *got_packet)
1939 {
1940  MPVMainEncContext *const m = avctx->priv_data;
1941  MPVEncContext *const s = &m->s;
1942  int stuffing_count, ret;
1943  int context_count = s->c.slice_context_count;
1944 
1945  ff_mpv_unref_picture(&s->c.cur_pic);
1946 
1947  m->vbv_ignore_qmax = 0;
1948 
1949  m->picture_in_gop_number++;
1950 
1951  ret = load_input_picture(m, pic_arg);
1952  if (ret < 0)
1953  return ret;
1954 
1956  if (ret < 0)
1957  return ret;
1958 
1959  /* output? */
1960  if (s->new_pic->data[0]) {
1961  int growing_buffer = context_count == 1 && !s->data_partitioning;
1962  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1963  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1964  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1965  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1966  if (ret < 0)
1967  return ret;
1968  }
1969  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1970  return ret;
1972  if (s->mb_info) {
1973  s->mb_info_ptr = av_packet_new_side_data(pkt,
1975  s->c.mb_width*s->c.mb_height*12);
1976  if (!s->mb_info_ptr)
1977  return AVERROR(ENOMEM);
1978  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1979  }
1980 
1981  s->c.pict_type = s->new_pic->pict_type;
1982  //emms_c();
1983  frame_start(m);
1984 vbv_retry:
1985  ret = encode_picture(m, pkt);
1986  if (growing_buffer) {
1987  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1988  pkt->data = s->pb.buf;
1990  }
1991  if (ret < 0)
1992  return -1;
1993 
1994  frame_end(m);
1995 
1996  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
1998 
1999  if (avctx->rc_buffer_size) {
2000  RateControlContext *rcc = &m->rc_context;
2001  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
2002  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
2003  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
2004 
2005  if (put_bits_count(&s->pb) > max_size &&
2006  s->lambda < m->lmax) {
2007  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2008  (s->c.qscale + 1) / s->c.qscale);
2009  if (s->adaptive_quant) {
2010  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
2011  s->lambda_table[i] =
2012  FFMAX(s->lambda_table[i] + min_step,
2013  s->lambda_table[i] * (s->c.qscale + 1) /
2014  s->c.qscale);
2015  }
2016  s->c.mb_skipped = 0; // done in frame_start()
2017  // done in encode_picture() so we must undo it
2018  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
2019  s->c.no_rounding ^= s->flipflop_rounding;
2020  }
2021  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
2022  s->c.time_base = s->c.last_time_base;
2023  s->c.last_non_b_time = s->c.time - s->c.pp_time;
2024  }
2025  m->vbv_ignore_qmax = 1;
2026  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2027  goto vbv_retry;
2028  }
2029 
2031  }
2032 
2035 
2036  for (int i = 0; i < MPV_MAX_PLANES; i++)
2037  avctx->error[i] += s->encoding_error[i];
2038  ff_encode_add_stats_side_data(pkt, s->c.cur_pic.ptr->f->quality,
2039  s->encoding_error,
2041  s->c.pict_type);
2042 
2044  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2045  s->misc_bits + s->i_tex_bits +
2046  s->p_tex_bits);
2047  flush_put_bits(&s->pb);
2048  m->frame_bits = put_bits_count(&s->pb);
2049 
2050  stuffing_count = ff_vbv_update(m, m->frame_bits);
2051  m->stuffing_bits = 8*stuffing_count;
2052  if (stuffing_count) {
2053  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2054  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2055  return -1;
2056  }
2057 
2058  switch (s->c.codec_id) {
2061  while (stuffing_count--) {
2062  put_bits(&s->pb, 8, 0);
2063  }
2064  break;
2065  case AV_CODEC_ID_MPEG4:
2066  put_bits(&s->pb, 16, 0);
2067  put_bits(&s->pb, 16, 0x1C3);
2068  stuffing_count -= 4;
2069  while (stuffing_count--) {
2070  put_bits(&s->pb, 8, 0xFF);
2071  }
2072  break;
2073  default:
2074  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2075  m->stuffing_bits = 0;
2076  }
2077  flush_put_bits(&s->pb);
2078  m->frame_bits = put_bits_count(&s->pb);
2079  }
2080 
2081  /* update MPEG-1/2 vbv_delay for CBR */
2082  if (avctx->rc_max_rate &&
2084  s->c.out_format == FMT_MPEG1 &&
2085  90000LL * (avctx->rc_buffer_size - 1) <=
2086  avctx->rc_max_rate * 0xFFFFLL) {
2087  AVCPBProperties *props;
2088  size_t props_size;
2089 
2090  int vbv_delay, min_delay;
2091  double inbits = avctx->rc_max_rate *
2093  int minbits = m->frame_bits - 8 *
2094  (m->vbv_delay_pos - 1);
2095  double bits = m->rc_context.buffer_index + minbits - inbits;
2096  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2097 
2098  if (bits < 0)
2100  "Internal error, negative bits\n");
2101 
2102  av_assert1(s->c.repeat_first_field == 0);
2103 
2104  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2105  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2106  avctx->rc_max_rate;
2107 
2108  vbv_delay = FFMAX(vbv_delay, min_delay);
2109 
2110  av_assert0(vbv_delay < 0xFFFF);
2111 
2112  vbv_delay_ptr[0] &= 0xF8;
2113  vbv_delay_ptr[0] |= vbv_delay >> 13;
2114  vbv_delay_ptr[1] = vbv_delay >> 5;
2115  vbv_delay_ptr[2] &= 0x07;
2116  vbv_delay_ptr[2] |= vbv_delay << 3;
2117 
2118  props = av_cpb_properties_alloc(&props_size);
2119  if (!props)
2120  return AVERROR(ENOMEM);
2121  props->vbv_delay = vbv_delay * 300;
2122 
2124  (uint8_t*)props, props_size);
2125  if (ret < 0) {
2126  av_freep(&props);
2127  return ret;
2128  }
2129  }
2130  m->total_bits += m->frame_bits;
2131 
2132  pkt->pts = s->c.cur_pic.ptr->f->pts;
2133  pkt->duration = s->c.cur_pic.ptr->f->duration;
2134  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2135  if (!s->c.cur_pic.ptr->coded_picture_number)
2136  pkt->dts = pkt->pts - m->dts_delta;
2137  else
2138  pkt->dts = m->reordered_pts;
2139  m->reordered_pts = pkt->pts;
2140  } else
2141  pkt->dts = pkt->pts;
2142 
2143  // the no-delay case is handled in generic code
2145  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2146  if (ret < 0)
2147  return ret;
2148  }
2149 
2150  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2152  if (s->mb_info)
2154  } else {
2155  m->frame_bits = 0;
2156  }
2157 
2158  ff_mpv_unref_picture(&s->c.cur_pic);
2159 
2160  av_assert1((m->frame_bits & 7) == 0);
2161 
2162  pkt->size = m->frame_bits / 8;
2163  *got_packet = !!pkt->size;
2164  return 0;
2165 }
2166 
/* NOTE(review): the defining line of this function (its name and first
 * parameter — a static helper taking the encoder context `s` — upstream
 * "dct_single_coeff_elimination") was lost during extraction; the
 * remaining parameter list below is kept unchanged. */
                                         int n, int threshold)
{
    /* Zero out block n entirely when it contains only a few isolated
     * +-1 coefficients: the rate cost of coding them usually exceeds the
     * distortion of dropping them.  Each run length is weighted by tab[]
     * (short runs near the block start cost more), and the block is
     * eliminated only if the accumulated score stays below `threshold`.
     * A negative threshold means the DC coefficient takes part in the
     * scoring/elimination as well. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->c.block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;           // DC participates in elimination
        threshold = -threshold;
    } else
        skip_dc = 1;           // DC is preserved

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            /* any coefficient with |level| > 1: block is kept as-is */
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* cheap block: zero every (possibly nonzero) coefficient */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        block[j] = 0;
    }
    /* keep the DC-only marker if DC survived, else mark block empty */
    if (block[0])
        s->c.block_last_index[n] = 0;
    else
        s->c.block_last_index[n] = -1;
}
2222 
2223 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2224  int last_index)
2225 {
2226  int i;
2227  const int maxlevel = s->max_qcoeff;
2228  const int minlevel = s->min_qcoeff;
2229  int overflow = 0;
2230 
2231  if (s->c.mb_intra) {
2232  i = 1; // skip clipping of intra dc
2233  } else
2234  i = 0;
2235 
2236  for (; i <= last_index; i++) {
2237  const int j = s->c.intra_scantable.permutated[i];
2238  int level = block[j];
2239 
2240  if (level > maxlevel) {
2241  level = maxlevel;
2242  overflow++;
2243  } else if (level < minlevel) {
2244  level = minlevel;
2245  overflow++;
2246  }
2247 
2248  block[j] = level;
2249  }
2250 
2251  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2252  av_log(s->c.avctx, AV_LOG_INFO,
2253  "warning, clipping %d dct coefficients to %d..%d\n",
2254  overflow, minlevel, maxlevel);
2255 }
2256 
2257 static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
2258 {
2259  int x, y;
2260  // FIXME optimize
2261  for (y = 0; y < 8; y++) {
2262  for (x = 0; x < 8; x++) {
2263  int x2, y2;
2264  int sum = 0;
2265  int sqr = 0;
2266  int count = 0;
2267 
2268  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2269  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2270  int v = ptr[x2 + y2 * stride];
2271  sum += v;
2272  sqr += v * v;
2273  count++;
2274  }
2275  }
2276  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2277  }
2278  }
2279 }
2280 
/* NOTE(review): the defining line of this function (its name and first
 * parameter; judging by the encode_mb() wrapper below it is the
 * chroma-parameterized "encode_mb_internal(MPVEncContext *const s, ...")
 * was lost during extraction.  A few interior continuation lines were
 * lost the same way and are marked where they occur. */
                              int motion_x, int motion_y,
                              int mb_block_height,
                              int mb_block_width,
                              int mb_block_count,
                              int chroma_x_shift,
                              int chroma_y_shift,
                              int chroma_format)
{
/* Interlaced DCT is only possible with MPEG-2 and MPEG-4
 * and neither of these encoders currently supports 444. */
#define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
                           (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
    DECLARE_ALIGNED(16, int16_t, weight)[12][64];
    int16_t orig[12][64];
    const int mb_x = s->c.mb_x;
    const int mb_y = s->c.mb_y;
    int i;
    int skip_dct[12];
    int dct_offset = s->c.linesize * 8; // default for progressive frames
    int uv_dct_offset = s->c.uvlinesize * 8;
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    ptrdiff_t wrap_y, wrap_c;

    for (i = 0; i < mb_block_count; i++)
        skip_dct[i] = s->skipdct;

    /* per-MB adaptive quantization: pick this MB's lambda/qscale */
    if (s->adaptive_quant) {
        const int last_qp = s->c.qscale;
        const int mb_xy = mb_x + mb_y * s->c.mb_stride;

        s->lambda = s->lambda_table[mb_xy];
        s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
        /* NOTE(review): the shift amount on the continuation line
         * (FF_LAMBDA_SHIFT upstream) was lost in extraction. */

        if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
            s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;

            if (s->c.out_format == FMT_H263) {
                s->dquant = av_clip(s->dquant, -2, 2);

                if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
                    if (!s->c.mb_intra) {
                        if (s->c.pict_type == AV_PICTURE_TYPE_B) {
                            /* MPEG-4 B-frames: only even dquant, and no
                             * dquant with direct MVs */
                            if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
                                s->dquant = 0;
                        }
                        /* no dquant with 4MV */
                        if (s->c.mv_type == MV_TYPE_8X8)
                            s->dquant = 0;
                    }
                }
            }
        }
        ff_set_qscale(&s->c, last_qp + s->dquant);
    } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
        ff_set_qscale(&s->c, s->c.qscale + s->dquant);

    /* source pixel pointers for this macroblock */
    wrap_y = s->c.linesize;
    wrap_c = s->c.uvlinesize;
    ptr_y  = s->new_pic->data[0] +
             (mb_y * 16 * wrap_y) + mb_x * 16;
    ptr_cb = s->new_pic->data[1] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
    ptr_cr = s->new_pic->data[2] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;

    /* for MBs hanging over the right/bottom edge, work on an
     * edge-emulated copy instead of reading out of bounds */
    if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
        s->c.codec_id != AV_CODEC_ID_AMV) {
        uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
        int cw = (s->c.width  + chroma_x_shift) >> chroma_x_shift;
        int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
        s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
                                   wrap_y, wrap_y,
                                   16, 16, mb_x * 16, mb_y * 16,
                                   s->c.width, s->c.height);
        ptr_y = ebuf;
        s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
                                   wrap_c, wrap_c,
                                   mb_block_width, mb_block_height,
                                   mb_x * mb_block_width, mb_y * mb_block_height,
                                   cw, ch);
        ptr_cb = ebuf + 16 * wrap_y;
        s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
                                   wrap_c, wrap_c,
                                   mb_block_width, mb_block_height,
                                   mb_x * mb_block_width, mb_y * mb_block_height,
                                   cw, ch);
        ptr_cr = ebuf + 16 * wrap_y + 16;
    }

    if (s->c.mb_intra) {
        /* intra path: decide frame vs field (interlaced) DCT, then load
         * the raw pixels into the coefficient blocks */
        if (INTERLACED_DCT(s)) {
            int progressive_score, interlaced_score;

            s->c.interlaced_dct = 0;
            progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
                                s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
                                                NULL, wrap_y, 8) - 400;

            if (progressive_score > 0) {
                interlaced_score = s->ildct_cmp[1](s, ptr_y,
                                                   NULL, wrap_y * 2, 8) +
                                   s->ildct_cmp[1](s, ptr_y + wrap_y,
                                                   NULL, wrap_y * 2, 8);
                if (progressive_score > interlaced_score) {
                    s->c.interlaced_dct = 1;

                    /* switch luma (and possibly chroma) to field layout */
                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    if (chroma_format == CHROMA_422 ||
                        /* NOTE(review): the second operand of this condition
                         * (a CHROMA_444 test upstream) was lost in
                         * extraction, leaving the if unterminated here. */
                        wrap_c <<= 1;
                }
            }
        }

        s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
        s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
        s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
        s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);

        if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
            /* gray-only encode: chroma blocks are skipped */
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
            s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
            if (chroma_format == CHROMA_422) {
                s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
            } else if (chroma_format == CHROMA_444) {
                s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
                s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
                s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
                s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
            }
        }
    } else {
        /* inter path: run motion compensation into s->c.dest[], then
         * transform the prediction residual */
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        uint8_t *dest_y, *dest_cb, *dest_cr;

        dest_y  = s->c.dest[0];
        dest_cb = s->c.dest[1];
        dest_cr = s->c.dest[2];

        if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
            op_pix  = s->c.hdsp.put_pixels_tab;
            op_qpix = s->c.qdsp.put_qpel_pixels_tab;
        } else {
            op_pix  = s->c.hdsp.put_no_rnd_pixels_tab;
            op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
        }

        if (s->c.mv_dir & MV_DIR_FORWARD) {
            ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
                          s->c.last_pic.data,
                          op_pix, op_qpix);
            /* a following backward MC must average into the forward
             * prediction (bidirectional) */
            op_pix  = s->c.hdsp.avg_pixels_tab;
            op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
        }
        if (s->c.mv_dir & MV_DIR_BACKWARD) {
            ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
                          s->c.next_pic.data,
                          op_pix, op_qpix);
        }

        if (INTERLACED_DCT(s)) {
            int progressive_score, interlaced_score;

            s->c.interlaced_dct = 0;
            progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
                                s->ildct_cmp[0](s, dest_y + wrap_y * 8,
                                                ptr_y + wrap_y * 8,
                                                wrap_y, 8) - 400;

            if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
                progressive_score -= 400;

            if (progressive_score > 0) {
                interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
                                                   wrap_y * 2, 8) +
                                   s->ildct_cmp[0](s, dest_y + wrap_y,
                                                   ptr_y + wrap_y,
                                                   wrap_y * 2, 8);

                if (progressive_score > interlaced_score) {
                    s->c.interlaced_dct = 1;

                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    if (chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
        s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
        s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
                            dest_y + dct_offset, wrap_y);
        s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
                            dest_y + dct_offset + 8, wrap_y);

        if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
            s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
            if (!chroma_y_shift) { /* 422 */
                s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
                                    dest_cb + uv_dct_offset, wrap_c);
                s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
                                    dest_cr + uv_dct_offset, wrap_c);
            }
        }
        /* pre quantization */
        /* low-variance MBs: skip blocks whose residual SAD is tiny
         * relative to qscale, they would quantize to (almost) nothing */
        if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
            // FIXME optimize
            if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
                skip_dct[0] = 1;
            if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
                skip_dct[1] = 1;
            if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
                              wrap_y, 8) < 20 * s->c.qscale)
                skip_dct[2] = 1;
            if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
                              wrap_y, 8) < 20 * s->c.qscale)
                skip_dct[3] = 1;
            if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
                skip_dct[4] = 1;
            if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
                skip_dct[5] = 1;
            if (!chroma_y_shift) { /* 422 */
                if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
                                  dest_cb + uv_dct_offset,
                                  wrap_c, 8) < 20 * s->c.qscale)
                    skip_dct[6] = 1;
                if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
                                  dest_cr + uv_dct_offset,
                                  wrap_c, 8) < 20 * s->c.qscale)
                    skip_dct[7] = 1;
            }
        }
    }

    /* noise shaping: compute visual weights and keep the unquantized
     * coefficients for the refinement pass below */
    if (s->quantizer_noise_shaping) {
        if (!skip_dct[0])
            get_visual_weight(weight[0], ptr_y , wrap_y);
        if (!skip_dct[1])
            get_visual_weight(weight[1], ptr_y + 8, wrap_y);
        if (!skip_dct[2])
            get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
        if (!skip_dct[3])
            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
        if (!skip_dct[4])
            get_visual_weight(weight[4], ptr_cb , wrap_c);
        if (!skip_dct[5])
            get_visual_weight(weight[5], ptr_cr , wrap_c);
        if (!chroma_y_shift) { /* 422 */
            if (!skip_dct[6])
                get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
                                  wrap_c);
            if (!skip_dct[7])
                get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
                                  wrap_c);
        }
        memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
    }

    /* DCT & quantize */
    av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
    {
        for (i = 0; i < mb_block_count; i++) {
            if (!skip_dct[i]) {
                int overflow;
                s->c.block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->c.qscale, &overflow);
                // FIXME we could decide to change to quantizer instead of
                // clipping
                // JS: I don't think that would be a good idea it could lower
                //     quality instead of improve it. Just INTRADC clipping
                //     deserves changes in quantizer
                if (overflow)
                    clip_coeffs(s, s->block[i], s->c.block_last_index[i]);
            } else
                s->c.block_last_index[i] = -1;
        }
        if (s->quantizer_noise_shaping) {
            for (i = 0; i < mb_block_count; i++) {
                if (!skip_dct[i]) {
                    s->c.block_last_index[i] =
                        dct_quantize_refine(s, s->block[i], weight[i],
                                            orig[i], i, s->c.qscale);
                }
            }
        }

        /* drop inter blocks that contain only cheap isolated +-1 coeffs */
        if (s->luma_elim_threshold && !s->c.mb_intra)
            for (i = 0; i < 4; i++)
                dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
        if (s->chroma_elim_threshold && !s->c.mb_intra)
            for (i = 4; i < mb_block_count; i++)
                dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);

        if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
            for (i = 0; i < mb_block_count; i++) {
                if (s->c.block_last_index[i] == -1)
                    s->coded_score[i] = INT_MAX / 256;
            }
        }
    }

    /* gray intra MBs: force chroma to the mid-gray DC value */
    if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
        s->c.block_last_index[4] =
        s->c.block_last_index[5] = 0;
        s->block[4][0] =
        s->block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
        if (!chroma_y_shift) { /* 422 / 444 */
            for (i=6; i<12; i++) {
                s->c.block_last_index[i] = 0;
                s->block[i][0] = s->block[4][0];
            }
        }
    }

    // non c quantize code returns incorrect block_last_index FIXME
    if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
        for (i = 0; i < mb_block_count; i++) {
            int j;
            if (s->c.block_last_index[i] > 0) {
                for (j = 63; j > 0; j--) {
                    if (s->block[i][s->c.intra_scantable.permutated[j]])
                        break;
                }
                s->c.block_last_index[i] = j;
            }
        }
    }

    /* finally emit the macroblock with the codec-specific writer */
    s->encode_mb(s, s->block, motion_x, motion_y);
}
2627 
2628 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2629 {
2630  if (s->c.chroma_format == CHROMA_420)
2631  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2632  else if (s->c.chroma_format == CHROMA_422)
2633  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2634  else
2635  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2636 }
2637 
/* Snapshot of the encoder state that encode_mb_hq() saves and restores
 * around a trial macroblock encode.  The member list mirrors what the
 * COPY_CONTEXT helpers below copy.
 * NOTE(review): several members were lost from this listing during
 * extraction — the COPY_CONTEXT macro also accesses mb_skip_run, the
 * bit-count statistics (mv_bits, i_tex_bits, p_tex_bits, i_count,
 * misc_bits, last_bits), esc3_level_length, pb/pb2/tex_pb contexts and
 * c.mb_intra/mb_skipped/mv_type/mv_dir/block_last_index/interlaced_dct,
 * so those fields must exist here in the full source. */
typedef struct MBBackup {
    struct {
        int mv[2][4][2];       // motion vectors per direction and block
        int last_mv[2][2][2];  // MV predictors
        int last_dc[3];        // DC predictors (Y, Cb, Cr)
        int qscale;
    } c;
    int dquant;                // QP delta chosen for this MB
    int16_t (*block)[64];      // coefficient blocks used by the trial
} MBBackup;
2656 
/* Generate a pair of state-copy helpers between MPVEncContext and MBBackup:
 * - BEFORE##_context_before_encode(): copies the prediction and
 *   rate-control state a trial encode may clobber (MV/DC predictors,
 *   skip run, bit statistics, qscale/dquant), resetting mb_skipped and
 *   last_bits.
 * - AFTER##_context_after_encode(): additionally copies the trial's
 *   results (chosen MVs, MB mode flags, block_last_index, interlaced_dct,
 *   and the PutBitContexts — pb2/tex_pb only when data partitioning is on).
 * Instantiated twice below so one field list yields both the save
 * (encoder -> backup) and restore (backup -> encoder) directions. */
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
                                                   const SRC_TYPE *const s) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
    \
    /* MPEG-1 */ \
    d->mb_skip_run = s->mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->c.last_dc[i] = s->c.last_dc[i]; \
    \
    /* statistics */ \
    d->mv_bits = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count = s->i_count; \
    d->misc_bits = s->misc_bits; \
    d->last_bits = 0; \
    \
    d->c.mb_skipped = 0; \
    d->c.qscale = s->c.qscale; \
    d->dquant = s->dquant; \
    \
    d->esc3_level_length = s->esc3_level_length; \
} \
 \
static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
                                                  const SRC_TYPE *const s, \
                                                  int data_partitioning) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
    \
    /* MPEG-1 */ \
    d->mb_skip_run = s->mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->c.last_dc[i] = s->c.last_dc[i]; \
    \
    /* statistics */ \
    d->mv_bits = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count = s->i_count; \
    d->misc_bits = s->misc_bits; \
    \
    d->c.mb_intra = s->c.mb_intra; \
    d->c.mb_skipped = s->c.mb_skipped; \
    d->c.mv_type = s->c.mv_type; \
    d->c.mv_dir = s->c.mv_dir; \
    d->pb = s->pb; \
    if (data_partitioning) { \
        d->pb2 = s->pb2; \
        d->tex_pb = s->tex_pb; \
    } \
    d->block = s->block; \
    for (int i = 0; i < 8; i++) \
        d->c.block_last_index[i] = s->c.block_last_index[i]; \
    d->c.interlaced_dct = s->c.interlaced_dct; \
    d->c.qscale = s->c.qscale; \
    \
    d->esc3_level_length = s->esc3_level_length; \
}

COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2724 
/* Trial-encode one macroblock with the current mode/MV settings and keep
 * it if its rate(-distortion) score beats the best candidate so far.
 * The encoder state is first reset from `backup`; the bits go into one of
 * two ping-pong PutBitContexts selected by *next_block, and on a win the
 * resulting state is saved into `best` and *next_block is flipped so the
 * winning bits are preserved while the other buffer is reused. */
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
                         PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                         int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    reset_context_before_encode(s, backup);

    s->block = s->blocks[*next_block];
    s->pb = pb[*next_block];
    if (s->data_partitioning) {
        s->pb2   = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* candidate 1 must not overwrite the pixels of the currently best
         * candidate, so reconstruct into the RD scratchpad instead */
        memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
        s->c.dest[0] = s->c.sc.rd_scratchpad;
        s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
        s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
        av_assert0(s->c.linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* base score: bits spent (all partitions) */
    score= put_bits_count(&s->pb);
    if (s->data_partitioning) {
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
        /* full RD: reconstruct and add lambda-weighted distortion */
        mpv_reconstruct_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        save_context_after_encode(best, s, s->data_partitioning);
    }
}
2775 
2776 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2777 {
2778  const uint32_t *sq = ff_square_tab + 256;
2779  int acc=0;
2780  int x,y;
2781 
2782  if(w==16 && h==16)
2783  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2784  else if(w==8 && h==8)
2785  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2786 
2787  for(y=0; y<h; y++){
2788  for(x=0; x<w; x++){
2789  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2790  }
2791  }
2792 
2793  av_assert2(acc>=0);
2794 
2795  return acc;
2796 }
2797 
2798 static int sse_mb(MPVEncContext *const s)
2799 {
2800  int w= 16;
2801  int h= 16;
2802  int chroma_mb_w = w >> s->c.chroma_x_shift;
2803  int chroma_mb_h = h >> s->c.chroma_y_shift;
2804 
2805  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2806  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2807 
2808  if(w==16 && h==16)
2809  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2810  s->c.dest[0], s->c.linesize, 16) +
2811  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2812  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2813  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2814  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2815  else
2816  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2817  s->c.dest[0], w, h, s->c.linesize) +
2818  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2819  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2820  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2821  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2822 }
2823 
    /* NOTE(review): the signature line of this function was lost during
     * extraction; by the slice-thread convention used below it takes
     * (AVCodecContext *, void *arg) and returns int. */
    MPVEncContext *const s = *(void**)arg;

    /* pre-pass motion estimation over this slice, scanning rows
     * bottom-to-top and MBs right-to-left, with the dedicated
     * pre-pass diamond size */
    s->me.pre_pass = 1;
    s->me.dia_size = s->c.avctx->pre_dia_size;
    s->c.first_slice_line = 1;
    for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
        for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
            ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
        s->c.first_slice_line = 0;
    }

    s->me.pre_pass = 0;

    return 0;
}
2841 
    /* NOTE(review): the signature line of this function was lost during
     * extraction; by the slice-thread convention used below it takes
     * (AVCodecContext *, void *arg) and returns int. */
    MPVEncContext *const s = *(void**)arg;

    /* main motion-estimation pass over this slice, top-to-bottom */
    s->me.dia_size = s->c.avctx->dia_size;
    s->c.first_slice_line = 1;
    for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
        s->c.mb_x = 0; //for block init below
        ff_init_block_index(&s->c);
        for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
            /* advance the per-MB luma block indices by one MB */
            s->c.block_index[0] += 2;
            s->c.block_index[1] += 2;
            s->c.block_index[2] += 2;
            s->c.block_index[3] += 2;

            /* compute motion vector & mb_type and store in context */
            if (s->c.pict_type == AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
            else
                ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
        }
        s->c.first_slice_line = 0;
    }
    return 0;
}
2866 
2867 static int mb_var_thread(AVCodecContext *c, void *arg){
2868  MPVEncContext *const s = *(void**)arg;
2869 
2870  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2871  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2872  int xx = mb_x * 16;
2873  int yy = mb_y * 16;
2874  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2875  int varc;
2876  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2877 
2878  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2879  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2880 
2881  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2882  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2883  s->me.mb_var_sum_temp += varc;
2884  }
2885  }
2886  return 0;
2887 }
2888 
/* Finish the current slice: emit codec-specific stuffing/trailer bits,
 * byte-align the bitstream and (for pass-1 stats) account the bits.
 * NOTE(review): three statement lines inside this function were lost
 * during extraction (marked below); as written, the MPEG-4
 * partitioned-frame `if` now governs the ff_mpeg4_stuffing() call,
 * which is not the upstream structure. */
static void write_slice_end(MPVEncContext *const s)
{
    if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
        if (s->partitioned_frame)
            /* NOTE(review): the statement on this line (a partition-merge
             * call upstream) was lost in extraction */

        ff_mpeg4_stuffing(&s->pb);
    } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
               s->c.out_format == FMT_MJPEG) {
        /* NOTE(review): MJPEG picture-trailer call lost in extraction */
    } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
        /* NOTE(review): SpeedHQ end-of-slice call lost in extraction */
    }

    flush_put_bits(&s->pb);

    if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2908 
2909 static void write_mb_info(MPVEncContext *const s)
2910 {
2911  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2912  int offset = put_bits_count(&s->pb);
2913  int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->gob_index);
2914  int gobn = s->c.mb_y / s->gob_index;
2915  int pred_x, pred_y;
2916  if (CONFIG_H263_ENCODER)
2917  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
2918  bytestream_put_le32(&ptr, offset);
2919  bytestream_put_byte(&ptr, s->c.qscale);
2920  bytestream_put_byte(&ptr, gobn);
2921  bytestream_put_le16(&ptr, mba);
2922  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2923  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2924  /* 4MV not implemented */
2925  bytestream_put_byte(&ptr, 0); /* hmv2 */
2926  bytestream_put_byte(&ptr, 0); /* vmv2 */
2927 }
2928 
/* Maintain the per-macroblock info side data: open a new 12-byte record
 * whenever at least s->mb_info bytes of bitstream have been produced
 * since the previous record, and fill the current record via
 * write_mb_info().  Called with startcode=1 right before a resync/start
 * code is written and again with startcode=0 afterwards. */
static void update_mb_info(MPVEncContext *const s, int startcode)
{
    if (!s->mb_info)
        return;
    /* enough bitstream since the last record: reserve a new slot */
    if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bytes_count(&s->pb, 0);
        /* This might have incremented mb_info_size above, and we return without
         * actually writing any info into that slot yet. But in that case,
         * this will be called again at the start of the after writing the
         * start code, actually writing the mb info. */
        return;
    }

    s->last_mb_info = put_bytes_count(&s->pb, 0);
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2951 
2952 int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
2953 {
2954  if (put_bytes_left(&s->pb, 0) < threshold
2955  && s->c.slice_context_count == 1
2956  && s->pb.buf == s->c.avctx->internal->byte_buffer) {
2957  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2958 
2959  uint8_t *new_buffer = NULL;
2960  int new_buffer_size = 0;
2961 
2962  if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2963  av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2964  return AVERROR(ENOMEM);
2965  }
2966 
2967  emms_c();
2968 
2969  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2970  s->c.avctx->internal->byte_buffer_size + size_increase);
2971  if (!new_buffer)
2972  return AVERROR(ENOMEM);
2973 
2974  memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
2975  av_free(s->c.avctx->internal->byte_buffer);
2976  s->c.avctx->internal->byte_buffer = new_buffer;
2977  s->c.avctx->internal->byte_buffer_size = new_buffer_size;
2978  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2979  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2980  }
2981  if (put_bytes_left(&s->pb, 0) < threshold)
2982  return AVERROR(EINVAL);
2983  return 0;
2984 }
2985 
2986 static int encode_thread(AVCodecContext *c, void *arg){
2987  MPVEncContext *const s = *(void**)arg;
2988  int chr_h = 16 >> s->c.chroma_y_shift;
2989  int i;
2990  MBBackup best_s = { 0 }, backup_s;
2991  uint8_t bit_buf[2][MAX_MB_BYTES];
2992  // + 2 because ff_copy_bits() overreads
2993  uint8_t bit_buf2[2][MAX_PB2_MB_SIZE + 2];
2994  uint8_t bit_buf_tex[2][MAX_AC_TEX_MB_SIZE + 2];
2995  PutBitContext pb[2], pb2[2], tex_pb[2];
2996 
2997  for(i=0; i<2; i++){
2998  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2999  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_PB2_MB_SIZE);
3000  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_AC_TEX_MB_SIZE);
3001  }
3002 
3003  s->last_bits= put_bits_count(&s->pb);
3004  s->mv_bits=0;
3005  s->misc_bits=0;
3006  s->i_tex_bits=0;
3007  s->p_tex_bits=0;
3008  s->i_count=0;
3009 
3010  for(i=0; i<3; i++){
3011  /* init last dc values */
3012  /* note: quant matrix value (8) is implied here */
3013  s->c.last_dc[i] = 128 << s->c.intra_dc_precision;
3014 
3015  s->encoding_error[i] = 0;
3016  }
3017  if (s->c.codec_id == AV_CODEC_ID_AMV) {
3018  s->c.last_dc[0] = 128 * 8 / 13;
3019  s->c.last_dc[1] = 128 * 8 / 14;
3020  s->c.last_dc[2] = 128 * 8 / 14;
3021 #if CONFIG_MPEG4_ENCODER
3022  } else if (s->partitioned_frame) {
3023  av_assert1(s->c.codec_id == AV_CODEC_ID_MPEG4);
3025 #endif
3026  }
3027  s->mb_skip_run = 0;
3028  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
3029 
3030  s->last_mv_dir = 0;
3031 
3032  s->c.resync_mb_x = 0;
3033  s->c.resync_mb_y = 0;
3034  s->c.first_slice_line = 1;
3035  s->ptr_lastgob = s->pb.buf;
3036  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
3037  int mb_y;
3038  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
3039  int first_in_slice;
3040  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
3041  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3043  s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 1024 << s->c.intra_dc_precision;
3044  } else {
3045  mb_y = mb_y_order;
3046  }
3047  s->c.mb_x = 0;
3048  s->c.mb_y = mb_y;
3049 
3050  ff_set_qscale(&s->c, s->c.qscale);
3051  ff_init_block_index(&s->c);
3052 
3053  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3054  int mb_type, xy;
3055 // int d;
3056  int dmin= INT_MAX;
3057  int dir;
3058  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3059  + s->c.mb_width*MAX_MB_BYTES;
3060 
3062  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3063  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3064  return -1;
3065  }
3066  if (s->data_partitioning) {
3067  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3068  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3069  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3070  return -1;
3071  }
3072  }
3073 
3074  s->c.mb_x = mb_x;
3075  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3076  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3077 
3078  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3080  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3081  mb_type = s->mb_type[xy];
3082 
3083  /* write gob / video packet header */
3084  if(s->rtp_mode){
3085  int current_packet_size, is_gob_start;
3086 
3087  current_packet_size = put_bytes_count(&s->pb, 1)
3088  - (s->ptr_lastgob - s->pb.buf);
3089 
3090  is_gob_start = s->rtp_payload_size &&
3091  current_packet_size >= s->rtp_payload_size &&
3092  mb_y + mb_x > 0;
3093 
3094  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3095 
3096  switch (s->c.codec_id) {
3097  case AV_CODEC_ID_H263:
3098  case AV_CODEC_ID_H263P:
3099  if (!s->h263_slice_structured)
3100  if (s->c.mb_x || s->c.mb_y % s->gob_index) is_gob_start = 0;
3101  break;
3103  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3105  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3106  s->mb_skip_run)
3107  is_gob_start=0;
3108  break;
3109  case AV_CODEC_ID_MJPEG:
3110  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3111  break;
3112  }
3113 
3114  if(is_gob_start){
3115  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3116  write_slice_end(s);
3117 
3118  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->partitioned_frame)
3120  }
3121 
3122  av_assert2((put_bits_count(&s->pb)&7) == 0);
3123  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3124 
3125  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3126  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->c.mb_x + s->c.mb_y;
3127  int d = 100 / s->error_rate;
3128  if(r % d == 0){
3129  current_packet_size=0;
3130  s->pb.buf_ptr= s->ptr_lastgob;
3131  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3132  }
3133  }
3134 
3135  switch (s->c.codec_id) {
3136  case AV_CODEC_ID_MPEG4:
3137  if (CONFIG_MPEG4_ENCODER) {
3141  }
3142  break;
3145  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3148  }
3149  break;
3150 #if CONFIG_H263P_ENCODER
3151  case AV_CODEC_ID_H263P:
3152  if (s->c.dc_val)
3154  // fallthrough
3155 #endif
3156  case AV_CODEC_ID_H263:
3157  if (CONFIG_H263_ENCODER) {
3158  update_mb_info(s, 1);
3160  }
3161  break;
3162  }
3163 
3164  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3165  int bits= put_bits_count(&s->pb);
3166  s->misc_bits+= bits - s->last_bits;
3167  s->last_bits= bits;
3168  }
3169 
3170  s->ptr_lastgob += current_packet_size;
3171  s->c.first_slice_line = 1;
3172  s->c.resync_mb_x = mb_x;
3173  s->c.resync_mb_y = mb_y;
3174  }
3175  }
3176 
3177  if (s->c.resync_mb_x == s->c.mb_x &&
3178  s->c.resync_mb_y+1 == s->c.mb_y)
3179  s->c.first_slice_line = 0;
3180 
3181  s->c.mb_skipped = 0;
3182  s->dquant=0; //only for QP_RD
3183 
3184  update_mb_info(s, 0);
3185 
3186  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3187  int next_block=0;
3188  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3189 
3190  backup_context_before_encode(&backup_s, s);
3191  backup_s.pb= s->pb;
3192  if (s->data_partitioning) {
3193  backup_s.pb2= s->pb2;
3194  backup_s.tex_pb= s->tex_pb;
3195  }
3196 
3197  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3198  s->c.mv_dir = MV_DIR_FORWARD;
3199  s->c.mv_type = MV_TYPE_16X16;
3200  s->c.mb_intra = 0;
3201  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3202  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3203  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3204  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3205  }
3206  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3207  s->c.mv_dir = MV_DIR_FORWARD;
3208  s->c.mv_type = MV_TYPE_FIELD;
3209  s->c.mb_intra = 0;
3210  for(i=0; i<2; i++){
3211  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3212  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3213  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3214  }
3215  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3216  &dmin, &next_block, 0, 0);
3217  }
3218  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3219  s->c.mv_dir = MV_DIR_FORWARD;
3220  s->c.mv_type = MV_TYPE_16X16;
3221  s->c.mb_intra = 0;
3222  s->c.mv[0][0][0] = 0;
3223  s->c.mv[0][0][1] = 0;
3224  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3225  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3226  }
3227  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3228  s->c.mv_dir = MV_DIR_FORWARD;
3229  s->c.mv_type = MV_TYPE_8X8;
3230  s->c.mb_intra = 0;
3231  for(i=0; i<4; i++){
3232  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3233  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3234  }
3235  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3236  &dmin, &next_block, 0, 0);
3237  }
3238  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3239  s->c.mv_dir = MV_DIR_FORWARD;
3240  s->c.mv_type = MV_TYPE_16X16;
3241  s->c.mb_intra = 0;
3242  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3243  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3244  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3245  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3246  }
3247  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3248  s->c.mv_dir = MV_DIR_BACKWARD;
3249  s->c.mv_type = MV_TYPE_16X16;
3250  s->c.mb_intra = 0;
3251  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3252  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3253  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3254  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3255  }
3256  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3257  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3258  s->c.mv_type = MV_TYPE_16X16;
3259  s->c.mb_intra = 0;
3260  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3261  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3262  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3263  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3264  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3265  &dmin, &next_block, 0, 0);
3266  }
3267  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3268  s->c.mv_dir = MV_DIR_FORWARD;
3269  s->c.mv_type = MV_TYPE_FIELD;
3270  s->c.mb_intra = 0;
3271  for(i=0; i<2; i++){
3272  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3273  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3274  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3275  }
3276  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3277  &dmin, &next_block, 0, 0);
3278  }
3279  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3280  s->c.mv_dir = MV_DIR_BACKWARD;
3281  s->c.mv_type = MV_TYPE_FIELD;
3282  s->c.mb_intra = 0;
3283  for(i=0; i<2; i++){
3284  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3285  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3286  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3287  }
3288  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3289  &dmin, &next_block, 0, 0);
3290  }
3291  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3292  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3293  s->c.mv_type = MV_TYPE_FIELD;
3294  s->c.mb_intra = 0;
3295  for(dir=0; dir<2; dir++){
3296  for(i=0; i<2; i++){
3297  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3298  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3299  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3300  }
3301  }
3302  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3303  &dmin, &next_block, 0, 0);
3304  }
3305  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3306  s->c.mv_dir = 0;
3307  s->c.mv_type = MV_TYPE_16X16;
3308  s->c.mb_intra = 1;
3309  s->c.mv[0][0][0] = 0;
3310  s->c.mv[0][0][1] = 0;
3311  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3312  &dmin, &next_block, 0, 0);
3313  s->c.mbintra_table[xy] = 1;
3314  }
3315 
3316  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3317  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3318  const int last_qp = backup_s.c.qscale;
3319  int qpi, qp, dc[6];
3320  int16_t ac[6][16];
3321  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3322  static const int dquant_tab[4]={-1,1,-2,2};
3323  int storecoefs = s->c.mb_intra && s->c.dc_val;
3324 
3325  av_assert2(backup_s.dquant == 0);
3326 
3327  //FIXME intra
3328  s->c.mv_dir = best_s.c.mv_dir;
3329  s->c.mv_type = MV_TYPE_16X16;
3330  s->c.mb_intra = best_s.c.mb_intra;
3331  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3332  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3333  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3334  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3335 
3336  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3337  for(; qpi<4; qpi++){
3338  int dquant= dquant_tab[qpi];
3339  qp= last_qp + dquant;
3340  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3341  continue;
3342  backup_s.dquant= dquant;
3343  if(storecoefs){
3344  for(i=0; i<6; i++){
3345  dc[i] = s->c.dc_val[s->c.block_index[i]];
3346  memcpy(ac[i], s->c.ac_val[s->c.block_index[i]], sizeof(*s->c.ac_val));
3347  }
3348  }
3349 
3350  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3351  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3352  if (best_s.c.qscale != qp) {
3353  if(storecoefs){
3354  for(i=0; i<6; i++){
3355  s->c.dc_val[s->c.block_index[i]] = dc[i];
3356  memcpy(s->c.ac_val[s->c.block_index[i]], ac[i], sizeof(*s->c.ac_val));
3357  }
3358  }
3359  }
3360  }
3361  }
3362  }
3363  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3364  int mx= s->b_direct_mv_table[xy][0];
3365  int my= s->b_direct_mv_table[xy][1];
3366 
3367  backup_s.dquant = 0;
3368  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3369  s->c.mb_intra = 0;
3370  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3371  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3372  &dmin, &next_block, mx, my);
3373  }
3374  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3375  backup_s.dquant = 0;
3376  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3377  s->c.mb_intra = 0;
3378  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3379  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3380  &dmin, &next_block, 0, 0);
3381  }
3382  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3383  int coded=0;
3384  for(i=0; i<6; i++)
3385  coded |= s->c.block_last_index[i];
3386  if(coded){
3387  int mx,my;
3388  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3389  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3390  mx=my=0; //FIXME find the one we actually used
3391  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3392  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3393  mx = s->c.mv[1][0][0];
3394  my = s->c.mv[1][0][1];
3395  }else{
3396  mx = s->c.mv[0][0][0];
3397  my = s->c.mv[0][0][1];
3398  }
3399 
3400  s->c.mv_dir = best_s.c.mv_dir;
3401  s->c.mv_type = best_s.c.mv_type;
3402  s->c.mb_intra = 0;
3403 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3404  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3405  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3406  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3407  backup_s.dquant= 0;
3408  s->skipdct=1;
3409  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3410  &dmin, &next_block, mx, my);
3411  s->skipdct=0;
3412  }
3413  }
3414 
3415  store_context_after_encode(s, &best_s, s->data_partitioning);
3416 
3417  pb_bits_count= put_bits_count(&s->pb);
3418  flush_put_bits(&s->pb);
3419  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3420  s->pb= backup_s.pb;
3421 
3422  if (s->data_partitioning) {
3423  pb2_bits_count= put_bits_count(&s->pb2);
3424  flush_put_bits(&s->pb2);
3425  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3426  s->pb2= backup_s.pb2;
3427 
3428  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3429  flush_put_bits(&s->tex_pb);
3430  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3431  s->tex_pb= backup_s.tex_pb;
3432  }
3433  s->last_bits= put_bits_count(&s->pb);
3434 
3435  if (CONFIG_H263_ENCODER &&
3436  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3438 
3439  if(next_block==0){ //FIXME 16 vs linesize16
3440  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3441  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3442  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3443  }
3444 
3445  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3446  mpv_reconstruct_mb(s, s->block);
3447  } else {
3448  int motion_x = 0, motion_y = 0;
3449  s->c.mv_type = MV_TYPE_16X16;
3450  // only one MB-Type possible
3451 
3452  switch(mb_type){
3454  s->c.mv_dir = 0;
3455  s->c.mb_intra = 1;
3456  motion_x= s->c.mv[0][0][0] = 0;
3457  motion_y= s->c.mv[0][0][1] = 0;
3458  s->c.mbintra_table[xy] = 1;
3459  break;
3461  s->c.mv_dir = MV_DIR_FORWARD;
3462  s->c.mb_intra = 0;
3463  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3464  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3465  break;
3467  s->c.mv_dir = MV_DIR_FORWARD;
3468  s->c.mv_type = MV_TYPE_FIELD;
3469  s->c.mb_intra = 0;
3470  for(i=0; i<2; i++){
3471  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3472  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3473  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3474  }
3475  break;
3477  s->c.mv_dir = MV_DIR_FORWARD;
3478  s->c.mv_type = MV_TYPE_8X8;
3479  s->c.mb_intra = 0;
3480  for(i=0; i<4; i++){
3481  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3482  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3483  }
3484  break;
3486  if (CONFIG_MPEG4_ENCODER) {
3488  s->c.mb_intra = 0;
3489  motion_x=s->b_direct_mv_table[xy][0];
3490  motion_y=s->b_direct_mv_table[xy][1];
3491  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3492  }
3493  break;
3495  if (CONFIG_MPEG4_ENCODER) {
3497  s->c.mb_intra = 0;
3498  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3499  }
3500  break;
3502  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3503  s->c.mb_intra = 0;
3504  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3505  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3506  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3507  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3508  break;
3510  s->c.mv_dir = MV_DIR_BACKWARD;
3511  s->c.mb_intra = 0;
3512  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3513  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3514  break;
3516  s->c.mv_dir = MV_DIR_FORWARD;
3517  s->c.mb_intra = 0;
3518  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3519  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3520  break;
3522  s->c.mv_dir = MV_DIR_FORWARD;
3523  s->c.mv_type = MV_TYPE_FIELD;
3524  s->c.mb_intra = 0;
3525  for(i=0; i<2; i++){
3526  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3527  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3528  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3529  }
3530  break;
3532  s->c.mv_dir = MV_DIR_BACKWARD;
3533  s->c.mv_type = MV_TYPE_FIELD;
3534  s->c.mb_intra = 0;
3535  for(i=0; i<2; i++){
3536  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3537  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3538  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3539  }
3540  break;
3542  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3543  s->c.mv_type = MV_TYPE_FIELD;
3544  s->c.mb_intra = 0;
3545  for(dir=0; dir<2; dir++){
3546  for(i=0; i<2; i++){
3547  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3548  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3549  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3550  }
3551  }
3552  break;
3553  default:
3554  av_unreachable("There is a case for every CANDIDATE_MB_TYPE_* "
3555  "except CANDIDATE_MB_TYPE_SKIPPED which is never "
3556  "the only candidate (always coupled with INTER) "
3557  "so that it never reaches this switch");
3558  }
3559 
3560  encode_mb(s, motion_x, motion_y);
3561 
3562  // RAL: Update last macroblock type
3563  s->last_mv_dir = s->c.mv_dir;
3564 
3565  if (CONFIG_H263_ENCODER &&
3566  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3568 
3569  mpv_reconstruct_mb(s, s->block);
3570  }
3571 
3572  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3573 
3574  /* clean the MV table in IPS frames for direct mode in B-frames */
3575  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3576  s->p_mv_table[xy][0]=0;
3577  s->p_mv_table[xy][1]=0;
3578 #if CONFIG_H263_ENCODER
3579  } else if (s->c.h263_pred || s->c.h263_aic) {
3581 #endif
3582  }
3583 
3584  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3585  int w= 16;
3586  int h= 16;
3587 
3588  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3589  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3590 
3591  s->encoding_error[0] += sse(
3592  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3593  s->c.dest[0], w, h, s->c.linesize);
3594  s->encoding_error[1] += sse(
3595  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3596  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3597  s->encoding_error[2] += sse(
3598  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3599  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3600  }
3601  if (s->loop_filter) {
3602  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3603  ff_h263_loop_filter(&s->c);
3604  }
3605  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3606  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3607  }
3608  }
3609 
3610 #if CONFIG_MSMPEG4ENC
3611  //not beautiful here but we must write it before flushing so it has to be here
3612  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3613  s->c.pict_type == AV_PICTURE_TYPE_I)
3615 #endif
3616 
3617  write_slice_end(s);
3618 
3619  return 0;
3620 }
3621 
/* Helpers for folding per-slice-thread statistics back into the main
 * encoder context: ADD accumulates src->field into dst->field; MERGE
 * additionally zeroes the source field so that a repeated merge of the
 * same slice context cannot double-count the statistic. */
3622 #define ADD(field) dst->field += src->field;
3623 #define MERGE(field) dst->field += src->field; src->field=0
3625 {
3626  ADD(me.scene_change_score);
3627  ADD(me.mc_mb_var_sum_temp);
3628  ADD(me.mb_var_sum_temp);
3629 }
3630 
3632 {
3633  int i;
3634 
3635  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3636  MERGE(dct_count[1]);
3637  ADD(mv_bits);
3638  ADD(i_tex_bits);
3639  ADD(p_tex_bits);
3640  ADD(i_count);
3641  ADD(misc_bits);
3642  ADD(encoding_error[0]);
3643  ADD(encoding_error[1]);
3644  ADD(encoding_error[2]);
3645 
3646  if (dst->dct_error_sum) {
3647  for(i=0; i<64; i++){
3648  MERGE(dct_error_sum[0][i]);
3649  MERGE(dct_error_sum[1][i]);
3650  }
3651  }
3652 
3653  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3654  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3655  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3656  flush_put_bits(&dst->pb);
3657 }
3658 
/**
 * Choose the quantiser / lambda for the current picture.
 *
 * Priority order visible below: an explicitly scheduled next_lambda wins;
 * otherwise, unless qscale is fixed, the rate controller
 * (ff_rate_estimate_qscale) picks the quality. With adaptive quantisation
 * a per-MB qscale table is initialised and lambda comes from
 * lambda_table[0]; otherwise lambda is taken from the picture quality.
 *
 * @param m       main encoder context
 * @param dry_run nonzero = estimate only (next_lambda is not consumed)
 * @return 0 on success, -1 if the rate controller returned a negative quality
 *
 * NOTE(review): this listing is an extraction with gaps — the inline
 * numbering skips lines 3679 and 3685, i.e. the per-codec statements under
 * the CONFIG_MPEG4_ENCODER / CONFIG_H263_ENCODER conditions (presumably the
 * qscale-table clean-up calls) are missing from this view; confirm against
 * the full source before editing.
 */
3659  static int estimate_qp(MPVMainEncContext *const m, int dry_run)
3660 {
3661  MPVEncContext *const s = &m->s;
3662 
3663  if (m->next_lambda){
3664  s->c.cur_pic.ptr->f->quality = m->next_lambda;
3665  if(!dry_run) m->next_lambda= 0;
3666  } else if (!m->fixed_qscale) {
3667  int quality = ff_rate_estimate_qscale(m, dry_run);
3668  s->c.cur_pic.ptr->f->quality = quality;
3669  if (s->c.cur_pic.ptr->f->quality < 0)
3670  return -1;
3671  }
3672 
3673  if(s->adaptive_quant){
3674  init_qscale_tab(s);
3675 
3676  switch (s->c.codec_id) {
3677  case AV_CODEC_ID_MPEG4:
3678  if (CONFIG_MPEG4_ENCODER)
/* NOTE(review): original line 3679 (the MPEG-4 branch body) is missing here */
3680  break;
3681  case AV_CODEC_ID_H263:
3682  case AV_CODEC_ID_H263P:
3683  case AV_CODEC_ID_FLV1:
3684  if (CONFIG_H263_ENCODER)
/* NOTE(review): original line 3685 (the H.263 branch body) is missing here */
3686  break;
3687  }
3688 
3689  s->lambda = s->lambda_table[0];
3690  //FIXME broken
3691  }else
3692  s->lambda = s->c.cur_pic.ptr->f->quality;
3693  update_qscale(m);
3694  return 0;
3695 }
3696 
3697 /* must be called before writing the header */
3699 {
3700  av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3701  s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;
3702 
3703  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3704  s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
3705  av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
3706  }else{
3707  s->c.pp_time = s->c.time - s->c.last_non_b_time;
3708  s->c.last_non_b_time = s->c.time;
3709  av_assert1(s->picture_number == 0 || s->c.pp_time > 0);
3710  }
3711 }
3712 
/**
 * Encode one whole picture.
 *
 * Visible pipeline: initialise B-frame timing state and rounding mode;
 * pick an initial lambda (two-pass, fixed, or carried over); split the
 * output packet buffer between the slice contexts; run (pre-)motion
 * estimation threads; merge per-slice ME statistics; optionally promote a
 * P-frame to I on scene change; derive f_code/b_code and clip long motion
 * vectors; finalise qscale (with MJPEG-specific quant-matrix setup); write
 * the picture header; dispatch encode_thread over all slice contexts and
 * merge the resulting bitstreams.
 *
 * @param m    main encoder context
 * @param pkt  output packet whose buffer is partitioned between slices
 * @return 0 on success, a negative error code on failure
 *
 * NOTE(review): this listing is an extraction with gaps — inline numbering
 * skips several lines (3723, 3725, 3823, 3853-3854, 3874). Each gap is
 * flagged inline below; confirm against the full source before editing.
 */
3713  static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
3714 {
3715  MPVEncContext *const s = &m->s;
3716  int i, ret;
3717  int bits;
3718  int context_count = s->c.slice_context_count;
3719 
3720  /* we need to initialize some time vars before we can encode B-frames */
3721  // RAL: Condition added for MPEG1VIDEO
3722  if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
/* NOTE(review): original line 3723 (body of the if above, presumably the
 * frame-distance setup call) is missing from this extraction */
3724  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
/* NOTE(review): original line 3725 (body of the MPEG-4 if above) is missing */
3726 
3727 // s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3728 
3729  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3730  s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
3731  } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3732  s->c.no_rounding ^= s->flipflop_rounding;
3733  }
3734 
3735  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
3736  ret = estimate_qp(m, 1);
3737  if (ret < 0)
3738  return ret;
3739  ff_get_2pass_fcode(m);
3740  } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3741  if (s->c.pict_type == AV_PICTURE_TYPE_B)
3742  s->lambda = m->last_lambda_for[s->c.pict_type];
3743  else
3744  s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
3745  update_qscale(m);
3746  }
3747 
3748  s->c.mb_intra = 0; //for the rate distortion & bit compare functions
/* Partition the packet buffer proportionally to each slice's MB rows and
 * initialise every slice context's bit writer and ME scratch buffers. */
3749  for (int i = 0; i < context_count; i++) {
3750  MPVEncContext *const slice = s->c.enc_contexts[i];
3751  int h = s->c.mb_height;
3752  uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
3753  uint8_t *end = pkt->data + (int64_t)pkt->size * slice->c. end_mb_y / h;
3754 
3755  init_put_bits(&slice->pb, start, end - start);
3756 
3757  if (i) {
3758  ret = ff_update_duplicate_context(&slice->c, &s->c);
3759  if (ret < 0)
3760  return ret;
3761  slice->lambda = s->lambda;
3762  slice->lambda2 = s->lambda2;
3763  }
3764  slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
3765  ff_me_init_pic(slice);
3766  }
3767 
3768  /* Estimate motion for every MB */
3769  if (s->c.pict_type != AV_PICTURE_TYPE_I) {
3770  s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
3771  s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
3772  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3773  if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3774  m->me_pre == 2) {
3775  s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
3776  &s->c.enc_contexts[0], NULL,
3777  context_count, sizeof(void*));
3778  }
3779  }
3780 
3781  s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
3782  NULL, context_count, sizeof(void*));
3783  }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
3784  /* I-Frame */
3785  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3786  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3787 
3788  if (!m->fixed_qscale) {
3789  /* finding spatial complexity for I-frame rate control */
3790  s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
3791  NULL, context_count, sizeof(void*));
3792  }
3793  }
3794  for(i=1; i<context_count; i++){
3795  merge_context_after_me(s, s->c.enc_contexts[i]);
3796  }
3797  m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3798  m->mb_var_sum = s->me. mb_var_sum_temp;
3799  emms_c();
3800 
/* Scene change: re-encode a planned P-frame as I when the ME score
 * exceeds the configured threshold. */
3801  if (s->me.scene_change_score > m->scenechange_threshold &&
3802  s->c.pict_type == AV_PICTURE_TYPE_P) {
3803  s->c.pict_type = AV_PICTURE_TYPE_I;
3804  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3805  s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
3806  if (s->c.msmpeg4_version >= MSMP4_V3)
3807  s->c.no_rounding = 1;
3808  ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3809  m->mb_var_sum, m->mc_mb_var_sum);
3810  }
3811 
/* Derive MV range codes (f_code/b_code) and clamp out-of-range MVs. */
3812  if (!s->umvplus) {
3813  if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
3814  s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3815 
3816  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3817  int a,b;
3818  a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3819  b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3820  s->f_code = FFMAX3(s->f_code, a, b);
3821  }
3822 
/* NOTE(review): original line 3823 is missing from this extraction */
3824  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3825  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3826  int j;
3827  for(i=0; i<2; i++){
3828  for(j=0; j<2; j++)
3829  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3830  s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3831  }
3832  }
3833  } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3834  int a, b;
3835 
3836  a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3837  b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3838  s->f_code = FFMAX(a, b);
3839 
3840  a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3841  b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3842  s->b_code = FFMAX(a, b);
3843 
3844  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3845  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3846  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3847  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3848  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3849  int dir, j;
3850  for(dir=0; dir<2; dir++){
3851  for(i=0; i<2; i++){
3852  for(j=0; j<2; j++){
/* NOTE(review): original lines 3853-3854 are missing from this extraction
 * (they presumably declare/derive the 'type' value used below) */
3855  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3856  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3857  }
3858  }
3859  }
3860  }
3861  }
3862  }
3863 
3864  ret = estimate_qp(m, 0);
3865  if (ret < 0)
3866  return ret;
3867 
3868  if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
3869  s->c.pict_type == AV_PICTURE_TYPE_I &&
3870  !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
3871  s->c.qscale = 3; //reduce clipping problems
3872 
3873  if (s->c.out_format == FMT_MJPEG) {
/* NOTE(review): original line 3874 (first line of the statement whose
 * continuation follows) is missing from this extraction */
3875  (7 + s->c.qscale) / s->c.qscale, 65535);
3876  if (ret < 0)
3877  return ret;
3878 
3879  if (s->c.codec_id != AV_CODEC_ID_AMV) {
3880  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3881  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3882 
3883  if (s->c.avctx->intra_matrix) {
3884  chroma_matrix =
3885  luma_matrix = s->c.avctx->intra_matrix;
3886  }
3887  if (s->c.avctx->chroma_intra_matrix)
3888  chroma_matrix = s->c.avctx->chroma_intra_matrix;
3889 
3890  /* for mjpeg, we do include qscale in the matrix */
3891  for (int i = 1; i < 64; i++) {
3892  int j = s->c.idsp.idct_permutation[i];
3893 
3894  s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
3895  s->c. intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->c.qscale) >> 3);
3896  }
3897  s->c.y_dc_scale_table =
3898  s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
3899  s->c.chroma_intra_matrix[0] =
3900  s->c.intra_matrix[0] = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision][8];
3901  } else {
3902  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3903  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3904  for (int i = 1; i < 64; i++) {
3905  int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];
3906 
3907  s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3908  s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3909  }
3910  s->c.y_dc_scale_table = y;
3911  s->c.c_dc_scale_table = c;
3912  s->c.intra_matrix[0] = 13;
3913  s->c.chroma_intra_matrix[0] = 14;
3914  }
3915  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3916  s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
3917  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3918  s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3919  s->c.qscale = 8;
3920  }
3921 
3922  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3923  s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3924  } else {
3925  s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3926  }
3927  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
3928 
3929  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3930  m->picture_in_gop_number = 0;
3931 
/* Write the picture header, then encode all slices and merge them. */
3932  s->c.mb_x = s->c.mb_y = 0;
3933  s->last_bits= put_bits_count(&s->pb);
3934  ret = m->encode_picture_header(m);
3935  if (ret < 0)
3936  return ret;
3937  bits= put_bits_count(&s->pb);
3938  m->header_bits = bits - s->last_bits;
3939 
3940  for(i=1; i<context_count; i++){
3941  update_duplicate_context_after_me(s->c.enc_contexts[i], s);
3942  }
3943  s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
3944  NULL, context_count, sizeof(void*));
3945  for(i=1; i<context_count; i++){
3946  if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
3947  set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3948  merge_context_after_encode(s, s->c.enc_contexts[i]);
3949  }
3950  emms_c();
3951  return 0;
3952 }
3953 
3954 static inline void denoise_dct(MPVEncContext *const s, int16_t block[])
3955 {
3956  if (!s->dct_error_sum)
3957  return;
3958 
3959  const int intra = s->c.mb_intra;
3960  s->dct_count[intra]++;
3961  s->mpvencdsp.denoise_dct(block, s->dct_error_sum[intra], s->dct_offset[intra]);
3962 }
3963 
3965  int16_t *block, int n,
3966  int qscale, int *overflow){
3967  const int *qmat;
3968  const uint16_t *matrix;
3969  const uint8_t *scantable;
3970  const uint8_t *perm_scantable;
3971  int max=0;
3972  unsigned int threshold1, threshold2;
3973  int bias=0;
3974  int run_tab[65];
3975  int level_tab[65];
3976  int score_tab[65];
3977  int survivor[65];
3978  int survivor_count;
3979  int last_run=0;
3980  int last_level=0;
3981  int last_score= 0;
3982  int last_i;
3983  int coeff[2][64];
3984  int coeff_count[64];
3985  int qmul, qadd, start_i, last_non_zero, i, dc;
3986  const int esc_length= s->ac_esc_length;
3987  const uint8_t *length, *last_length;
3988  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3989  int mpeg2_qscale;
3990 
3991  s->fdsp.fdct(block);
3992 
3993  denoise_dct(s, block);
3994 
3995  qmul= qscale*16;
3996  qadd= ((qscale-1)|1)*8;
3997 
3998  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3999  else mpeg2_qscale = qscale << 1;
4000 
4001  if (s->c.mb_intra) {
4002  int q;
4003  scantable = s->c.intra_scantable.scantable;
4004  perm_scantable = s->c.intra_scantable.permutated;
4005  if (!s->c.h263_aic) {
4006  if (n < 4)
4007  q = s->c.y_dc_scale;
4008  else
4009  q = s->c.c_dc_scale;
4010  q = q << 3;
4011  } else{
4012  /* For AIC we skip quant/dequant of INTRADC */
4013  q = 1 << 3;
4014  qadd=0;
4015  }
4016 
4017  /* note: block[0] is assumed to be positive */
4018  block[0] = (block[0] + (q >> 1)) / q;
4019  start_i = 1;
4020  last_non_zero = 0;
4021  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4022  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
4023  if (s->mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
4024  bias= 1<<(QMAT_SHIFT-1);
4025 
4026  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4027  length = s->intra_chroma_ac_vlc_length;
4028  last_length= s->intra_chroma_ac_vlc_last_length;
4029  } else {
4030  length = s->intra_ac_vlc_length;
4031  last_length= s->intra_ac_vlc_last_length;
4032  }
4033  } else {
4034  scantable = s->c.inter_scantable.scantable;
4035  perm_scantable = s->c.inter_scantable.permutated;
4036  start_i = 0;
4037  last_non_zero = -1;
4038  qmat = s->q_inter_matrix[qscale];
4039  matrix = s->c.inter_matrix;
4040  length = s->inter_ac_vlc_length;
4041  last_length= s->inter_ac_vlc_last_length;
4042  }
4043  last_i= start_i;
4044 
4045  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4046  threshold2= (threshold1<<1);
4047 
4048  for(i=63; i>=start_i; i--) {
4049  const int j = scantable[i];
4050  int64_t level = (int64_t)block[j] * qmat[j];
4051 
4052  if(((uint64_t)(level+threshold1))>threshold2){
4053  last_non_zero = i;
4054  break;
4055  }
4056  }
4057 
4058  for(i=start_i; i<=last_non_zero; i++) {
4059  const int j = scantable[i];
4060  int64_t level = (int64_t)block[j] * qmat[j];
4061 
4062 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4063 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4064  if(((uint64_t)(level+threshold1))>threshold2){
4065  if(level>0){
4066  level= (bias + level)>>QMAT_SHIFT;
4067  coeff[0][i]= level;
4068  coeff[1][i]= level-1;
4069 // coeff[2][k]= level-2;
4070  }else{
4071  level= (bias - level)>>QMAT_SHIFT;
4072  coeff[0][i]= -level;
4073  coeff[1][i]= -level+1;
4074 // coeff[2][k]= -level+2;
4075  }
4076  coeff_count[i]= FFMIN(level, 2);
4077  av_assert2(coeff_count[i]);
4078  max |=level;
4079  }else{
4080  coeff[0][i]= (level>>31)|1;
4081  coeff_count[i]= 1;
4082  }
4083  }
4084 
4085  *overflow= s->max_qcoeff < max; //overflow might have happened
4086 
4087  if(last_non_zero < start_i){
4088  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4089  return last_non_zero;
4090  }
4091 
4092  score_tab[start_i]= 0;
4093  survivor[0]= start_i;
4094  survivor_count= 1;
4095 
4096  for(i=start_i; i<=last_non_zero; i++){
4097  int level_index, j, zero_distortion;
4098  int dct_coeff= FFABS(block[ scantable[i] ]);
4099  int best_score=256*256*256*120;
4100 
4101  if (s->fdsp.fdct == ff_fdct_ifast)
4102  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4103  zero_distortion= dct_coeff*dct_coeff;
4104 
4105  for(level_index=0; level_index < coeff_count[i]; level_index++){
4106  int distortion;
4107  int level= coeff[level_index][i];
4108  const int alevel= FFABS(level);
4109  int unquant_coeff;
4110 
4111  av_assert2(level);
4112 
4113  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4114  unquant_coeff= alevel*qmul + qadd;
4115  } else if (s->c.out_format == FMT_MJPEG) {
4116  j = s->c.idsp.idct_permutation[scantable[i]];
4117  unquant_coeff = alevel * matrix[j] * 8;
4118  }else{ // MPEG-1
4119  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4120  if (s->c.mb_intra) {
4121  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4122  unquant_coeff = (unquant_coeff - 1) | 1;
4123  }else{
4124  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4125  unquant_coeff = (unquant_coeff - 1) | 1;
4126  }
4127  unquant_coeff<<= 3;
4128  }
4129 
4130  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4131  level+=64;
4132  if((level&(~127)) == 0){
4133  for(j=survivor_count-1; j>=0; j--){
4134  int run= i - survivor[j];
4135  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4136  score += score_tab[i-run];
4137 
4138  if(score < best_score){
4139  best_score= score;
4140  run_tab[i+1]= run;
4141  level_tab[i+1]= level-64;
4142  }
4143  }
4144 
4145  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4146  for(j=survivor_count-1; j>=0; j--){
4147  int run= i - survivor[j];
4148  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4149  score += score_tab[i-run];
4150  if(score < last_score){
4151  last_score= score;
4152  last_run= run;
4153  last_level= level-64;
4154  last_i= i+1;
4155  }
4156  }
4157  }
4158  }else{
4159  distortion += esc_length*lambda;
4160  for(j=survivor_count-1; j>=0; j--){
4161  int run= i - survivor[j];
4162  int score= distortion + score_tab[i-run];
4163 
4164  if(score < best_score){
4165  best_score= score;
4166  run_tab[i+1]= run;
4167  level_tab[i+1]= level-64;
4168  }
4169  }
4170 
4171  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4172  for(j=survivor_count-1; j>=0; j--){
4173  int run= i - survivor[j];
4174  int score= distortion + score_tab[i-run];
4175  if(score < last_score){
4176  last_score= score;
4177  last_run= run;
4178  last_level= level-64;
4179  last_i= i+1;
4180  }
4181  }
4182  }
4183  }
4184  }
4185 
4186  score_tab[i+1]= best_score;
4187 
4188  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4189  if(last_non_zero <= 27){
4190  for(; survivor_count; survivor_count--){
4191  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4192  break;
4193  }
4194  }else{
4195  for(; survivor_count; survivor_count--){
4196  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4197  break;
4198  }
4199  }
4200 
4201  survivor[ survivor_count++ ]= i+1;
4202  }
4203 
4204  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4205  last_score= 256*256*256*120;
4206  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4207  int score= score_tab[i];
4208  if (i)
4209  score += lambda * 2; // FIXME more exact?
4210 
4211  if(score < last_score){
4212  last_score= score;
4213  last_i= i;
4214  last_level= level_tab[i];
4215  last_run= run_tab[i];
4216  }
4217  }
4218  }
4219 
4220  s->coded_score[n] = last_score;
4221 
4222  dc= FFABS(block[0]);
4223  last_non_zero= last_i - 1;
4224  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4225 
4226  if(last_non_zero < start_i)
4227  return last_non_zero;
4228 
4229  if(last_non_zero == 0 && start_i == 0){
4230  int best_level= 0;
4231  int best_score= dc * dc;
4232 
4233  for(i=0; i<coeff_count[0]; i++){
4234  int level= coeff[i][0];
4235  int alevel= FFABS(level);
4236  int unquant_coeff, score, distortion;
4237 
4238  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4239  unquant_coeff= (alevel*qmul + qadd)>>3;
4240  } else{ // MPEG-1
4241  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4242  unquant_coeff = (unquant_coeff - 1) | 1;
4243  }
4244  unquant_coeff = (unquant_coeff + 4) >> 3;
4245  unquant_coeff<<= 3 + 3;
4246 
4247  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4248  level+=64;
4249  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4250  else score= distortion + esc_length*lambda;
4251 
4252  if(score < best_score){
4253  best_score= score;
4254  best_level= level - 64;
4255  }
4256  }
4257  block[0]= best_level;
4258  s->coded_score[n] = best_score - dc*dc;
4259  if(best_level == 0) return -1;
4260  else return last_non_zero;
4261  }
4262 
4263  i= last_i;
4264  av_assert2(last_level);
4265 
4266  block[ perm_scantable[last_non_zero] ]= last_level;
4267  i -= last_run + 1;
4268 
4269  for(; i>start_i; i -= run_tab[i] + 1){
4270  block[ perm_scantable[i-1] ]= level_tab[i];
4271  }
4272 
4273  return last_non_zero;
4274 }
4275 
4276 static DECLARE_ALIGNED(16, int16_t, basis)[64][64];
4277 
4278 static void build_basis(uint8_t *perm){
4279  int i, j, x, y;
4280  emms_c();
4281  for(i=0; i<8; i++){
4282  for(j=0; j<8; j++){
4283  for(y=0; y<8; y++){
4284  for(x=0; x<8; x++){
4285  double s= 0.25*(1<<BASIS_SHIFT);
4286  int index= 8*i + j;
4287  int perm_index= perm[index];
4288  if(i==0) s*= sqrt(0.5);
4289  if(j==0) s*= sqrt(0.5);
4290  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4291  }
4292  }
4293  }
4294  }
4295 }
4296 
/**
 * Iteratively refine an already-quantized 8x8 block by trying +-1 changes
 * on each coefficient and keeping any change that lowers a combined
 * rate-distortion score (quantizer noise shaping).
 *
 * The distortion part is evaluated against the original pixels via the
 * basis[] table (try_8x8basis/add_8x8basis); the rate part is the change
 * in VLC bit length for the affected run/level pairs, weighted by lambda.
 *
 * @param block  quantized coefficients, updated in place
 * @param weight per-coefficient perceptual weights, rewritten in place to
 *               the internal 16..63 range
 * @param orig   original (unquantized) pixel values of this block
 * @param n      block index (used to pick luma/chroma tables)
 * @return the new last non-zero coefficient index in scan order
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                               int16_t *block, int16_t *weight, int16_t *orig,
                               int n, int qscale){
    DECLARE_ALIGNED(16, int16_t, rem)[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    /* Lazily build the DCT basis table on first use (basis[0][0] is never
     * 0 once initialized, since the DC basis function is positive). */
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    /* H.263-style reconstruction levels: |coeff| -> qmul*|coeff| + qadd */
    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        /* Bring the DC quantizer into RECON_SHIFT fixed-point precision. */
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;      // DC handled separately for intra blocks
//        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        /* Pick the AC VLC length tables (chroma may have its own set). */
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    /* rem[] accumulates the reconstruction error (reconstruction minus
     * source) in RECON_SHIFT precision; start from rounded DC minus the
     * original pixels, then add each coded coefficient's contribution. */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* Remap the perceptual weights into 16..63 and accumulate sum of
     * squares, which scales lambda below. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    /* Rate-distortion multiplier; the shift compensates for the various
     * fixed-point scales involved (lambda2, weights, basis precision). */
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* Build the initial run-length table and fold the currently coded
     * coefficients into the error accumulator rem[]. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* Greedy refinement: per pass, find the single +-1 coefficient change
     * with the best score; apply it and repeat until no change helps. */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted error; used below to only create a new
             * coefficient when its sign opposes the error gradient. */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* Intra: also try nudging the DC coefficient by +-1. */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                /* DC must stay representable (non-negative, below 2048). */
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            /* Unless aggressive noise shaping is on, do not look past the
             * position right after the last coded coefficient. */
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;    // distance remaining to the next coded coeff
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                /* Conservative mode: never grow a coefficient's magnitude. */
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                    continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* Level changes, run structure stays: rate delta is
                         * just the VLC length difference for this pair. */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* Creating a new +-1 coefficient splits an existing
                         * run; account for the re-coded neighbor pair too. */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            /* Skip if the new coefficient would push the
                             * error further in the same direction. */
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            /* Out-of-table levels are coded with escapes;
                             * treat their length as index 0. */
                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            /* New coefficient becomes the new "last"; the
                             * previous last pair is re-coded as non-last. */
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* Removing a +-1 coefficient merges two runs; the rate
                     * deltas mirror the creation case above. */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=  length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=  last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* Add the distortion change the dequant-level change causes. */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* Apply the winning change and rebuild the bookkeeping. */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                /* The change may have zeroed the last coefficient; rescan
                 * backwards for the new last non-zero position. */
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            /* Rebuild run_tab for the modified block. */
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            /* Fold the dequantized change into the error accumulator. */
            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;   // no improving change found -> converged
        }
    }

    return last_non_zero;
}
4612 
4613 /**
4614  * Permute an 8x8 block according to permutation.
4615  * @param block the block which will be permuted according to
4616  * the given permutation vector
4617  * @param permutation the permutation vector
4618  * @param last the last non zero coefficient in scantable order, used to
4619  * speed the permutation up
4620  * @param scantable the used scantable, this is only used to speed the
4621  * permutation up, the block is not (inverse) permutated
4622  * to scantable order!
4623  */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t stash[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Lift every coefficient covered by the scantable out of the block,
     * clearing its slot so overlapping destinations cannot clash. */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];

        stash[pos] = block[pos];
        block[pos] = 0;
    }

    /* Drop each stashed coefficient back at its permuted position. */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];

        block[permutation[pos]] = stash[pos];
    }
}
4648 
/**
 * Forward-DCT and quantize one 8x8 block (C reference implementation).
 *
 * @param block    spatial-domain samples on input, quantized DCT
 *                 coefficients (in IDCT-permuted order) on output
 * @param n        block index; selects luma vs. chroma DC scale/matrix
 * @param qscale   quantizer scale, indexes the precomputed q matrices
 * @param overflow set to 1 if any coefficient exceeds s->max_qcoeff
 * @return index of the last non-zero coefficient in scan order,
 *         or start_i-1 if the block is empty
 */
static int dct_quantize_c(MPVEncContext *const s,
                          int16_t *block, int n,
                          int qscale, int *overflow)
{
    int i, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    denoise_dct(s, block);

    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
            q = q << 3;   // fdct output is scaled by 8
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;        // DC already quantized above
        last_non_zero = 0;  // DC always counts as coded for intra
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable = s->c.inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    /* A coefficient quantizes to non-zero iff |level*qmat| clears
     * threshold1; the unsigned compare below tests both signs at once. */
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* Scan backwards to find the last non-zero coefficient, zeroing the
     * trailing dead coefficients on the way. */
    for(i=63;i>=start_i;i--) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

        if(((uint64_t)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* Quantize the remaining coefficients in scan order. */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((uint64_t)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;   // track the largest magnitude seen
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->c.idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1497
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3964
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1153
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1661
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:378
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:106
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:83
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:219
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:263
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1710
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:238
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:256
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:297
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2725
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:245
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:250
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2167
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:175
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
h263data.h
init_unquantize
static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:311
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:102
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2628
level
uint8_t level
Definition: svq3.c:208
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1493
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:523
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
ALIGN
#define ALIGN
Definition: hashtable.c:32
r
const char * r
Definition: vf_curves.c:127
AVERROR
Return a negative AVERROR code on failure. For example, a filter's query_formats() callback may leave some formats unset and return AVERROR(EAGAIN) to make the negotiation mechanism try again later; filters with complex requirements can use this to base the formats supported on one link on what was negotiated for another.
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:821
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:242
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:118
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1898
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2650
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:269
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2650
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1277
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3698
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1920
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:118
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2646
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3659
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:673
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2641
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:251
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:299
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:229
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4276
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:948
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1518
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:257
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:165
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2842
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:808
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:173
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2776
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:290
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:158
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:241
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:224
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
AVFrame::width
int width
Definition: frame.h:499
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2650
AVPacket::data
uint8_t * data
Definition: packet.h:588
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:379
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:110
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:215
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:236
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2889
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:199
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:491
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:606
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:307
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:81
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2824
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2257
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2657
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:936
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:54
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:202
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1241
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:265
mpegutils.h
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:575
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:643
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:862
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:247
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:301
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:172
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:51
MBBackup::c
struct MBBackup::@212 c
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
update_mb_info
static void update_mb_info(MPVEncContext *const s, int startcode)
Definition: mpegvideo_enc.c:2929
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2650
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:206
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1671
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:376
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1173
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2654
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:878
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1937
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:335
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2649
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:56
init_slice_buffers
static av_cold int init_slice_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:500
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:224
faandct.h
Floating point AAN DCT.
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:53
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:843
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2647
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:722
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:268
fail
#define fail()
Definition: checkasm.h:207
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:57
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:103
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:292
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:208
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:241
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1209
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:310
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1255
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1287
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2798
ff_encode_add_stats_side_data
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
Definition: encode.c:918
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:217
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_mpeg1_encode_init
static void ff_mpeg1_encode_init(MPVEncContext *s)
Definition: mpeg12enc.h:33
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:374
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:294
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:299
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:242
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1455
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:254
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:204
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:94
MpegEncContext::ac_val
int16_t(* ac_val)[16]
used for H.263 AIC, MPEG-4 AC prediction
Definition: mpegvideo.h:145
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:119
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4278
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:222
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1241
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
Definition: rv10enc.c:34
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:122
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:265
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:225
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2652
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:209
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:201
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:266
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3624
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:835
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:260
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:50
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:253
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:226
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:203
MAX_AC_TEX_MB_SIZE
@ MAX_AC_TEX_MB_SIZE
Definition: mpeg4videoenc.h:39
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1067
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:270
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1270
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4624
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1505
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:282
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:295
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2642
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:829
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2867
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:55
arg
const char * arg
Definition: jacosubdec.c:67
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:270
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:447
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1255
MECmpContext
Definition: me_cmp.h:50
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:186
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:300
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
MPVEncContext::block
int16_t(* block)[64]
points into blocks below
Definition: mpegvideoenc.h:114
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:118
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:126
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:232
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:599
run
uint8_t run
Definition: svq3.c:207
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:194
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:96
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:190
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:108
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:912
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:296
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:53
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:240
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:206
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:241
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:331
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1802
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:524
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:188
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3623
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:868
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1117
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2645
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:937
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2952
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:120
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1160
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1305
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1326
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Average and put pixel Widths can be 16, 8, 4 or 2.
Definition: hpeldsp.h:39
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2653
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:254
MPVMainEncContext
Definition: mpegvideoenc.h:198
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:176
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:815
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1314
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:843
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:837
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
ff_h263_clean_intra_table_entries
static void ff_h263_clean_intra_table_entries(MpegEncContext *s, int xy)
Definition: h263.h:47
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1312
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:258
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:81
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2223
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1005
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:155
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:44
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:236
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:54
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2642
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:204
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:549
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:337
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:125
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:304
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:289
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:96
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:252
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:309
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3713
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:292
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:62
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:443
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:373
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:205
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:178
MAX_PB2_MB_SIZE
@ MAX_PB2_MB_SIZE
Definition: mpeg4videoenc.h:38
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:594
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:297
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:262
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:285
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:302
MBBackup
Definition: mpegvideo_enc.c:2638
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:291
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:266
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:312
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:407
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2643
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:104
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:264
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2640
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:581
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *m)
Definition: rv20enc.c:37
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2986
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:270
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2654
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:108
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:971
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:128
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:287
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:194
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:497
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:63
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:44
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:279
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:255
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:153
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:879
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:231
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:38
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:285
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:259
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:239
AVCodecContext::height
int height
Definition: avcodec.h:592
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:490
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1282
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:900
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1860
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2281
avcodec.h
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:442
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, int bits_per_raw_sample)
Definition: pixblockdsp.c:87
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
ff_h263_mpeg4_reset_dc
void ff_h263_mpeg4_reset_dc(MPVEncContext *s)
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:233
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:303
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1357
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:97
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:109
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:219
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:267
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:836
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:244
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2651
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:499
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2644
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:232
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3622
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:231
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1234
AVRational::den
int den
Denominator.
Definition: rational.h:60
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2650
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:822
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:271
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:243
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:84
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:939
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:892
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:286
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:769
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
denoise_dct
static void denoise_dct(MPVEncContext *const s, int16_t block[])
Definition: mpegvideo_enc.c:3954
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4297
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:554
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1284
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1300
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3631
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:223
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:171
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1021
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:938
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:565
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:957
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:276
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:227
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:56
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2644
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:211
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:887
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4649
stride
#define stride
Definition: h264pred_template.c:536
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2654
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:308
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:109
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:63
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:196
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:199
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2909
MpegEncContext::dc_val
int16_t * dc_val
used for H.263 AIC/MPEG-4 DC prediction and ER
Definition: mpegvideo.h:144
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2650
pixblockdsp.h
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:238
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:968
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:943
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:263
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:107
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:711
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1605
intmath.h
MPVEncContext::mpeg_quant
int mpeg_quant
Definition: mpegvideoenc.h:166