FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12codecs.h"
51 #include "mpeg12data.h"
52 #include "mpeg12enc.h"
53 #include "mpegvideo.h"
54 #include "mpegvideodata.h"
55 #include "mpegvideoenc.h"
56 #include "h261enc.h"
57 #include "h263.h"
58 #include "h263data.h"
59 #include "h263enc.h"
60 #include "mjpegenc_common.h"
61 #include "mathops.h"
62 #include "mpegutils.h"
63 #include "mpegvideo_unquantize.h"
64 #include "mjpegenc.h"
65 #include "speedhqenc.h"
66 #include "msmpeg4enc.h"
67 #include "pixblockdsp.h"
68 #include "qpeldsp.h"
69 #include "faandct.h"
70 #include "aandcttab.h"
71 #include "mpeg4video.h"
72 #include "mpeg4videodata.h"
73 #include "mpeg4videoenc.h"
74 #include "internal.h"
75 #include "bytestream.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include "libavutil/refstruct.h"
79 #include <limits.h>
80 #include "sp5x.h"
81 
82 #define QUANT_BIAS_SHIFT 8
83 
84 #define QMAT_SHIFT_MMX 16
85 #define QMAT_SHIFT 21
86 
87 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
88 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
89 static int sse_mb(MPVEncContext *const s);
90 static void denoise_dct_c(MPVEncContext *const s, int16_t *block);
91 static int dct_quantize_c(MPVEncContext *const s,
92  int16_t *block, int n,
93  int qscale, int *overflow);
94 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
95 
96 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
97 
98 static const AVOption mpv_generic_options[] = {
101  { NULL },
102 };
103 
105  .class_name = "generic mpegvideo encoder",
106  .item_name = av_default_item_name,
107  .option = mpv_generic_options,
108  .version = LIBAVUTIL_VERSION_INT,
109 };
110 
/**
 * Precompute fixed-point reciprocal quantization tables for all qscales.
 *
 * For every qscale in [qmin, qmax] this converts the (de)quantization matrix
 * quant_matrix into reciprocal form so the encoder can quantize with a
 * multiply + shift instead of a division. The reciprocals are scaled
 * differently depending on which forward DCT is in use, because ff_fdct_ifast
 * produces outputs pre-scaled by ff_aanscales[].
 *
 * @param s            encoder context (provides fdsp, idct permutation, avctx)
 * @param qmat         output: per-qscale 32-bit reciprocal tables (QMAT_SHIFT scaled)
 * @param qmat16       output: per-qscale 16-bit tables ([0] = reciprocal at
 *                     QMAT_SHIFT_MMX, [1] = matching rounding bias); only
 *                     filled for the generic-FDCT branch
 * @param quant_matrix quantization matrix in coded (zigzag-independent) order
 * @param bias         quantizer rounding bias, in QUANT_BIAS_SHIFT units
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill (inclusive)
 * @param intra        1 to skip coefficient 0 (DC, quantized separately for
 *                     intra blocks) in the overflow check, 0 otherwise
 */
void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        int qscale2;

        // MPEG-2 may use a non-linear mapping from the coded qscale index
        // to the actual quantizer step; otherwise the step is 2*qscale.
        if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else                   qscale2 = qscale << 1;

        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            // Unscaled FDCTs: plain reciprocal of (step * matrix entry).
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            // AAN FDCT: its output is scaled by ff_aanscales[i]/2^14, so the
            // reciprocal must fold that factor in (hence the extra +14 shift).
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
                 * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
                 * 1247 <= x <= 900239760
                 * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
                 * 55107840 >= (1 << 36) / (x) >= 76 */

                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            // Generic path: also build the 16-bit tables used by SIMD
            // quantizers (reciprocal + rounding bias per coefficient).
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146
                 *
                 * 1 <= x <= 28560
                 * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
                 * 131072 >= (1 << 17) / (x) >= 4 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                // Clamp to keep the value representable as a signed 16-bit
                // multiplier (0 and 0x8000 are unusable).
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
            }
        }

        // Determine how far QMAT_SHIFT would have to be reduced so that
        // (max coefficient * reciprocal) cannot overflow a 32-bit int.
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        // Purely informational: tables are still written, but intermediate
        // products in the quantizer may overflow for extreme coefficients.
        av_log(s->c.avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
197 
198 static inline void update_qscale(MPVMainEncContext *const m)
199 {
200  MPVEncContext *const s = &m->s;
201 
202  if (s->c.q_scale_type == 1 && 0) {
203  int i;
204  int bestdiff=INT_MAX;
205  int best = 1;
206 
207  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
208  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
209  if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
210  (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
211  continue;
212  if (diff < bestdiff) {
213  bestdiff = diff;
214  best = i;
215  }
216  }
217  s->c.qscale = best;
218  } else {
219  s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
220  (FF_LAMBDA_SHIFT + 7);
221  s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
222  }
223 
224  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
226 }
227 
229 {
230  int i;
231 
232  if (matrix) {
233  put_bits(pb, 1, 1);
234  for (i = 0; i < 64; i++) {
235  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
236  }
237  } else
238  put_bits(pb, 1, 0);
239 }
240 
241 /**
242  * init s->c.cur_pic.qscale_table from s->lambda_table
243  */
244 static void init_qscale_tab(MPVEncContext *const s)
245 {
246  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
247 
248  for (int i = 0; i < s->c.mb_num; i++) {
249  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
250  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
251  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
252  s->c.avctx->qmax);
253  }
254 }
255 
257  const MPVEncContext *const src)
258 {
259 #define COPY(a) dst->a = src->a
260  COPY(c.pict_type);
261  COPY(f_code);
262  COPY(b_code);
263  COPY(c.qscale);
264  COPY(lambda);
265  COPY(lambda2);
266  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
267  COPY(c.progressive_frame); // FIXME don't set in encode_header
268  COPY(c.partitioned_frame); // FIXME don't set in encode_header
269 #undef COPY
270 }
271 
273 {
274  for (int i = -16; i < 16; i++)
275  default_fcode_tab[i + MAX_MV] = 1;
276 }
277 
278 /**
279  * Set the given MPVEncContext to defaults for encoding.
280  */
282 {
283  MPVEncContext *const s = &m->s;
284  static AVOnce init_static_once = AV_ONCE_INIT;
285 
287 
288  s->f_code = 1;
289  s->b_code = 1;
290 
291  if (!m->fcode_tab) {
293  ff_thread_once(&init_static_once, mpv_encode_init_static);
294  }
295  if (!s->c.y_dc_scale_table) {
296  s->c.y_dc_scale_table =
297  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
298  }
299 }
300 
302 {
303  s->dct_quantize = dct_quantize_c;
304  s->denoise_dct = denoise_dct_c;
305 
306 #if ARCH_MIPS
308 #elif ARCH_X86
310 #endif
311 
312  if (s->c.avctx->trellis)
313  s->dct_quantize = dct_quantize_trellis_c;
314 }
315 
317 {
318  MpegEncContext *const s = &s2->c;
319  MPVUnquantDSPContext unquant_dsp_ctx;
320 
321  ff_mpv_unquantize_init(&unquant_dsp_ctx,
322  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
323 
324  if (s2->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
325  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
326  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
327  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
328  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
329  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
330  } else {
331  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
332  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
333  }
334 }
335 
337 {
338  MPVEncContext *const s = &m->s;
339  MECmpContext mecc;
340  me_cmp_func me_cmp[6];
341  int ret;
342 
343  ff_me_cmp_init(&mecc, avctx);
344  ret = ff_me_init(&s->me, avctx, &mecc, 1);
345  if (ret < 0)
346  return ret;
347  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
348  if (ret < 0)
349  return ret;
350  m->frame_skip_cmp_fn = me_cmp[1];
352  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
353  if (ret < 0)
354  return ret;
355  if (!me_cmp[0] || !me_cmp[4])
356  return AVERROR(EINVAL);
357  s->ildct_cmp[0] = me_cmp[0];
358  s->ildct_cmp[1] = me_cmp[4];
359  }
360 
361  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
362 
363  s->sse_cmp[0] = mecc.sse[0];
364  s->sse_cmp[1] = mecc.sse[1];
365  s->sad_cmp[0] = mecc.sad[0];
366  s->sad_cmp[1] = mecc.sad[1];
367  if (avctx->mb_cmp == FF_CMP_NSSE) {
368  s->n_sse_cmp[0] = mecc.nsse[0];
369  s->n_sse_cmp[1] = mecc.nsse[1];
370  } else {
371  s->n_sse_cmp[0] = mecc.sse[0];
372  s->n_sse_cmp[1] = mecc.sse[1];
373  }
374 
375  return 0;
376 }
377 
378 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
380 {
381  MPVEncContext *const s = &m->s;
382  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
383  const uint16_t *intra_matrix, *inter_matrix;
384  int ret;
385 
386  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
387  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
388  return AVERROR(ENOMEM);
389 
390  if (s->c.out_format == FMT_MJPEG) {
391  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
392  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
393  // No need to set q_inter_matrix
395  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
396  return 0;
397  } else {
398  s->q_chroma_intra_matrix = s->q_intra_matrix;
399  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
400  }
401  if (!m->intra_only) {
402  s->q_inter_matrix = s->q_intra_matrix + 32;
403  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
404  }
405 
406  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
407  s->mpeg_quant) {
410  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
411  intra_matrix =
413  } else {
414  /* MPEG-1/2, SpeedHQ */
417  }
418  if (avctx->intra_matrix)
420  if (avctx->inter_matrix)
422 
423  /* init q matrix */
424  for (int i = 0; i < 64; i++) {
425  int j = s->c.idsp.idct_permutation[i];
426 
427  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
428  s->c.inter_matrix[j] = inter_matrix[i];
429  }
430 
431  /* precompute matrix */
433  if (ret < 0)
434  return ret;
435 
436  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
437  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
438  31, 1);
439  if (s->q_inter_matrix)
440  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
441  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
442  31, 0);
443 
444  return 0;
445 }
446 
448 {
449  MPVEncContext *const s = &m->s;
450  int has_b_frames = !!m->max_b_frames;
451  int16_t (*mv_table)[2];
452 
453  /* Allocate MB type table */
454  unsigned mb_array_size = s->c.mb_stride * s->c.mb_height;
455  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
456  if (!s->mb_type)
457  return AVERROR(ENOMEM);
458  s->mc_mb_var = s->mb_type + mb_array_size;
459  s->mb_var = s->mc_mb_var + mb_array_size;
460  s->mb_mean = (uint8_t*)(s->mb_var + mb_array_size);
461 
462  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
463  return AVERROR(ENOMEM);
464 
465  unsigned mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
466  unsigned nb_mv_tables = 1 + 5 * has_b_frames;
467  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
468  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
469  nb_mv_tables += 8 * has_b_frames;
470  s->p_field_select_table[0] = av_calloc(mv_table_size, 2 * (2 + 4 * has_b_frames));
471  if (!s->p_field_select_table[0])
472  return AVERROR(ENOMEM);
473  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
474  }
475 
476  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
477  if (!mv_table)
478  return AVERROR(ENOMEM);
479  m->mv_table_base = mv_table;
480  mv_table += s->c.mb_stride + 1;
481 
482  s->p_mv_table = mv_table;
483  if (has_b_frames) {
484  s->b_forw_mv_table = mv_table += mv_table_size;
485  s->b_back_mv_table = mv_table += mv_table_size;
486  s->b_bidir_forw_mv_table = mv_table += mv_table_size;
487  s->b_bidir_back_mv_table = mv_table += mv_table_size;
488  s->b_direct_mv_table = mv_table += mv_table_size;
489 
490  if (s->p_field_select_table[1]) { // MPEG-4 or INTERLACED_ME above
491  uint8_t *field_select = s->p_field_select_table[1];
492  for (int j = 0; j < 2; j++) {
493  for (int k = 0; k < 2; k++) {
494  for (int l = 0; l < 2; l++)
495  s->b_field_mv_table[j][k][l] = mv_table += mv_table_size;
496  s->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
497  }
498  }
499  }
500  }
501 
502  return 0;
503 }
504 
506 {
507  MPVEncContext *const s = &m->s;
508  // Align the following per-thread buffers to avoid false sharing.
509  enum {
510 #ifndef _MSC_VER
511  /// The number is supposed to match/exceed the cache-line size.
512  ALIGN = FFMAX(128, _Alignof(max_align_t)),
513 #else
514  ALIGN = 128,
515 #endif
516  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
517  };
518  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
519  "Need checks for potential overflow.");
520  unsigned nb_slices = s->c.slice_context_count;
521  char *dct_error = NULL;
522 
523  if (m->noise_reduction) {
524  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
525  return AVERROR(ENOMEM);
526  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
527  if (!dct_error)
528  return AVERROR(ENOMEM);
530  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
531  }
532 
533  const int y_size = s->c.b8_stride * (2 * s->c.mb_height + 1);
534  const int c_size = s->c.mb_stride * (s->c.mb_height + 1);
535  const int yc_size = y_size + 2 * c_size;
536  ptrdiff_t offset = 0;
537 
538  for (unsigned i = 0; i < nb_slices; ++i) {
539  MPVEncContext *const s2 = s->c.enc_contexts[i];
540 
541  if (dct_error) {
542  s2->dct_offset = s->dct_offset;
543  s2->dct_error_sum = (void*)dct_error;
544  dct_error += DCT_ERROR_SIZE;
545  }
546 
547  if (s2->c.ac_val) {
548  s2->c.dc_val += offset + i;
549  s2->c.ac_val += offset;
550  offset += yc_size;
551  }
552  }
553  return 0;
554 }
555 
556 /* init video encoder */
558 {
559  MPVMainEncContext *const m = avctx->priv_data;
560  MPVEncContext *const s = &m->s;
561  AVCPBProperties *cpb_props;
562  int gcd, ret;
563 
565 
566  switch (avctx->pix_fmt) {
567  case AV_PIX_FMT_YUVJ444P:
568  case AV_PIX_FMT_YUV444P:
569  s->c.chroma_format = CHROMA_444;
570  break;
571  case AV_PIX_FMT_YUVJ422P:
572  case AV_PIX_FMT_YUV422P:
573  s->c.chroma_format = CHROMA_422;
574  break;
575  default:
576  av_unreachable("Already checked via CODEC_PIXFMTS");
577  case AV_PIX_FMT_YUVJ420P:
578  case AV_PIX_FMT_YUV420P:
579  s->c.chroma_format = CHROMA_420;
580  break;
581  }
582 
584 
585  m->bit_rate = avctx->bit_rate;
586  s->c.width = avctx->width;
587  s->c.height = avctx->height;
588  if (avctx->gop_size > 600 &&
591  "keyframe interval too large!, reducing it from %d to %d\n",
592  avctx->gop_size, 600);
593  avctx->gop_size = 600;
594  }
595  m->gop_size = avctx->gop_size;
596  s->c.avctx = avctx;
598  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
599  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
601  } else if (avctx->max_b_frames < 0) {
603  "max b frames must be 0 or positive for mpegvideo based encoders\n");
604  return AVERROR(EINVAL);
605  }
607  s->c.codec_id = avctx->codec->id;
609  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
610  return AVERROR(EINVAL);
611  }
612 
613  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
614  s->rtp_mode = !!s->rtp_payload_size;
615  s->c.intra_dc_precision = avctx->intra_dc_precision;
616 
617  // workaround some differences between how applications specify dc precision
618  if (s->c.intra_dc_precision < 0) {
619  s->c.intra_dc_precision += 8;
620  } else if (s->c.intra_dc_precision >= 8)
621  s->c.intra_dc_precision -= 8;
622 
623  if (s->c.intra_dc_precision < 0) {
625  "intra dc precision must be positive, note some applications use"
626  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
627  return AVERROR(EINVAL);
628  }
629 
630  if (s->c.intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
631  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
632  return AVERROR(EINVAL);
633  }
635 
636  if (m->gop_size <= 1) {
637  m->intra_only = 1;
638  m->gop_size = 12;
639  } else {
640  m->intra_only = 0;
641  }
642 
643  /* Fixed QSCALE */
645 
646  s->adaptive_quant = (avctx->lumi_masking ||
647  avctx->dark_masking ||
650  avctx->p_masking ||
651  m->border_masking ||
652  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
653  !m->fixed_qscale;
654 
655  s->c.loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
656 
658  switch(avctx->codec_id) {
661  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
662  break;
663  case AV_CODEC_ID_MPEG4:
667  if (avctx->rc_max_rate >= 15000000) {
668  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
669  } else if(avctx->rc_max_rate >= 2000000) {
670  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
671  } else if(avctx->rc_max_rate >= 384000) {
672  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
673  } else
674  avctx->rc_buffer_size = 40;
675  avctx->rc_buffer_size *= 16384;
676  break;
677  }
678  if (avctx->rc_buffer_size) {
679  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
680  }
681  }
682 
683  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
684  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
685  return AVERROR(EINVAL);
686  }
687 
690  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
691  }
692 
694  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
695  return AVERROR(EINVAL);
696  }
697 
699  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
700  return AVERROR(EINVAL);
701  }
702 
703  if (avctx->rc_max_rate &&
707  "impossible bitrate constraints, this will fail\n");
708  }
709 
710  if (avctx->rc_buffer_size &&
713  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
714  return AVERROR(EINVAL);
715  }
716 
717  if (!m->fixed_qscale &&
720  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
722  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
723  if (nbt <= INT_MAX) {
724  avctx->bit_rate_tolerance = nbt;
725  } else
726  avctx->bit_rate_tolerance = INT_MAX;
727  }
728 
729  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
730  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
731  s->c.codec_id != AV_CODEC_ID_FLV1) {
732  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
733  return AVERROR(EINVAL);
734  }
735 
736  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
738  "OBMC is only supported with simple mb decision\n");
739  return AVERROR(EINVAL);
740  }
741 
742  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
743  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
744  return AVERROR(EINVAL);
745  }
746 
747  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
748  s->c.codec_id == AV_CODEC_ID_H263 ||
749  s->c.codec_id == AV_CODEC_ID_H263P) &&
750  (avctx->sample_aspect_ratio.num > 255 ||
751  avctx->sample_aspect_ratio.den > 255)) {
753  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
757  }
758 
759  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
760  s->c.codec_id == AV_CODEC_ID_H263P) &&
761  (avctx->width > 2048 ||
762  avctx->height > 1152 )) {
763  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
764  return AVERROR(EINVAL);
765  }
766  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
767  (avctx->width > 65535 ||
768  avctx->height > 65535 )) {
769  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
770  return AVERROR(EINVAL);
771  }
772  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
773  s->c.codec_id == AV_CODEC_ID_H263P ||
774  s->c.codec_id == AV_CODEC_ID_RV20) &&
775  ((avctx->width &3) ||
776  (avctx->height&3) )) {
777  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
778  return AVERROR(EINVAL);
779  }
780 
781  if (s->c.codec_id == AV_CODEC_ID_RV10 &&
782  (avctx->width &15 ||
783  avctx->height&15 )) {
784  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
785  return AVERROR(EINVAL);
786  }
787 
788  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
789  s->c.codec_id == AV_CODEC_ID_WMV2) &&
790  avctx->width & 1) {
791  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
792  return AVERROR(EINVAL);
793  }
794 
796  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
797  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
798  return AVERROR(EINVAL);
799  }
800 
801  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
802  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
803  return AVERROR(EINVAL);
804  }
805 
806  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
808  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
809  return AVERROR(EINVAL);
810  }
811 
812  if (m->scenechange_threshold < 1000000000 &&
815  "closed gop with scene change detection are not supported yet, "
816  "set threshold to 1000000000\n");
817  return AVERROR_PATCHWELCOME;
818  }
819 
821  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
824  "low delay forcing is only available for mpeg2, "
825  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
826  return AVERROR(EINVAL);
827  }
828  if (m->max_b_frames != 0) {
830  "B-frames cannot be used with low delay\n");
831  return AVERROR(EINVAL);
832  }
833  }
834 
835  if (avctx->slices > 1 &&
837  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
838  return AVERROR(EINVAL);
839  }
840 
843  "notice: b_frame_strategy only affects the first pass\n");
844  m->b_frame_strategy = 0;
845  }
846 
848  if (gcd > 1) {
849  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
850  avctx->time_base.den /= gcd;
851  avctx->time_base.num /= gcd;
852  //return -1;
853  }
854 
855  if (s->mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
856  // (a + x * 3 / 8) / x
857  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
858  s->inter_quant_bias = 0;
859  } else {
860  s->intra_quant_bias = 0;
861  // (a - x / 4) / x
862  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
863  }
864 
865  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
866  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
867  return AVERROR(EINVAL);
868  }
869 
870  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
871 
872  switch (avctx->codec->id) {
873 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
875  s->rtp_mode = 1;
876  /* fallthrough */
878  s->c.out_format = FMT_MPEG1;
879  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
880  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
882  break;
883 #endif
884 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
885  case AV_CODEC_ID_MJPEG:
886  case AV_CODEC_ID_AMV:
887  s->c.out_format = FMT_MJPEG;
888  m->intra_only = 1; /* force intra only for jpeg */
889  avctx->delay = 0;
890  s->c.low_delay = 1;
891  break;
892 #endif
893  case AV_CODEC_ID_SPEEDHQ:
894  s->c.out_format = FMT_SPEEDHQ;
895  m->intra_only = 1; /* force intra only for SHQ */
896  avctx->delay = 0;
897  s->c.low_delay = 1;
898  break;
899  case AV_CODEC_ID_H261:
900  s->c.out_format = FMT_H261;
901  avctx->delay = 0;
902  s->c.low_delay = 1;
903  s->rtp_mode = 0; /* Sliced encoding not supported */
904  break;
905  case AV_CODEC_ID_H263:
906  if (!CONFIG_H263_ENCODER)
909  s->c.width, s->c.height) == 8) {
911  "The specified picture size of %dx%d is not valid for "
912  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
913  "352x288, 704x576, and 1408x1152. "
914  "Try H.263+.\n", s->c.width, s->c.height);
915  return AVERROR(EINVAL);
916  }
917  s->c.out_format = FMT_H263;
918  avctx->delay = 0;
919  s->c.low_delay = 1;
920  break;
921  case AV_CODEC_ID_H263P:
922  s->c.out_format = FMT_H263;
923  /* Fx */
924  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
925  s->c.modified_quant = s->c.h263_aic;
926  s->c.loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
927  s->c.unrestricted_mv = s->c.obmc || s->c.loop_filter || s->c.umvplus;
928  s->c.flipflop_rounding = 1;
929 
930  /* /Fx */
931  /* These are just to be sure */
932  avctx->delay = 0;
933  s->c.low_delay = 1;
934  break;
935  case AV_CODEC_ID_FLV1:
936  s->c.out_format = FMT_H263;
937  s->c.h263_flv = 2; /* format = 1; 11-bit codes */
938  s->c.unrestricted_mv = 1;
939  s->rtp_mode = 0; /* don't allow GOB */
940  avctx->delay = 0;
941  s->c.low_delay = 1;
942  break;
943 #if CONFIG_RV10_ENCODER
944  case AV_CODEC_ID_RV10:
946  s->c.out_format = FMT_H263;
947  avctx->delay = 0;
948  s->c.low_delay = 1;
949  break;
950 #endif
951 #if CONFIG_RV20_ENCODER
952  case AV_CODEC_ID_RV20:
954  s->c.out_format = FMT_H263;
955  avctx->delay = 0;
956  s->c.low_delay = 1;
957  s->c.modified_quant = 1;
958  // Set here to force allocation of dc_val;
959  // will be set later on a per-frame basis.
960  s->c.h263_aic = 1;
961  s->c.loop_filter = 1;
962  s->c.unrestricted_mv = 0;
963  break;
964 #endif
965  case AV_CODEC_ID_MPEG4:
966  s->c.out_format = FMT_H263;
967  s->c.h263_pred = 1;
968  s->c.unrestricted_mv = 1;
969  s->c.flipflop_rounding = 1;
970  s->c.low_delay = m->max_b_frames ? 0 : 1;
971  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
972  break;
974  s->c.out_format = FMT_H263;
975  s->c.h263_pred = 1;
976  s->c.unrestricted_mv = 1;
977  s->c.msmpeg4_version = MSMP4_V2;
978  avctx->delay = 0;
979  s->c.low_delay = 1;
980  break;
982  s->c.out_format = FMT_H263;
983  s->c.h263_pred = 1;
984  s->c.unrestricted_mv = 1;
985  s->c.msmpeg4_version = MSMP4_V3;
986  s->c.flipflop_rounding = 1;
987  avctx->delay = 0;
988  s->c.low_delay = 1;
989  break;
990  case AV_CODEC_ID_WMV1:
991  s->c.out_format = FMT_H263;
992  s->c.h263_pred = 1;
993  s->c.unrestricted_mv = 1;
994  s->c.msmpeg4_version = MSMP4_WMV1;
995  s->c.flipflop_rounding = 1;
996  avctx->delay = 0;
997  s->c.low_delay = 1;
998  break;
999  case AV_CODEC_ID_WMV2:
1000  s->c.out_format = FMT_H263;
1001  s->c.h263_pred = 1;
1002  s->c.unrestricted_mv = 1;
1003  s->c.msmpeg4_version = MSMP4_WMV2;
1004  s->c.flipflop_rounding = 1;
1005  avctx->delay = 0;
1006  s->c.low_delay = 1;
1007  break;
1008  default:
1009  av_unreachable("List contains all codecs using ff_mpv_encode_init()");
1010  }
1011 
1012  avctx->has_b_frames = !s->c.low_delay;
1013 
1014  s->c.encoding = 1;
1015 
1016  s->c.progressive_frame =
1017  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
1019  s->c.alternate_scan);
1020 
1023  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1024  (1 << AV_PICTURE_TYPE_P) |
1025  (1 << AV_PICTURE_TYPE_B);
1026  } else if (!m->intra_only) {
1027  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1028  (1 << AV_PICTURE_TYPE_P);
1029  } else {
1030  s->frame_reconstruction_bitfield = 0;
1031  }
1032 
1033  if (m->lmin > m->lmax) {
1034  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1035  m->lmin = m->lmax;
1036  }
1037 
1038  /* ff_mpv_init_duplicate_contexts() will copy (memdup) the contents of the
1039  * main slice to the slice contexts, so we initialize various fields of it
1040  * before calling ff_mpv_init_duplicate_contexts(). */
1041  s->parent = m;
1042  ff_mpv_idct_init(&s->c);
1044  ff_fdctdsp_init(&s->fdsp, avctx);
1045  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1046  ff_pixblockdsp_init(&s->pdsp, 8);
1047  ret = me_cmp_init(m, avctx);
1048  if (ret < 0)
1049  return ret;
1050 
1051  if (!(avctx->stats_out = av_mallocz(256)) ||
1052  !(s->new_pic = av_frame_alloc()) ||
1053  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1054  return AVERROR(ENOMEM);
1055 
1056  ret = init_matrices(m, avctx);
1057  if (ret < 0)
1058  return ret;
1059 
1061 
1062  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1064 #if CONFIG_MSMPEG4ENC
1065  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1067 #endif
1068  }
1069 
1070  s->c.slice_ctx_size = sizeof(*s);
1071  ret = ff_mpv_common_init(&s->c);
1072  if (ret < 0)
1073  return ret;
1074  ret = init_buffers(m);
1075  if (ret < 0)
1076  return ret;
1077  if (s->c.slice_context_count > 1) {
1078  s->rtp_mode = 1;
1080  s->c.h263_slice_structured = 1;
1081  }
1083  if (ret < 0)
1084  return ret;
1085 
1086  ret = init_slice_buffers(m);
1087  if (ret < 0)
1088  return ret;
1089 
1091  if (ret < 0)
1092  return ret;
1093 
1094  if (m->b_frame_strategy == 2) {
1095  for (int i = 0; i < m->max_b_frames + 2; i++) {
1096  m->tmp_frames[i] = av_frame_alloc();
1097  if (!m->tmp_frames[i])
1098  return AVERROR(ENOMEM);
1099 
1101  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1102  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1103 
1104  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1105  if (ret < 0)
1106  return ret;
1107  }
1108  }
1109 
1110  cpb_props = ff_encode_add_cpb_side_data(avctx);
1111  if (!cpb_props)
1112  return AVERROR(ENOMEM);
1113  cpb_props->max_bitrate = avctx->rc_max_rate;
1114  cpb_props->min_bitrate = avctx->rc_min_rate;
1115  cpb_props->avg_bitrate = avctx->bit_rate;
1116  cpb_props->buffer_size = avctx->rc_buffer_size;
1117 
1118  return 0;
1119 }
1120 
/* Encoder close callback (ff_mpv_encode_end): tears down everything the
 * init path allocated — common context, picture pool, per-frame scratch
 * frames, motion-vector tables and quantizer matrices.
 * NOTE(review): this extraction is missing several source lines (the
 * signature at 1121 and lines 1126, 1132-1133, 1140, 1144); the code below
 * is kept byte-identical to the rendered text. */
1122 {
1123  MPVMainEncContext *const m = avctx->priv_data;
1124  MPVEncContext *const s = &m->s;
1125 
1127 
1128  ff_mpv_common_end(&s->c);
 /* release the pool of reusable MPVPictures */
1129  av_refstruct_pool_uninit(&s->c.picture_pool);
1130 
 /* body of this loop (freeing per-B-frame state) is missing from this view */
1131  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
1134  }
1135  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1136  av_frame_free(&m->tmp_frames[i]);
1137 
1138  av_frame_free(&s->new_pic);
1139 
1141 
 /* free motion estimation / mode decision side tables */
1142  av_freep(&m->mv_table_base);
1143  av_freep(&s->p_field_select_table[0]);
1145 
1146  av_freep(&s->mb_type);
1147  av_freep(&s->lambda_table);
1148 
 /* quantizer matrices allocated by init_matrices() */
1149  av_freep(&s->q_intra_matrix);
1150  av_freep(&s->q_intra_matrix16);
1151  av_freep(&s->dct_offset);
1152 
1153  return 0;
1154 }
1155 
1156 /* put block[] to dest[] */
1157 static inline void put_dct(MPVEncContext *const s,
1158  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1159 {
1160  s->c.dct_unquantize_intra(&s->c, block, i, qscale);
1161  s->c.idsp.idct_put(dest, line_size, block);
1162 }
1163 
1164 static inline void add_dequant_dct(MPVEncContext *const s,
1165  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1166 {
1167  if (s->c.block_last_index[i] >= 0) {
1168  s->c.dct_unquantize_inter(&s->c, block, i, qscale);
1169 
1170  s->c.idsp.idct_add(dest, line_size, block);
1171  }
1172 }
1173 
1174 /**
1175  * Performs dequantization and IDCT (if necessary)
1176  */
/* Reconstruct one macroblock into cur_pic (dequantize + IDCT), exactly as a
 * decoder would, so the reconstructed frame can serve as a reference.
 * Reconstruction is skipped for picture types not selected in
 * frame_reconstruction_bitfield. With FF_DEBUG_DCT_COEFF set, the (permuted)
 * coefficients of the first 6 blocks are dumped first. */
1177 static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
1178 {
1179  if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
1180  /* print DCT coefficients */
1181  av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
1182  for (int i = 0; i < 6; i++) {
1183  for (int j = 0; j < 64; j++) {
 /* undo the IDCT permutation so coefficients print in natural order */
1184  av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
1185  block[i][s->c.idsp.idct_permutation[j]]);
1186  }
1187  av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
1188  }
1189  }
1190 
1191  if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
1192  uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
1193  int dct_linesize, dct_offset;
1194  const int linesize = s->c.cur_pic.linesize[0];
1195  const int uvlinesize = s->c.cur_pic.linesize[1];
1196  const int block_size = 8;
1197 
 /* for interlaced DCT the two field blocks interleave: double the stride
  * and offset the second pair of luma blocks by one line instead of 8 */
1198  dct_linesize = linesize << s->c.interlaced_dct;
1199  dct_offset = s->c.interlaced_dct ? linesize : linesize * block_size;
1200 
1201  if (!s->c.mb_intra) {
1202  /* No MC, as that was already done otherwise */
1203  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1204  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1205  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1206  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1207 
 /* chroma is skipped entirely in gray-only mode */
1208  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1209  if (s->c.chroma_y_shift) {
 /* 4:2:0 — one 8x8 block per chroma plane */
1210  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1211  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1212  } else {
 /* 4:2:2 — two stacked 8x8 blocks per chroma plane */
1213  dct_linesize >>= 1;
1214  dct_offset >>= 1;
1215  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1216  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1217  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1218  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1219  }
1220  }
1221  } else {
1222  /* dct only in intra block */
1223  put_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1224  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1225  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1226  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1227 
1228  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1229  if (s->c.chroma_y_shift) {
1230  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1231  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1232  } else {
1233  dct_offset >>= 1;
1234  dct_linesize >>= 1;
1235  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1236  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1237  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1238  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1239  }
1240  }
1241  }
1242  }
1243 }
1244 
/* Sum of absolute differences of a 16x16 block against a constant
 * reference value (typically the block mean): a cheap measure of how much
 * the block deviates from flat. */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++, src += stride) {
        for (int col = 0; col < 16; col++) {
            int diff = src[col] - ref;
            sum += diff >= 0 ? diff : -diff;
        }
    }

    return sum;
}
1258 
1259 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1260  const uint8_t *ref, int stride)
1261 {
1262  int x, y, w, h;
1263  int acc = 0;
1264 
1265  w = s->c.width & ~15;
1266  h = s->c.height & ~15;
1267 
1268  for (y = 0; y < h; y += 16) {
1269  for (x = 0; x < w; x += 16) {
1270  int offset = x + y * stride;
1271  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1272  stride, 16);
1273  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1274  int sae = get_sae(src + offset, mean, stride);
1275 
1276  acc += sae + 500 < sad;
1277  }
1278  }
1279  return acc;
1280 }
1281 
1282 /**
1283  * Allocates new buffers for an AVFrame and copies the properties
1284  * from another AVFrame.
1285  */
1286 static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
1287 {
1288  AVCodecContext *avctx = s->c.avctx;
1289  int ret;
1290 
 /* allocate with an EDGE_WIDTH border on every side for motion estimation
  * overreads; the visible size is restored further below */
1291  f->width = avctx->width + 2 * EDGE_WIDTH;
1292  f->height = avctx->height + 2 * EDGE_WIDTH;
1293 
 /* NOTE(review): source line 1294 (the buffer-allocation call whose result
  * is checked below) is missing from this extraction */
1295  if (ret < 0)
1296  return ret;
1297 
1298  ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
1299  if (ret < 0)
1300  return ret;
1301 
 /* advance each plane pointer past the top/left border so data[] points at
  * the visible picture area */
1302  for (int i = 0; f->data[i]; i++) {
1303  int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
1304  f->linesize[i] +
1305  (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
1306  f->data[i] += offset;
1307  }
1308  f->width = avctx->width;
1309  f->height = avctx->height;
1310 
 /* copy pts/metadata/etc. from the caller's frame */
1311  ret = av_frame_copy_props(f, props_frame);
1312  if (ret < 0)
1313  return ret;
1314 
1315  return 0;
1316 }
1317 
/* Accept one user frame (or NULL on flush) into the encoder's input queue.
 * Validates/derives the pts, decides whether the user's buffer can be
 * referenced directly ("direct") or must be copied into an edge-padded
 * internal buffer, then shifts input_picture[] and stores the new entry at
 * position encoding_delay. Returns 0 or a negative AVERROR. */
1318 static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
1319 {
1320  MPVEncContext *const s = &m->s;
1321  MPVPicture *pic = NULL;
1322  int64_t pts;
1323  int display_picture_number = 0, ret;
 /* frames of lookahead needed before the first output packet */
1324  int encoding_delay = m->max_b_frames ? m->max_b_frames
1325  : (s->c.low_delay ? 0 : 1);
1326  int flush_offset = 1;
1327  int direct = 1;
1328 
1329  av_assert1(!m->input_picture[0]);
1330 
1331  if (pic_arg) {
1332  pts = pic_arg->pts;
1333  display_picture_number = m->input_picture_number++;
1334 
1335  if (pts != AV_NOPTS_VALUE) {
1336  if (m->user_specified_pts != AV_NOPTS_VALUE) {
1337  int64_t last = m->user_specified_pts;
1338 
 /* pts must be strictly increasing */
1339  if (pts <= last) {
1340  av_log(s->c.avctx, AV_LOG_ERROR,
1341  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1342  pts, last);
1343  return AVERROR(EINVAL);
1344  }
1345 
1346  if (!s->c.low_delay && display_picture_number == 1)
1347  m->dts_delta = pts - last;
1348  }
1349  m->user_specified_pts = pts;
1350  } else {
 /* no pts supplied: continue from the last one, or fall back to the
  * display picture number */
1351  if (m->user_specified_pts != AV_NOPTS_VALUE) {
1352  m->user_specified_pts =
1353  pts = m->user_specified_pts + 1;
1354  av_log(s->c.avctx, AV_LOG_INFO,
1355  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1356  pts);
1357  } else {
1358  pts = display_picture_number;
1359  }
1360  }
1361 
 /* the user's buffer can only be used in place if its layout and
  * alignment exactly match what the encoder expects */
1362  if (pic_arg->linesize[0] != s->c.linesize ||
1363  pic_arg->linesize[1] != s->c.uvlinesize ||
1364  pic_arg->linesize[2] != s->c.uvlinesize)
1365  direct = 0;
1366  if ((s->c.width & 15) || (s->c.height & 15))
1367  direct = 0;
1368  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1369  direct = 0;
1370  if (s->c.linesize & (STRIDE_ALIGN-1))
1371  direct = 0;
1372 
1373  ff_dlog(s->c.avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1374  pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);
1375 
1376  pic = av_refstruct_pool_get(s->c.picture_pool);
1377  if (!pic)
1378  return AVERROR(ENOMEM);
1379 
1380  if (direct) {
 /* zero-copy: just take a reference on the user's frame */
1381  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1382  goto fail;
1383  pic->shared = 1;
1384  } else {
1385  ret = prepare_picture(s, pic->f, pic_arg);
1386  if (ret < 0)
1387  goto fail;
1388 
1389  for (int i = 0; i < 3; i++) {
1390  ptrdiff_t src_stride = pic_arg->linesize[i];
1391  ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
1392  int h_shift = i ? s->c.chroma_x_shift : 0;
1393  int v_shift = i ? s->c.chroma_y_shift : 0;
1394  int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
1395  int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
1396  const uint8_t *src = pic_arg->data[i];
1397  uint8_t *dst = pic->f->data[i];
1398  int vpad = 16;
1399 
 /* interlaced MPEG-2 aligns to 32 lines, so pad further when the
  * 32-alignment gap exceeds 16 lines */
1400  if ( s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
1401  && !s->c.progressive_sequence
1402  && FFALIGN(s->c.height, 32) - s->c.height > 16)
1403  vpad = 32;
1404 
1405  if (!s->c.avctx->rc_buffer_size)
1406  dst += INPLACE_OFFSET;
1407 
1408  if (src_stride == dst_stride)
1409  memcpy(dst, src, src_stride * h - src_stride + w);
1410  else {
 /* strides differ: copy row by row */
1411  int h2 = h;
1412  uint8_t *dst2 = dst;
1413  while (h2--) {
1414  memcpy(dst2, src, w);
1415  dst2 += dst_stride;
1416  src += src_stride;
1417  }
1418  }
 /* replicate the last row/column into the bottom padding */
1419  if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
1420  s->mpvencdsp.draw_edges(dst, dst_stride,
1421  w, h,
1422  16 >> h_shift,
1423  vpad >> v_shift,
1424  EDGE_BOTTOM);
1425  }
1426  }
1427  emms_c();
1428  }
1429 
1430  pic->display_picture_number = display_picture_number;
1431  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1432  } else if (!m->reordered_input_picture[1]) {
1433  /* Flushing: When the above check is true, the encoder is about to run
1434  * out of frames to encode. Check if there are input_pictures left;
1435  * if so, ensure m->input_picture[0] contains the first picture.
1436  * A flush_offset != 1 will only happen if we did not receive enough
1437  * input frames. */
1438  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1439  if (m->input_picture[flush_offset])
1440  break;
1441 
1442  encoding_delay -= flush_offset - 1;
1443  }
1444 
1445  /* shift buffer entries */
1446  for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
1447  m->input_picture[i - flush_offset] = m->input_picture[i];
1448  for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
1449  m->input_picture[i] = NULL;
1450 
1451  m->input_picture[encoding_delay] = pic;
1452 
1453  return 0;
1454 fail:
1455  av_refstruct_unref(&pic);
1456  return ret;
1457 }
1458 
/* Decide whether picture p is similar enough to the reference to be skipped
 * entirely. Compares every 8x8 block of all three planes with
 * frame_skip_cmp_fn and accumulates a score whose shape depends on
 * |frame_skip_exp| (max, L1, L2, L3, L4); a negative exponent applies the
 * corresponding root via pow(). Returns 1 to skip, 0 to encode. */
1459 static int skip_check(MPVMainEncContext *const m,
1460  const MPVPicture *p, const MPVPicture *ref)
1461 {
1462  MPVEncContext *const s = &m->s;
1463  int score = 0;
1464  int64_t score64 = 0;
1465 
1466  for (int plane = 0; plane < 3; plane++) {
1467  const int stride = p->f->linesize[plane];
 /* luma covers 2x2 8x8 blocks per MB, chroma 1x1 (4:2:0 layout assumed
  * here — blocks are addressed in 8-pixel units) */
1468  const int bw = plane ? 1 : 2;
1469  for (int y = 0; y < s->c.mb_height * bw; y++) {
1470  for (int x = 0; x < s->c.mb_width * bw; x++) {
 /* non-shared (copied) input sits 16 bytes into the padded buffer */
1471  int off = p->shared ? 0 : 16;
1472  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1473  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1474  int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1475 
1476  switch (FFABS(m->frame_skip_exp)) {
1477  case 0: score = FFMAX(score, v); break;
1478  case 1: score += FFABS(v); break;
1479  case 2: score64 += v * (int64_t)v; break;
1480  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1481  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1482  }
1483  }
1484  }
1485  }
1486  emms_c();
1487 
 /* exponents 0/1 accumulated into the 32-bit score; unify into score64 */
1488  if (score)
1489  score64 = score;
1490  if (m->frame_skip_exp < 0)
1491  score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
1492  -1.0/m->frame_skip_exp);
1493 
1494  if (score64 < m->frame_skip_threshold)
1495  return 1;
 /* lambda-scaled threshold: skip more aggressively at high quantizers */
1496  if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
1497  return 1;
1498  return 0;
1499 }
1500 
/* Helper for estimate_best_b_count(): feeds one frame to an auxiliary
 * encoder context and drains all resulting packets, returning the total
 * packet size in bytes (or a negative AVERROR).
 * NOTE(review): this extraction is missing the signature line (1501) and
 * the send/receive calls (1506, 1511, 1514) whose results are checked
 * below; the code is kept byte-identical to the rendered text. */
1502 {
1503  int ret;
1504  int size = 0;
1505 
1507  if (ret < 0)
1508  return ret;
1509 
1510  do {
1512  if (ret >= 0) {
 /* accumulate the size of every packet produced */
1513  size += pkt->size;
1515  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1516  return ret;
1517  } while (ret >= 0);
1518 
1519  return size;
1520 }
1521 
/* b_frame_strategy == 2: brute-force search for the best number of B-frames
 * in the next chain. Downscales the queued input pictures by brd_scale,
 * then, for each candidate chain length j, encodes the sequence with a
 * temporary encoder context and picks the j with the lowest rate-distortion
 * cost. Returns the best count, or a negative AVERROR.
 * NOTE(review): several lines are missing from this extraction (signature
 * 1522, 1547, 1588, 1596, 1611, 1626, 1639, 1654-1655 — including the
 * temporary-context allocation and close); code kept byte-identical. */
1523 {
1524  MPVEncContext *const s = &m->s;
1525  AVPacket *pkt;
1526  const int scale = m->brd_scale;
1527  int width = s->c.width >> scale;
1528  int height = s->c.height >> scale;
1529  int out_size, p_lambda, b_lambda, lambda2;
1530  int64_t best_rd = INT64_MAX;
1531  int best_b_count = -1;
1532  int ret = 0;
1533 
1534  av_assert0(scale >= 0 && scale <= 3);
1535 
1536  pkt = av_packet_alloc();
1537  if (!pkt)
1538  return AVERROR(ENOMEM);
1539 
1540  //emms_c();
 /* reuse the lambdas of the previous pictures of each type */
1541  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1542  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1543  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1544  if (!b_lambda) // FIXME we should do this somewhere else
1545  b_lambda = p_lambda;
1546  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1548 
 /* downscale next_pic plus the queued inputs into tmp_frames[] */
1549  for (int i = 0; i < m->max_b_frames + 2; i++) {
1550  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1551  s->c.next_pic.ptr;
1552 
1553  if (pre_input_ptr) {
1554  const uint8_t *data[4];
1555  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1556 
 /* copied (non-shared) inputs live INPLACE_OFFSET into their buffer */
1557  if (!pre_input_ptr->shared && i) {
1558  data[0] += INPLACE_OFFSET;
1559  data[1] += INPLACE_OFFSET;
1560  data[2] += INPLACE_OFFSET;
1561  }
1562 
1563  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1564  m->tmp_frames[i]->linesize[0],
1565  data[0],
1566  pre_input_ptr->f->linesize[0],
1567  width, height);
1568  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1569  m->tmp_frames[i]->linesize[1],
1570  data[1],
1571  pre_input_ptr->f->linesize[1],
1572  width >> 1, height >> 1);
1573  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1574  m->tmp_frames[i]->linesize[2],
1575  data[2],
1576  pre_input_ptr->f->linesize[2],
1577  width >> 1, height >> 1);
1578  }
1579  }
1580 
 /* try every candidate B-frame chain length j */
1581  for (int j = 0; j < m->max_b_frames + 1; j++) {
1582  AVCodecContext *c;
1583  int64_t rd = 0;
1584 
1585  if (!m->input_picture[j])
1586  break;
1587 
1589  if (!c) {
1590  ret = AVERROR(ENOMEM);
1591  goto fail;
1592  }
1593 
 /* mirror the relevant settings of the real encoder context */
1594  c->width = width;
1595  c->height = height;
1597  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1598  c->mb_decision = s->c.avctx->mb_decision;
1599  c->me_cmp = s->c.avctx->me_cmp;
1600  c->mb_cmp = s->c.avctx->mb_cmp;
1601  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1602  c->pix_fmt = AV_PIX_FMT_YUV420P;
1603  c->time_base = s->c.avctx->time_base;
1604  c->max_b_frames = m->max_b_frames;
1605 
1606  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1607  if (ret < 0)
1608  goto fail;
1609 
1610 
 /* frame 0 stands in for the existing reference; encode it cheaply */
1612  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1613 
1614  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1615  if (out_size < 0) {
1616  ret = out_size;
1617  goto fail;
1618  }
1619 
1620  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1621 
1622  for (int i = 0; i < m->max_b_frames + 1; i++) {
 /* every (j+1)-th frame — and the final one — is a P frame */
1623  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1624 
1625  m->tmp_frames[i + 1]->pict_type = is_p ?
1627  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1628 
1629  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1630  if (out_size < 0) {
1631  ret = out_size;
1632  goto fail;
1633  }
1634 
1635  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1636  }
1637 
1638  /* get the delayed frames */
1640  if (out_size < 0) {
1641  ret = out_size;
1642  goto fail;
1643  }
1644  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1645 
 /* add the distortion accumulated by the temporary encoder */
1646  rd += c->error[0] + c->error[1] + c->error[2];
1647 
1648  if (rd < best_rd) {
1649  best_rd = rd;
1650  best_b_count = j;
1651  }
1652 
1653 fail:
1656  if (ret < 0) {
1657  best_b_count = ret;
1658  break;
1659  }
1660  }
1661 
1662  av_packet_free(&pkt);
1663 
1664  return best_b_count;
1665 }
1666 
1667 /**
1668  * Determines whether an input picture is discarded or not
1669  * and if not determines the length of the next chain of B frames
1670  * and moves these pictures (including the P frame) into
1671  * reordered_input_picture.
1672  * input_picture[0] is always NULL when exiting this function, even on error;
1673  * reordered_input_picture[0] is always NULL when exiting this function on error.
1674  */
/* (Body of the function documented directly above; its signature line, 1675,
 * is missing from this extraction — code kept byte-identical.) */
1676 {
1677  MPVEncContext *const s = &m->s;
1678 
1679  /* Either nothing to do or can't do anything */
1680  if (m->reordered_input_picture[0] || !m->input_picture[0])
1681  return 0;
1682 
1683  /* set next picture type & ordering */
1684  if (m->frame_skip_threshold || m->frame_skip_factor) {
1685  if (m->picture_in_gop_number < m->gop_size &&
1686  s->c.next_pic.ptr &&
1687  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1688  // FIXME check that the gop check above is +-1 correct
 /* frame is skipped: (missing line 1689 presumably discards it) and the
  * rate control is informed of a zero-bit frame */
1690 
1691  ff_vbv_update(m, 0);
1692 
1693  return 0;
1694  }
1695  }
1696 
 /* no reference yet (or intra-only codec): the next picture starts a chain
  * by itself */
1697  if (/* m->picture_in_gop_number >= m->gop_size || */
1698  !s->c.next_pic.ptr || m->intra_only) {
1699  m->reordered_input_picture[0] = m->input_picture[0];
1700  m->input_picture[0] = NULL;
1703  m->coded_picture_number++;
1704  } else {
1705  int b_frames = 0;
1706 
 /* two-pass mode: picture types come from the first-pass log */
1707  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1708  for (int i = 0; i < m->max_b_frames + 1; i++) {
1709  int pict_num = m->input_picture[0]->display_picture_number + i;
1710 
1711  if (pict_num >= m->rc_context.num_entries)
1712  break;
1713  if (!m->input_picture[i]) {
1714  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1715  break;
1716  }
1717 
1718  m->input_picture[i]->f->pict_type =
1719  m->rc_context.entry[pict_num].new_pict_type;
1720  }
1721  }
1722 
1723  if (m->b_frame_strategy == 0) {
 /* strategy 0: always use the maximum available chain */
1724  b_frames = m->max_b_frames;
1725  while (b_frames && !m->input_picture[b_frames])
1726  b_frames--;
1727  } else if (m->b_frame_strategy == 1) {
 /* strategy 1: score successive frames by their intra-block count
  * (line 1731-1732, the get_intra_count() call, missing here) */
1728  for (int i = 1; i < m->max_b_frames + 1; i++) {
1729  if (m->input_picture[i] &&
1730  m->input_picture[i]->b_frame_score == 0) {
1733  m->input_picture[i ]->f->data[0],
1734  m->input_picture[i - 1]->f->data[0],
1735  s->c.linesize) + 1;
1736  }
1737  }
1738  for (int i = 0;; i++) {
1739  if (i >= m->max_b_frames + 1 ||
1740  !m->input_picture[i] ||
1741  m->input_picture[i]->b_frame_score - 1 >
1742  s->c.mb_num / m->b_sensitivity) {
1743  b_frames = FFMAX(0, i - 1);
1744  break;
1745  }
1746  }
1747 
1748  /* reset scores */
1749  for (int i = 0; i < b_frames + 1; i++)
1750  m->input_picture[i]->b_frame_score = 0;
1751  } else if (m->b_frame_strategy == 2) {
1752  b_frames = estimate_best_b_count(m);
1753  if (b_frames < 0) {
1755  return b_frames;
1756  }
1757  }
1758 
1759  emms_c();
 /* honor forced picture types: cut the chain before any forced non-B */
1760 
1761  for (int i = b_frames - 1; i >= 0; i--) {
1762  int type = m->input_picture[i]->f->pict_type;
1763  if (type && type != AV_PICTURE_TYPE_B)
1764  b_frames = i;
1765  }
1766  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1767  b_frames == m->max_b_frames) {
1768  av_log(s->c.avctx, AV_LOG_ERROR,
1769  "warning, too many B-frames in a row\n");
1770  }
1771 
 /* GOP boundary: either shorten the chain (strict GOP) or promote the
  * chain-ending picture to I */
1772  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1773  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1774  m->gop_size > m->picture_in_gop_number) {
1775  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1776  } else {
1777  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1778  b_frames = 0;
1779  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1780  }
1781  }
1782 
 /* closed GOP: no B-frame may reference across an I frame */
1783  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1784  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1785  b_frames--;
1786 
 /* emit the chain-ending reference first, then the B-frames
  * (lines 1789-1791 and 1797-1798, the pict_type assignments, missing) */
1787  m->reordered_input_picture[0] = m->input_picture[b_frames];
1788  m->input_picture[b_frames] = NULL;
1792  m->coded_picture_number++;
1793  for (int i = 0; i < b_frames; i++) {
1794  m->reordered_input_picture[i + 1] = m->input_picture[i];
1795  m->input_picture[i] = NULL;
1796  m->reordered_input_picture[i + 1]->f->pict_type =
1799  m->coded_picture_number++;
1800  }
1801  }
1802 
1803  return 0;
1804 }
1805 
/* Pick the next picture to encode: builds the next B-frame chain (via the
 * chain-length helper above), then materializes reordered_input_picture[0]
 * as the current picture — either copying it into a fresh padded buffer
 * (shared input or VBV in use) or reusing its buffer in place — and
 * allocates the per-picture encoding accessories.
 * NOTE(review): lines 1806 (signature), 1811, 1814-1815, 1817, 1825-1826
 * and 1860 are missing from this extraction; code kept byte-identical. */
1807 {
1808  MPVEncContext *const s = &m->s;
1809  int ret;
1810 
1812 
 /* (missing body line 1814-1815: per-index unref/shift of queued pictures) */
1813  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1816 
1818  av_assert1(!m->input_picture[0]);
1819  if (ret < 0)
1820  return ret;
1821 
1822  av_frame_unref(s->new_pic);
1823 
1824  if (m->reordered_input_picture[0]) {
1827 
1828  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1829  // input is a shared pix, so we can't modify it -> allocate a new
1830  // one & ensure that the shared one is reuseable
1831  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1832 
1833  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1834  if (ret < 0)
1835  goto fail;
1836  } else {
1837  // input is not a shared pix -> reuse buffer for current_pix
1838  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1839  if (ret < 0)
1840  goto fail;
 /* skip the padding offset so new_pic points at the visible area */
1841  for (int i = 0; i < MPV_MAX_PLANES; i++)
1842  s->new_pic->data[i] += INPLACE_OFFSET;
1843  }
1844  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1845  m->reordered_input_picture[0] = NULL;
1846  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1847  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1848  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1849  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1850  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1851  if (ret < 0) {
1852  ff_mpv_unref_picture(&s->c.cur_pic);
1853  return ret;
1854  }
1855  s->c.picture_number = s->c.cur_pic.ptr->display_picture_number;
1856 
1857  }
1858  return 0;
1859 fail:
 /* (missing line 1860: cleanup of the reordered picture before returning) */
1861  return ret;
1862 }
1863 
/* Per-frame post-encoding work: pad the reconstructed reference picture's
 * borders (needed for unrestricted motion vectors) and remember the picture
 * type and lambda for the rate control of subsequent frames.
 * NOTE(review): line 1876 (the luma edge widths argument pair) is missing
 * from this extraction; code kept byte-identical. */
1864 static void frame_end(MPVMainEncContext *const m)
1865 {
1866  MPVEncContext *const s = &m->s;
1867 
1868  if (s->c.unrestricted_mv &&
1869  s->c.cur_pic.reference &&
1870  !m->intra_only) {
1871  int hshift = s->c.chroma_x_shift;
1872  int vshift = s->c.chroma_y_shift;
 /* replicate border pixels around all four sides of each plane */
1873  s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
1874  s->c.cur_pic.linesize[0],
1875  s->c.h_edge_pos, s->c.v_edge_pos,
1877  EDGE_TOP | EDGE_BOTTOM);
1878  s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
1879  s->c.cur_pic.linesize[1],
1880  s->c.h_edge_pos >> hshift,
1881  s->c.v_edge_pos >> vshift,
1882  EDGE_WIDTH >> hshift,
1883  EDGE_WIDTH >> vshift,
1884  EDGE_TOP | EDGE_BOTTOM);
1885  s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
1886  s->c.cur_pic.linesize[2],
1887  s->c.h_edge_pos >> hshift,
1888  s->c.v_edge_pos >> vshift,
1889  EDGE_WIDTH >> hshift,
1890  EDGE_WIDTH >> vshift,
1891  EDGE_TOP | EDGE_BOTTOM);
1892  }
1893 
1894  emms_c();
1895 
 /* remembered for rate control / lambda reuse on the next frames */
1896  m->last_pict_type = s->c.pict_type;
1897  m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
1898  if (s->c.pict_type != AV_PICTURE_TYPE_B)
1899  m->last_non_b_pict_type = s->c.pict_type;
1900 }
1901 
/* Refresh the per-coefficient noise-reduction offsets from the running DCT
 * error statistics; the counters are halved once they exceed 2^16 so the
 * statistics track recent frames. (The signature line, 1902, is missing
 * from this extraction.) */
1903 {
1904  MPVEncContext *const s = &m->s;
1905  int intra, i;
1906 
 /* intra == 0: inter blocks, intra == 1: intra blocks */
1907  for (intra = 0; intra < 2; intra++) {
 /* decay the statistics to keep them adaptive */
1908  if (s->dct_count[intra] > (1 << 16)) {
1909  for (i = 0; i < 64; i++) {
1910  s->dct_error_sum[intra][i] >>= 1;
1911  }
1912  s->dct_count[intra] >>= 1;
1913  }
1914 
 /* offset ~ noise_reduction * count / error_sum, rounded */
1915  for (i = 0; i < 64; i++) {
1916  s->dct_offset[intra][i] = (m->noise_reduction *
1917  s->dct_count[intra] +
1918  s->dct_error_sum[intra][i] / 2) /
1919  (s->dct_error_sum[intra][i] + 1);
1920  }
1921  }
1922 }
1923 
/* Per-frame pre-encoding setup: stamps the picture type onto the current
 * frame and, for reference (non-B) pictures, rotates the last/next
 * reference pictures. NOTE(review): line 1937 (the call made when
 * dct_error_sum is set — presumably the noise-reduction update) is missing
 * from this extraction. */
1924 static void frame_start(MPVMainEncContext *const m)
1925 {
1926  MPVEncContext *const s = &m->s;
1927 
1928  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
1929 
 /* B-frames do not become references, so the last/next pair only rotates
  * for I/P pictures */
1930  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
1931  ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
1932  ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
1933  }
1934 
1935  av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
1936  if (s->dct_error_sum) {
1938  }
1939 }
1940 
/* Public encode entry point (ff_mpv_encode_picture): queues the input frame,
 * selects/reorders the next picture, encodes it (re-encoding at a higher
 * lambda if the VBV buffer would overflow), appends stuffing, patches the
 * MPEG-1/2 vbv_delay for CBR, and fills in the output packet.
 * NOTE(review): this extraction is missing a number of lines (the first
 * signature line 1941 and e.g. 1959, 1975, 1978, 1993, 2001, 2034,
 * 2037-2038, 2044, 2047, 2063-2064, 2087, 2096, 2103, 2127, 2148, 2155,
 * 2157); code kept byte-identical to the rendered text. */
1942  const AVFrame *pic_arg, int *got_packet)
1943 {
1944  MPVMainEncContext *const m = avctx->priv_data;
1945  MPVEncContext *const s = &m->s;
1946  int stuffing_count, ret;
1947  int context_count = s->c.slice_context_count;
1948 
1949  ff_mpv_unref_picture(&s->c.cur_pic);
1950 
1951  m->vbv_ignore_qmax = 0;
1952 
1953  m->picture_in_gop_number++;
1954 
1955  ret = load_input_picture(m, pic_arg);
1956  if (ret < 0)
1957  return ret;
1958 
 /* (missing line 1959: picture selection call whose result is checked) */
1960  if (ret < 0)
1961  return ret;
1962 
1963  /* output? */
1964  if (s->new_pic->data[0]) {
 /* single slice context without data partitioning can grow the packet
  * on demand; otherwise allocate the worst case up front */
1965  int growing_buffer = context_count == 1 && !s->c.data_partitioning;
1966  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1967  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1968  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1969  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1970  if (ret < 0)
1971  return ret;
1972  }
1973  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1974  return ret;
1976  if (s->mb_info) {
 /* export per-macroblock info as packet side data (12 bytes per MB) */
1977  s->mb_info_ptr = av_packet_new_side_data(pkt,
1979  s->c.mb_width*s->c.mb_height*12);
1980  if (!s->mb_info_ptr)
1981  return AVERROR(ENOMEM);
1982  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1983  }
1984 
1985  s->c.pict_type = s->new_pic->pict_type;
1986  //emms_c();
1987  frame_start(m);
1988 vbv_retry:
1989  ret = encode_picture(m, pkt);
1990  if (growing_buffer) {
1991  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1992  pkt->data = s->pb.buf;
1994  }
1995  if (ret < 0)
1996  return -1;
1997 
1998  frame_end(m);
1999 
2000  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
2002 
 /* VBV check: if the frame is too large, raise lambda and re-encode */
2003  if (avctx->rc_buffer_size) {
2004  RateControlContext *rcc = &m->rc_context;
2005  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
2006  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
2007  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
2008 
2009  if (put_bits_count(&s->pb) > max_size &&
2010  s->lambda < m->lmax) {
2011  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2012  (s->c.qscale + 1) / s->c.qscale);
2013  if (s->adaptive_quant) {
2014  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
2015  s->lambda_table[i] =
2016  FFMAX(s->lambda_table[i] + min_step,
2017  s->lambda_table[i] * (s->c.qscale + 1) /
2018  s->c.qscale);
2019  }
2020  s->c.mb_skipped = 0; // done in frame_start()
2021  // done in encode_picture() so we must undo it
2022  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
2023  s->c.no_rounding ^= s->c.flipflop_rounding;
2024  }
2025  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
2026  s->c.time_base = s->c.last_time_base;
2027  s->c.last_non_b_time = s->c.time - s->c.pp_time;
2028  }
2029  m->vbv_ignore_qmax = 1;
2030  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2031  goto vbv_retry;
2032  }
2033 
2035  }
2036 
2039 
 /* propagate per-plane encoding error and stats to the caller */
2040  for (int i = 0; i < MPV_MAX_PLANES; i++)
2041  avctx->error[i] += s->encoding_error[i];
2042  ff_side_data_set_encoder_stats(pkt, s->c.cur_pic.ptr->f->quality,
2043  s->encoding_error,
2045  s->c.pict_type);
2046 
2048  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2049  s->misc_bits + s->i_tex_bits +
2050  s->p_tex_bits);
2051  flush_put_bits(&s->pb);
2052  m->frame_bits = put_bits_count(&s->pb);
2053 
2054  stuffing_count = ff_vbv_update(m, m->frame_bits);
2055  m->stuffing_bits = 8*stuffing_count;
2056  if (stuffing_count) {
2057  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2058  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2059  return -1;
2060  }
2061 
 /* codec-specific stuffing syntax (MPEG-1/2 case labels at the missing
  * lines 2063-2064) */
2062  switch (s->c.codec_id) {
2065  while (stuffing_count--) {
2066  put_bits(&s->pb, 8, 0);
2067  }
2068  break;
2069  case AV_CODEC_ID_MPEG4:
2070  put_bits(&s->pb, 16, 0);
2071  put_bits(&s->pb, 16, 0x1C3);
2072  stuffing_count -= 4;
2073  while (stuffing_count--) {
2074  put_bits(&s->pb, 8, 0xFF);
2075  }
2076  break;
2077  default:
2078  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2079  m->stuffing_bits = 0;
2080  }
2081  flush_put_bits(&s->pb);
2082  m->frame_bits = put_bits_count(&s->pb);
2083  }
2084 
2085  /* update MPEG-1/2 vbv_delay for CBR */
2086  if (avctx->rc_max_rate &&
2088  s->c.out_format == FMT_MPEG1 &&
 /* vbv_delay is a 16-bit field in 90 kHz units; only patch it when the
  * buffer is small enough to be representable */
2089  90000LL * (avctx->rc_buffer_size - 1) <=
2090  avctx->rc_max_rate * 0xFFFFLL) {
2091  AVCPBProperties *props;
2092  size_t props_size;
2093 
2094  int vbv_delay, min_delay;
2095  double inbits = avctx->rc_max_rate *
2097  int minbits = m->frame_bits - 8 *
2098  (m->vbv_delay_pos - 1);
2099  double bits = m->rc_context.buffer_index + minbits - inbits;
2100  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2101 
2102  if (bits < 0)
2104  "Internal error, negative bits\n");
2105 
2106  av_assert1(s->c.repeat_first_field == 0);
2107 
2108  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2109  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2110  avctx->rc_max_rate;
2111 
2112  vbv_delay = FFMAX(vbv_delay, min_delay);
2113 
2114  av_assert0(vbv_delay < 0xFFFF);
2115 
 /* splice the 16-bit vbv_delay into the already-written header bytes */
2116  vbv_delay_ptr[0] &= 0xF8;
2117  vbv_delay_ptr[0] |= vbv_delay >> 13;
2118  vbv_delay_ptr[1] = vbv_delay >> 5;
2119  vbv_delay_ptr[2] &= 0x07;
2120  vbv_delay_ptr[2] |= vbv_delay << 3;
2121 
2122  props = av_cpb_properties_alloc(&props_size);
2123  if (!props)
2124  return AVERROR(ENOMEM);
 /* convert from 90 kHz to 27 MHz units */
2125  props->vbv_delay = vbv_delay * 300;
2126 
2128  (uint8_t*)props, props_size);
2129  if (ret < 0) {
2130  av_freep(&props);
2131  return ret;
2132  }
2133  }
2134  m->total_bits += m->frame_bits;
2135 
 /* timestamps: dts lags pts by one reference picture when B-frames delay
  * the output */
2136  pkt->pts = s->c.cur_pic.ptr->f->pts;
2137  pkt->duration = s->c.cur_pic.ptr->f->duration;
2138  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2139  if (!s->c.cur_pic.ptr->coded_picture_number)
2140  pkt->dts = pkt->pts - m->dts_delta;
2141  else
2142  pkt->dts = m->reordered_pts;
2143  m->reordered_pts = pkt->pts;
2144  } else
2145  pkt->dts = pkt->pts;
2146 
2147  // the no-delay case is handled in generic code
2149  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2150  if (ret < 0)
2151  return ret;
2152  }
2153 
2154  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2156  if (s->mb_info)
2158  } else {
 /* nothing to output (still filling the encoding delay) */
2159  m->frame_bits = 0;
2160  }
2161 
2162  ff_mpv_unref_picture(&s->c.cur_pic);
2163 
2164  av_assert1((m->frame_bits & 7) == 0);
2165 
2166  pkt->size = m->frame_bits / 8;
2167  *got_packet = !!pkt->size;
2168  return 0;
2169 }
2170 
/* Single-coefficient elimination: if block n contains only a few isolated
 * +/-1 coefficients (any |level| > 1 aborts), whose position-weighted score
 * stays below the threshold, zero the whole block — the rate saved outweighs
 * the distortion. A negative threshold means the DC coefficient is included
 * in the decision; a positive one skips it. (The signature line, 2171, is
 * missing from this extraction.) */
2172  int n, int threshold)
2173 {
 /* weight of a +/-1 coefficient as a function of the preceding zero run:
  * early, short-run ones are cheap to keep; later ones score 0 */
2174  static const char tab[64] = {
2175  3, 2, 2, 1, 1, 1, 1, 1,
2176  1, 1, 1, 1, 1, 1, 1, 1,
2177  1, 1, 1, 1, 1, 1, 1, 1,
2178  0, 0, 0, 0, 0, 0, 0, 0,
2179  0, 0, 0, 0, 0, 0, 0, 0,
2180  0, 0, 0, 0, 0, 0, 0, 0,
2181  0, 0, 0, 0, 0, 0, 0, 0,
2182  0, 0, 0, 0, 0, 0, 0, 0
2183  };
2184  int score = 0;
2185  int run = 0;
2186  int i;
2187  int16_t *block = s->c.block[n];
2188  const int last_index = s->c.block_last_index[n];
2189  int skip_dc;
2190 
2191  if (threshold < 0) {
2192  skip_dc = 0;
2193  threshold = -threshold;
2194  } else
2195  skip_dc = 1;
2196 
2197  /* Are all we could set to zero already zero? */
2198  if (last_index <= skip_dc - 1)
2199  return;
2200 
2201  for (i = 0; i <= last_index; i++) {
2202  const int j = s->c.intra_scantable.permutated[i];
2203  const int level = FFABS(block[j]);
2204  if (level == 1) {
2205  if (skip_dc && i == 0)
2206  continue;
2207  score += tab[run];
2208  run = 0;
2209  } else if (level > 1) {
 /* any coefficient larger than 1: block is worth keeping */
2210  return;
2211  } else {
2212  run++;
2213  }
2214  }
2215  if (score >= threshold)
2216  return;
 /* zero everything (except possibly DC) in scan order */
2217  for (i = skip_dc; i <= last_index; i++) {
2218  const int j = s->c.intra_scantable.permutated[i];
2219  block[j] = 0;
2220  }
2221  if (block[0])
2222  s->c.block_last_index[n] = 0;
2223  else
2224  s->c.block_last_index[n] = -1;
2225 }
2226 
2227 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2228  int last_index)
2229 {
2230  int i;
2231  const int maxlevel = s->max_qcoeff;
2232  const int minlevel = s->min_qcoeff;
2233  int overflow = 0;
2234 
2235  if (s->c.mb_intra) {
2236  i = 1; // skip clipping of intra dc
2237  } else
2238  i = 0;
2239 
2240  for (; i <= last_index; i++) {
2241  const int j = s->c.intra_scantable.permutated[i];
2242  int level = block[j];
2243 
2244  if (level > maxlevel) {
2245  level = maxlevel;
2246  overflow++;
2247  } else if (level < minlevel) {
2248  level = minlevel;
2249  overflow++;
2250  }
2251 
2252  block[j] = level;
2253  }
2254 
2255  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2256  av_log(s->c.avctx, AV_LOG_INFO,
2257  "warning, clipping %d dct coefficients to %d..%d\n",
2258  overflow, minlevel, maxlevel);
2259 }
2260 
/**
 * Compute a per-coefficient visual weight for one 8x8 block: for every
 * pixel, measure the local activity (a scaled standard-deviation estimate
 * over its 3x3 neighbourhood clamped to the block) so that noise shaping
 * can put quantization error where it is least visible.
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;

            /* 3x3 neighbourhood, clamped to the 8x8 block borders. */
            const int y0 = FFMAX(y - 1, 0), y1 = FFMIN(8, y + 2);
            const int x0 = FFMAX(x - 1, 0), x1 = FFMIN(8, x + 2);

            for (int yy = y0; yy < y1; yy++) {
                for (int xx = x0; xx < x1; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }

            /* 36 * sqrt(count*sum(v^2) - (sum v)^2) / count — an
             * integer local-variance measure. */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2284 
2286  int motion_x, int motion_y,
2287  int mb_block_height,
2288  int mb_block_width,
2289  int mb_block_count,
2290  int chroma_x_shift,
2291  int chroma_y_shift,
2292  int chroma_format)
2293 {
2294 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2295  * and neither of these encoders currently supports 444. */
2296 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2297  (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2298  int16_t weight[12][64];
2299  int16_t orig[12][64];
2300  const int mb_x = s->c.mb_x;
2301  const int mb_y = s->c.mb_y;
2302  int i;
2303  int skip_dct[12];
2304  int dct_offset = s->c.linesize * 8; // default for progressive frames
2305  int uv_dct_offset = s->c.uvlinesize * 8;
2306  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2307  ptrdiff_t wrap_y, wrap_c;
2308 
2309  for (i = 0; i < mb_block_count; i++)
2310  skip_dct[i] = s->skipdct;
2311 
     /* Adaptive quantization: pick this macroblock's lambda from the table
      * and derive the per-MB quantizer delta (dquant), clamped/zeroed as
      * required by the H.263/MPEG-4 syntax. */
2312  if (s->adaptive_quant) {
2313  const int last_qp = s->c.qscale;
2314  const int mb_xy = mb_x + mb_y * s->c.mb_stride;
2315 
2316  s->lambda = s->lambda_table[mb_xy];
2317  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2319 
2320  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2321  s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;
2322 
2323  if (s->c.out_format == FMT_H263) {
2324  s->dquant = av_clip(s->dquant, -2, 2);
2325 
2326  if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
2327  if (!s->c.mb_intra) {
2328  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
2329  if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
2330  s->dquant = 0;
2331  }
2332  if (s->c.mv_type == MV_TYPE_8X8)
2333  s->dquant = 0;
2334  }
2335  }
2336  }
2337  }
2338  ff_set_qscale(&s->c, last_qp + s->dquant);
2339  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2340  ff_set_qscale(&s->c, s->c.qscale + s->dquant);
2341 
     /* Source pointers for this macroblock in the frame to be encoded. */
2342  wrap_y = s->c.linesize;
2343  wrap_c = s->c.uvlinesize;
2344  ptr_y = s->new_pic->data[0] +
2345  (mb_y * 16 * wrap_y) + mb_x * 16;
2346  ptr_cb = s->new_pic->data[1] +
2347  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2348  ptr_cr = s->new_pic->data[2] +
2349  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2350 
     /* Macroblocks sticking out of the picture: replicate edge pixels into
      * the edge-emulation buffer and encode from there. */
2351  if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
2352  s->c.codec_id != AV_CODEC_ID_AMV) {
2353  uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
2354  int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
2355  int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
2356  s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
2357  wrap_y, wrap_y,
2358  16, 16, mb_x * 16, mb_y * 16,
2359  s->c.width, s->c.height);
2360  ptr_y = ebuf;
2361  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2362  wrap_c, wrap_c,
2363  mb_block_width, mb_block_height,
2364  mb_x * mb_block_width, mb_y * mb_block_height,
2365  cw, ch);
2366  ptr_cb = ebuf + 16 * wrap_y;
2367  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2368  wrap_c, wrap_c,
2369  mb_block_width, mb_block_height,
2370  mb_x * mb_block_width, mb_y * mb_block_height,
2371  cw, ch);
2372  ptr_cr = ebuf + 16 * wrap_y + 16;
2373  }
2374 
     /* Intra path: decide frame vs field (interlaced) DCT by comparing
      * ildct costs, then fetch the source pixels into the DCT blocks. */
2375  if (s->c.mb_intra) {
2376  if (INTERLACED_DCT(s)) {
2377  int progressive_score, interlaced_score;
2378 
2379  s->c.interlaced_dct = 0;
2380  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2381  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2382  NULL, wrap_y, 8) - 400;
2383 
2384  if (progressive_score > 0) {
2385  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2386  NULL, wrap_y * 2, 8) +
2387  s->ildct_cmp[1](s, ptr_y + wrap_y,
2388  NULL, wrap_y * 2, 8);
2389  if (progressive_score > interlaced_score) {
2390  s->c.interlaced_dct = 1;
2391 
2392  dct_offset = wrap_y;
2393  uv_dct_offset = wrap_c;
2394  wrap_y <<= 1;
2395  if (chroma_format == CHROMA_422 ||
2397  wrap_c <<= 1;
2398  }
2399  }
2400  }
2401 
2402  s->pdsp.get_pixels(s->c.block[0], ptr_y, wrap_y);
2403  s->pdsp.get_pixels(s->c.block[1], ptr_y + 8, wrap_y);
2404  s->pdsp.get_pixels(s->c.block[2], ptr_y + dct_offset, wrap_y);
2405  s->pdsp.get_pixels(s->c.block[3], ptr_y + dct_offset + 8, wrap_y);
2406 
2407  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2408  skip_dct[4] = 1;
2409  skip_dct[5] = 1;
2410  } else {
2411  s->pdsp.get_pixels(s->c.block[4], ptr_cb, wrap_c);
2412  s->pdsp.get_pixels(s->c.block[5], ptr_cr, wrap_c);
2413  if (chroma_format == CHROMA_422) {
2414  s->pdsp.get_pixels(s->c.block[6], ptr_cb + uv_dct_offset, wrap_c);
2415  s->pdsp.get_pixels(s->c.block[7], ptr_cr + uv_dct_offset, wrap_c);
2416  } else if (chroma_format == CHROMA_444) {
2417  s->pdsp.get_pixels(s->c.block[ 6], ptr_cb + 8, wrap_c);
2418  s->pdsp.get_pixels(s->c.block[ 7], ptr_cr + 8, wrap_c);
2419  s->pdsp.get_pixels(s->c.block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2420  s->pdsp.get_pixels(s->c.block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2421  s->pdsp.get_pixels(s->c.block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2422  s->pdsp.get_pixels(s->c.block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2423  }
2424  }
     /* Inter path: run motion compensation into s->c.dest[], then encode
      * the prediction residual. */
2425  } else {
2426  op_pixels_func (*op_pix)[4];
2427  qpel_mc_func (*op_qpix)[16];
2428  uint8_t *dest_y, *dest_cb, *dest_cr;
2429 
2430  dest_y = s->c.dest[0];
2431  dest_cb = s->c.dest[1];
2432  dest_cr = s->c.dest[2];
2433 
2434  if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
2435  op_pix = s->c.hdsp.put_pixels_tab;
2436  op_qpix = s->c.qdsp.put_qpel_pixels_tab;
2437  } else {
2438  op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
2439  op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
2440  }
2441 
2442  if (s->c.mv_dir & MV_DIR_FORWARD) {
2443  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
2444  s->c.last_pic.data,
2445  op_pix, op_qpix);
     /* For bidirectional prediction the backward MC must average into
      * the forward result, hence the switch to the avg tables. */
2446  op_pix = s->c.hdsp.avg_pixels_tab;
2447  op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
2448  }
2449  if (s->c.mv_dir & MV_DIR_BACKWARD) {
2450  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
2451  s->c.next_pic.data,
2452  op_pix, op_qpix);
2453  }
2454 
     /* Same frame-vs-field DCT decision as the intra path, but on the
      * source/prediction pair (residual energy). */
2455  if (INTERLACED_DCT(s)) {
2456  int progressive_score, interlaced_score;
2457 
2458  s->c.interlaced_dct = 0;
2459  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2460  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2461  ptr_y + wrap_y * 8,
2462  wrap_y, 8) - 400;
2463 
2464  if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
2465  progressive_score -= 400;
2466 
2467  if (progressive_score > 0) {
2468  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2469  wrap_y * 2, 8) +
2470  s->ildct_cmp[0](s, dest_y + wrap_y,
2471  ptr_y + wrap_y,
2472  wrap_y * 2, 8);
2473 
2474  if (progressive_score > interlaced_score) {
2475  s->c.interlaced_dct = 1;
2476 
2477  dct_offset = wrap_y;
2478  uv_dct_offset = wrap_c;
2479  wrap_y <<= 1;
2480  if (chroma_format == CHROMA_422)
2481  wrap_c <<= 1;
2482  }
2483  }
2484  }
2485 
2486  s->pdsp.diff_pixels(s->c.block[0], ptr_y, dest_y, wrap_y);
2487  s->pdsp.diff_pixels(s->c.block[1], ptr_y + 8, dest_y + 8, wrap_y);
2488  s->pdsp.diff_pixels(s->c.block[2], ptr_y + dct_offset,
2489  dest_y + dct_offset, wrap_y);
2490  s->pdsp.diff_pixels(s->c.block[3], ptr_y + dct_offset + 8,
2491  dest_y + dct_offset + 8, wrap_y);
2492 
2493  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2494  skip_dct[4] = 1;
2495  skip_dct[5] = 1;
2496  } else {
2497  s->pdsp.diff_pixels(s->c.block[4], ptr_cb, dest_cb, wrap_c);
2498  s->pdsp.diff_pixels(s->c.block[5], ptr_cr, dest_cr, wrap_c);
2499  if (!chroma_y_shift) { /* 422 */
2500  s->pdsp.diff_pixels(s->c.block[6], ptr_cb + uv_dct_offset,
2501  dest_cb + uv_dct_offset, wrap_c);
2502  s->pdsp.diff_pixels(s->c.block[7], ptr_cr + uv_dct_offset,
2503  dest_cr + uv_dct_offset, wrap_c);
2504  }
2505  }
2506  /* pre quantization */
     /* Skip the DCT of blocks whose SAD against the prediction is below a
      * quantizer-dependent threshold — they would quantize to nothing. */
2507  if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
2508  // FIXME optimize
2509  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
2510  skip_dct[0] = 1;
2511  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
2512  skip_dct[1] = 1;
2513  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2514  wrap_y, 8) < 20 * s->c.qscale)
2515  skip_dct[2] = 1;
2516  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2517  wrap_y, 8) < 20 * s->c.qscale)
2518  skip_dct[3] = 1;
2519  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
2520  skip_dct[4] = 1;
2521  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
2522  skip_dct[5] = 1;
2523  if (!chroma_y_shift) { /* 422 */
2524  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2525  dest_cb + uv_dct_offset,
2526  wrap_c, 8) < 20 * s->c.qscale)
2527  skip_dct[6] = 1;
2528  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2529  dest_cr + uv_dct_offset,
2530  wrap_c, 8) < 20 * s->c.qscale)
2531  skip_dct[7] = 1;
2532  }
2533  }
2534  }
2535 
     /* Noise shaping: keep visual weights and unquantized coefficients so
      * dct_quantize_refine() can redistribute the quantization error. */
2536  if (s->quantizer_noise_shaping) {
2537  if (!skip_dct[0])
2538  get_visual_weight(weight[0], ptr_y , wrap_y);
2539  if (!skip_dct[1])
2540  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2541  if (!skip_dct[2])
2542  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2543  if (!skip_dct[3])
2544  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2545  if (!skip_dct[4])
2546  get_visual_weight(weight[4], ptr_cb , wrap_c);
2547  if (!skip_dct[5])
2548  get_visual_weight(weight[5], ptr_cr , wrap_c);
2549  if (!chroma_y_shift) { /* 422 */
2550  if (!skip_dct[6])
2551  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2552  wrap_c);
2553  if (!skip_dct[7])
2554  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2555  wrap_c);
2556  }
2557  memcpy(orig[0], s->c.block[0], sizeof(int16_t) * 64 * mb_block_count);
2558  }
2559 
2560  /* DCT & quantize */
2561  av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
2562  {
2563  for (i = 0; i < mb_block_count; i++) {
2564  if (!skip_dct[i]) {
2565  int overflow;
2566  s->c.block_last_index[i] = s->dct_quantize(s, s->c.block[i], i, s->c.qscale, &overflow);
2567  // FIXME we could decide to change to quantizer instead of
2568  // clipping
2569  // JS: I don't think that would be a good idea it could lower
2570  // quality instead of improve it. Just INTRADC clipping
2571  // deserves changes in quantizer
2572  if (overflow)
2573  clip_coeffs(s, s->c.block[i], s->c.block_last_index[i]);
2574  } else
2575  s->c.block_last_index[i] = -1;
2576  }
2577  if (s->quantizer_noise_shaping) {
2578  for (i = 0; i < mb_block_count; i++) {
2579  if (!skip_dct[i]) {
2580  s->c.block_last_index[i] =
2581  dct_quantize_refine(s, s->c.block[i], weight[i],
2582  orig[i], i, s->c.qscale);
2583  }
2584  }
2585  }
2586 
     /* Drop near-empty inter blocks (see dct_single_coeff_elimination). */
2587  if (s->luma_elim_threshold && !s->c.mb_intra)
2588  for (i = 0; i < 4; i++)
2589  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2590  if (s->chroma_elim_threshold && !s->c.mb_intra)
2591  for (i = 4; i < mb_block_count; i++)
2592  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2593 
2594  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2595  for (i = 0; i < mb_block_count; i++) {
2596  if (s->c.block_last_index[i] == -1)
2597  s->coded_score[i] = INT_MAX / 256;
2598  }
2599  }
2600  }
2601 
     /* Gray-only encoding of intra MBs: force mid-gray chroma DC. */
2602  if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
2603  s->c.block_last_index[4] =
2604  s->c.block_last_index[5] = 0;
2605  s->c.block[4][0] =
2606  s->c.block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
2607  if (!chroma_y_shift) { /* 422 / 444 */
2608  for (i=6; i<12; i++) {
2609  s->c.block_last_index[i] = 0;
2610  s->c.block[i][0] = s->c.block[4][0];
2611  }
2612  }
2613  }
2614 
2615  // non c quantize code returns incorrect block_last_index FIXME
2616  if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
2617  for (i = 0; i < mb_block_count; i++) {
2618  int j;
2619  if (s->c.block_last_index[i] > 0) {
2620  for (j = 63; j > 0; j--) {
2621  if (s->c.block[i][s->c.intra_scantable.permutated[j]])
2622  break;
2623  }
2624  s->c.block_last_index[i] = j;
2625  }
2626  }
2627  }
2628 
     /* Finally emit the macroblock with the codec-specific bitstream writer. */
2629  s->encode_mb(s, s->c.block, motion_x, motion_y);
2630 }
2631 
2632 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2633 {
2634  if (s->c.chroma_format == CHROMA_420)
2635  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2636  else if (s->c.chroma_format == CHROMA_422)
2637  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2638  else
2639  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2640 }
2641 
/* Snapshot of the encoder state that encode_mb_hq() saves and restores
 * while trying several candidate codings of one macroblock (via the
 * COPY_CONTEXT-generated helpers below).
 * NOTE(review): several fields of this struct are not visible in this
 * view of the file — confirm against the full source. */
2642 typedef struct MBBackup {
2643  struct {
2644  int mv[2][4][2];
2645  int last_mv[2][2][2];
2647  int last_dc[3];
2649  int qscale;
2652  int16_t (*block)[64];
2653  } c;
2655  int dquant;
2658 } MBBackup;
2659 
/* COPY_CONTEXT generates a pair of helpers used by the RD macroblock
 * trial loop:
 *   BEFORE##_context_before_encode(): copies the state that must be in
 *       place before (re-)encoding a macroblock — MV predictors, DC
 *       predictors, bit statistics and quantizer;
 *   AFTER##_context_after_encode(): copies the full outcome of a trial
 *       encode — MVs, coded blocks, PutBitContexts and mode flags.
 * It is instantiated in both directions below: encoder -> MBBackup (to
 * save the best candidate) and MBBackup -> encoder (to restore/commit). */
2660 #define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
2661 static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
2662  const SRC_TYPE *const s) \
2663 { \
2664  /* FIXME is memcpy faster than a loop? */ \
2665  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2666  \
2667  /* MPEG-1 */ \
2668  d->c.mb_skip_run = s->c.mb_skip_run; \
2669  for (int i = 0; i < 3; i++) \
2670  d->c.last_dc[i] = s->c.last_dc[i]; \
2671  \
2672  /* statistics */ \
2673  d->mv_bits = s->mv_bits; \
2674  d->i_tex_bits = s->i_tex_bits; \
2675  d->p_tex_bits = s->p_tex_bits; \
2676  d->i_count = s->i_count; \
2677  d->misc_bits = s->misc_bits; \
2678  d->last_bits = 0; \
2679  \
2680  d->c.mb_skipped = 0; \
2681  d->c.qscale = s->c.qscale; \
2682  d->dquant = s->dquant; \
2683  \
2684  d->esc3_level_length = s->esc3_level_length; \
2685 } \
2686  \
2687 static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
2688  const SRC_TYPE *const s, \
2689  int data_partitioning) \
2690 { \
2691  /* FIXME is memcpy faster than a loop? */ \
2692  memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
2693  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2694  \
2695  /* MPEG-1 */ \
2696  d->c.mb_skip_run = s->c.mb_skip_run; \
2697  for (int i = 0; i < 3; i++) \
2698  d->c.last_dc[i] = s->c.last_dc[i]; \
2699  \
2700  /* statistics */ \
2701  d->mv_bits = s->mv_bits; \
2702  d->i_tex_bits = s->i_tex_bits; \
2703  d->p_tex_bits = s->p_tex_bits; \
2704  d->i_count = s->i_count; \
2705  d->misc_bits = s->misc_bits; \
2706  \
2707  d->c.mb_intra = s->c.mb_intra; \
2708  d->c.mb_skipped = s->c.mb_skipped; \
2709  d->c.mv_type = s->c.mv_type; \
2710  d->c.mv_dir = s->c.mv_dir; \
2711  d->pb = s->pb; \
2712  if (data_partitioning) { \
2713  d->pb2 = s->pb2; \
2714  d->tex_pb = s->tex_pb; \
2715  } \
2716  d->c.block = s->c.block; \
2717  for (int i = 0; i < 8; i++) \
2718  d->c.block_last_index[i] = s->c.block_last_index[i]; \
2719  d->c.interlaced_dct = s->c.interlaced_dct; \
2720  d->c.qscale = s->c.qscale; \
2721  \
2722  d->esc3_level_length = s->esc3_level_length; \
2723 }
2724 
/* backup/save: MPVEncContext -> MBBackup; reset/store: the reverse. */
2725 COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
2726 COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2727 
/**
 * Trial-encode one macroblock with the currently configured mode and keep
 * it if it beats the best score so far.  The caller tries multiple modes:
 * each trial restores state from 'backup', encodes into one of two
 * ping-pong bit buffers/block sets selected by *next_block, scores the
 * result (bit count, optionally full RD with reconstruction + SSE), and
 * saves the winner into 'best'.
 */
2728 static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
2729  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2730  int *dmin, int *next_block, int motion_x, int motion_y)
2731 {
2732  int score;
2733  uint8_t *dest_backup[3];
2734 
     /* Re-arm the pre-encode state for this trial. */
2735  reset_context_before_encode(s, backup);
2736 
     /* Select the ping-pong block set and bit buffers for this trial. */
2737  s->c.block = s->c.blocks[*next_block];
2738  s->pb = pb[*next_block];
2739  if (s->c.data_partitioning) {
2740  s->pb2 = pb2 [*next_block];
2741  s->tex_pb= tex_pb[*next_block];
2742  }
2743 
     /* The second candidate reconstructs into a scratchpad so the best
      * candidate's pixels in s->c.dest are not clobbered. */
2744  if(*next_block){
2745  memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
2746  s->c.dest[0] = s->c.sc.rd_scratchpad;
2747  s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
2748  s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
2749  av_assert0(s->c.linesize >= 32); //FIXME
2750  }
2751 
2752  encode_mb(s, motion_x, motion_y);
2753 
     /* Base score: bits spent (all partitions). */
2754  score= put_bits_count(&s->pb);
2755  if (s->c.data_partitioning) {
2756  score+= put_bits_count(&s->pb2);
2757  score+= put_bits_count(&s->tex_pb);
2758  }
2759 
     /* Full rate-distortion: score = bits*lambda2 + SSE distortion. */
2760  if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
2761  mpv_reconstruct_mb(s, s->c.block);
2762 
2763  score *= s->lambda2;
2764  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2765  }
2766 
2767  if(*next_block){
2768  memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
2769  }
2770 
     /* New best: remember it and flip the ping-pong index so the next
      * trial does not overwrite this candidate's buffers. */
2771  if(score<*dmin){
2772  *dmin= score;
2773  *next_block^=1;
2774 
2775  save_context_after_encode(best, s, s->c.data_partitioning);
2776  }
2777 }
2778 
2779 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2780 {
2781  const uint32_t *sq = ff_square_tab + 256;
2782  int acc=0;
2783  int x,y;
2784 
2785  if(w==16 && h==16)
2786  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2787  else if(w==8 && h==8)
2788  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2789 
2790  for(y=0; y<h; y++){
2791  for(x=0; x<w; x++){
2792  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2793  }
2794  }
2795 
2796  av_assert2(acc>=0);
2797 
2798  return acc;
2799 }
2800 
2801 static int sse_mb(MPVEncContext *const s)
2802 {
2803  int w= 16;
2804  int h= 16;
2805  int chroma_mb_w = w >> s->c.chroma_x_shift;
2806  int chroma_mb_h = h >> s->c.chroma_y_shift;
2807 
2808  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2809  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2810 
2811  if(w==16 && h==16)
2812  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2813  s->c.dest[0], s->c.linesize, 16) +
2814  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2815  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2816  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2817  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2818  else
2819  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2820  s->c.dest[0], w, h, s->c.linesize) +
2821  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2822  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2823  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2824  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2825 }
2826 
2828  MPVEncContext *const s = *(void**)arg;
2829 
2830 
     /* Slice worker for the motion-estimation pre-pass: scan this slice's
      * macroblocks in reverse raster order using the pre-pass diamond
      * size, then clear the pre-pass flag again. */
2831  s->me.pre_pass = 1;
2832  s->me.dia_size = s->c.avctx->pre_dia_size;
2833  s->c.first_slice_line = 1;
2834  for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
2835  for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
2836  ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2837  s->c.first_slice_line = 0;
2838  }
2839 
2840  s->me.pre_pass = 0;
2841 
2842  return 0;
2843 }
2844 
2846  MPVEncContext *const s = *(void**)arg;
2847 
     /* Slice worker for the main motion-estimation pass: raster scan of
      * this slice, keeping the block indices up to date, and estimating
      * B- or P-frame motion per macroblock into the context tables. */
2848  s->me.dia_size = s->c.avctx->dia_size;
2849  s->c.first_slice_line = 1;
2850  for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
2851  s->c.mb_x = 0; //for block init below
2852  ff_init_block_index(&s->c);
2853  for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
2854  s->c.block_index[0] += 2;
2855  s->c.block_index[1] += 2;
2856  s->c.block_index[2] += 2;
2857  s->c.block_index[3] += 2;
2858 
2859  /* compute motion vector & mb_type and store in context */
2860  if (s->c.pict_type == AV_PICTURE_TYPE_B)
2861  ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
2862  else
2863  ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2864  }
2865  s->c.first_slice_line = 0;
2866  }
2867  return 0;
2868 }
2869 
2870 static int mb_var_thread(AVCodecContext *c, void *arg){
2871  MPVEncContext *const s = *(void**)arg;
2872 
2873  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2874  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2875  int xx = mb_x * 16;
2876  int yy = mb_y * 16;
2877  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2878  int varc;
2879  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2880 
2881  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2882  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2883 
2884  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2885  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2886  s->me.mb_var_sum_temp += varc;
2887  }
2888  }
2889  return 0;
2890 }
2891 
/**
 * Finish the current slice: apply codec-specific stuffing/padding,
 * byte-align the bitstream and, in pass 1, account the alignment bits as
 * misc_bits.
 * NOTE(review): some codec-specific calls inside the branches are not
 * visible in this view of the file — confirm against the full source.
 */
2892 static void write_slice_end(MPVEncContext *const s)
2893 {
2894  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
2895  if (s->c.partitioned_frame)
2897 
2898  ff_mpeg4_stuffing(&s->pb);
2899  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2900  s->c.out_format == FMT_MJPEG) {
2902  } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
2904  }
2905 
2906  flush_put_bits(&s->pb);
2907 
2908  if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->c.partitioned_frame)
2909  s->misc_bits+= get_bits_diff(s);
2910 }
2911 
2912 static void write_mb_info(MPVEncContext *const s)
2913 {
2914  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2915  int offset = put_bits_count(&s->pb);
2916  int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->c.gob_index);
2917  int gobn = s->c.mb_y / s->c.gob_index;
2918  int pred_x, pred_y;
2919  if (CONFIG_H263_ENCODER)
2920  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
2921  bytestream_put_le32(&ptr, offset);
2922  bytestream_put_byte(&ptr, s->c.qscale);
2923  bytestream_put_byte(&ptr, gobn);
2924  bytestream_put_le16(&ptr, mba);
2925  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2926  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2927  /* 4MV not implemented */
2928  bytestream_put_byte(&ptr, 0); /* hmv2 */
2929  bytestream_put_byte(&ptr, 0); /* vmv2 */
2930 }
2931 
2932 static void update_mb_info(MPVEncContext *const s, int startcode)
2933 {
2934  if (!s->mb_info)
2935  return;
2936  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2937  s->mb_info_size += 12;
2938  s->prev_mb_info = s->last_mb_info;
2939  }
2940  if (startcode) {
2941  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2942  /* This might have incremented mb_info_size above, and we return without
2943  * actually writing any info into that slot yet. But in that case,
2944  * this will be called again at the start of the after writing the
2945  * start code, actually writing the mb info. */
2946  return;
2947  }
2948 
2949  s->last_mb_info = put_bytes_count(&s->pb, 0);
2950  if (!s->mb_info_size)
2951  s->mb_info_size += 12;
2952  write_mb_info(s);
2953 }
2954 
/**
 * Ensure at least 'threshold' bytes remain writable in the bitstream
 * buffer, growing the shared AVCodecContext byte buffer by
 * 'size_increase' if needed.  Growing is only possible when there is a
 * single slice context and the PutBitContext still points at the shared
 * buffer; the PutBitContext and ptr_lastgob are rebased onto the new
 * allocation.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure/overflow,
 *         AVERROR(EINVAL) if the space is still insufficient afterwards.
 */
2955 int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
2956 {
2957  if (put_bytes_left(&s->pb, 0) < threshold
2958  && s->c.slice_context_count == 1
2959  && s->pb.buf == s->c.avctx->internal->byte_buffer) {
     /* Remember the GOB start as an offset; the buffer may move. */
2960  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2961 
2962  uint8_t *new_buffer = NULL;
2963  int new_buffer_size = 0;
2964 
     /* Guard against int overflow of the grown buffer size. */
2965  if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2966  av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2967  return AVERROR(ENOMEM);
2968  }
2969 
2970  emms_c();
2971 
2972  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2973  s->c.avctx->internal->byte_buffer_size + size_increase);
2974  if (!new_buffer)
2975  return AVERROR(ENOMEM);
2976 
     /* Copy the bits written so far, swap in the new buffer and rebase
      * the bit writer and the last-GOB pointer onto it. */
2977  memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
2978  av_free(s->c.avctx->internal->byte_buffer);
2979  s->c.avctx->internal->byte_buffer = new_buffer;
2980  s->c.avctx->internal->byte_buffer_size = new_buffer_size;
2981  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2982  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2983  }
2984  if (put_bytes_left(&s->pb, 0) < threshold)
2985  return AVERROR(EINVAL);
2986  return 0;
2987 }
2988 
2989 static int encode_thread(AVCodecContext *c, void *arg){
2990  MPVEncContext *const s = *(void**)arg;
2991  int chr_h = 16 >> s->c.chroma_y_shift;
2992  int i;
2993  MBBackup best_s = { 0 }, backup_s;
2994  uint8_t bit_buf[2][MAX_MB_BYTES];
2995  // + 2 because ff_copy_bits() overreads
2996  uint8_t bit_buf2[2][MAX_PB2_MB_SIZE + 2];
2997  uint8_t bit_buf_tex[2][MAX_AC_TEX_MB_SIZE + 2];
2998  PutBitContext pb[2], pb2[2], tex_pb[2];
2999 
3000  for(i=0; i<2; i++){
3001  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
3002  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_PB2_MB_SIZE);
3003  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_AC_TEX_MB_SIZE);
3004  }
3005 
3006  s->last_bits= put_bits_count(&s->pb);
3007  s->mv_bits=0;
3008  s->misc_bits=0;
3009  s->i_tex_bits=0;
3010  s->p_tex_bits=0;
3011  s->i_count=0;
3012 
3013  for(i=0; i<3; i++){
3014  /* init last dc values */
3015  /* note: quant matrix value (8) is implied here */
3016  s->c.last_dc[i] = 128 << s->c.intra_dc_precision;
3017 
3018  s->encoding_error[i] = 0;
3019  }
3020  if (s->c.codec_id == AV_CODEC_ID_AMV) {
3021  s->c.last_dc[0] = 128 * 8 / 13;
3022  s->c.last_dc[1] = 128 * 8 / 14;
3023  s->c.last_dc[2] = 128 * 8 / 14;
3024 #if CONFIG_MPEG4_ENCODER
3025  } else if (s->c.partitioned_frame) {
3026  av_assert1(s->c.codec_id == AV_CODEC_ID_MPEG4);
3028 #endif
3029  }
3030  s->c.mb_skip_run = 0;
3031  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
3032 
3033  s->last_mv_dir = 0;
3034 
3035  s->c.resync_mb_x = 0;
3036  s->c.resync_mb_y = 0;
3037  s->c.first_slice_line = 1;
3038  s->ptr_lastgob = s->pb.buf;
3039  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
3040  int mb_y;
3041  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
3042  int first_in_slice;
3043  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
3044  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3046  s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 1024 << s->c.intra_dc_precision;
3047  } else {
3048  mb_y = mb_y_order;
3049  }
3050  s->c.mb_x = 0;
3051  s->c.mb_y = mb_y;
3052 
3053  ff_set_qscale(&s->c, s->c.qscale);
3054  ff_init_block_index(&s->c);
3055 
3056  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3057  int mb_type, xy;
3058 // int d;
3059  int dmin= INT_MAX;
3060  int dir;
3061  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3062  + s->c.mb_width*MAX_MB_BYTES;
3063 
3065  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3066  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3067  return -1;
3068  }
3069  if (s->c.data_partitioning) {
3070  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3071  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3072  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3073  return -1;
3074  }
3075  }
3076 
3077  s->c.mb_x = mb_x;
3078  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3079  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3080 
3081  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3083  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3084  mb_type = s->mb_type[xy];
3085 
3086  /* write gob / video packet header */
3087  if(s->rtp_mode){
3088  int current_packet_size, is_gob_start;
3089 
3090  current_packet_size = put_bytes_count(&s->pb, 1)
3091  - (s->ptr_lastgob - s->pb.buf);
3092 
3093  is_gob_start = s->rtp_payload_size &&
3094  current_packet_size >= s->rtp_payload_size &&
3095  mb_y + mb_x > 0;
3096 
3097  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3098 
3099  switch (s->c.codec_id) {
3100  case AV_CODEC_ID_H263:
3101  case AV_CODEC_ID_H263P:
3102  if (!s->c.h263_slice_structured)
3103  if (s->c.mb_x || s->c.mb_y % s->c.gob_index) is_gob_start = 0;
3104  break;
3106  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3108  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3109  s->c.mb_skip_run)
3110  is_gob_start=0;
3111  break;
3112  case AV_CODEC_ID_MJPEG:
3113  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3114  break;
3115  }
3116 
3117  if(is_gob_start){
3118  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3119  write_slice_end(s);
3120 
3121  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->c.partitioned_frame)
3123  }
3124 
3125  av_assert2((put_bits_count(&s->pb)&7) == 0);
3126  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3127 
3128  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3129  int r = put_bytes_count(&s->pb, 0) + s->c.picture_number + 16 + s->c.mb_x + s->c.mb_y;
3130  int d = 100 / s->error_rate;
3131  if(r % d == 0){
3132  current_packet_size=0;
3133  s->pb.buf_ptr= s->ptr_lastgob;
3134  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3135  }
3136  }
3137 
3138  switch (s->c.codec_id) {
3139  case AV_CODEC_ID_MPEG4:
3140  if (CONFIG_MPEG4_ENCODER) {
3144  }
3145  break;
3148  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3151  }
3152  break;
3153 #if CONFIG_H263P_ENCODER
3154  case AV_CODEC_ID_H263P:
3155  if (s->c.dc_val)
3157  // fallthrough
3158 #endif
3159  case AV_CODEC_ID_H263:
3160  if (CONFIG_H263_ENCODER) {
3161  update_mb_info(s, 1);
3163  }
3164  break;
3165  }
3166 
3167  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3168  int bits= put_bits_count(&s->pb);
3169  s->misc_bits+= bits - s->last_bits;
3170  s->last_bits= bits;
3171  }
3172 
3173  s->ptr_lastgob += current_packet_size;
3174  s->c.first_slice_line = 1;
3175  s->c.resync_mb_x = mb_x;
3176  s->c.resync_mb_y = mb_y;
3177  }
3178  }
3179 
3180  if (s->c.resync_mb_x == s->c.mb_x &&
3181  s->c.resync_mb_y+1 == s->c.mb_y)
3182  s->c.first_slice_line = 0;
3183 
3184  s->c.mb_skipped = 0;
3185  s->dquant=0; //only for QP_RD
3186 
3187  update_mb_info(s, 0);
3188 
3189  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3190  int next_block=0;
3191  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3192 
3193  backup_context_before_encode(&backup_s, s);
3194  backup_s.pb= s->pb;
3195  if (s->c.data_partitioning) {
3196  backup_s.pb2= s->pb2;
3197  backup_s.tex_pb= s->tex_pb;
3198  }
3199 
3200  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3201  s->c.mv_dir = MV_DIR_FORWARD;
3202  s->c.mv_type = MV_TYPE_16X16;
3203  s->c.mb_intra = 0;
3204  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3205  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3206  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3207  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3208  }
3209  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3210  s->c.mv_dir = MV_DIR_FORWARD;
3211  s->c.mv_type = MV_TYPE_FIELD;
3212  s->c.mb_intra = 0;
3213  for(i=0; i<2; i++){
3214  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3215  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3216  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3217  }
3218  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3219  &dmin, &next_block, 0, 0);
3220  }
3221  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3222  s->c.mv_dir = MV_DIR_FORWARD;
3223  s->c.mv_type = MV_TYPE_16X16;
3224  s->c.mb_intra = 0;
3225  s->c.mv[0][0][0] = 0;
3226  s->c.mv[0][0][1] = 0;
3227  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3228  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3229  }
3230  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3231  s->c.mv_dir = MV_DIR_FORWARD;
3232  s->c.mv_type = MV_TYPE_8X8;
3233  s->c.mb_intra = 0;
3234  for(i=0; i<4; i++){
3235  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3236  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3237  }
3238  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3239  &dmin, &next_block, 0, 0);
3240  }
3241  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3242  s->c.mv_dir = MV_DIR_FORWARD;
3243  s->c.mv_type = MV_TYPE_16X16;
3244  s->c.mb_intra = 0;
3245  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3246  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3247  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3248  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3249  }
3250  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3251  s->c.mv_dir = MV_DIR_BACKWARD;
3252  s->c.mv_type = MV_TYPE_16X16;
3253  s->c.mb_intra = 0;
3254  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3255  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3256  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3257  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3258  }
3259  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3260  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3261  s->c.mv_type = MV_TYPE_16X16;
3262  s->c.mb_intra = 0;
3263  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3264  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3265  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3266  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3267  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3268  &dmin, &next_block, 0, 0);
3269  }
3270  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3271  s->c.mv_dir = MV_DIR_FORWARD;
3272  s->c.mv_type = MV_TYPE_FIELD;
3273  s->c.mb_intra = 0;
3274  for(i=0; i<2; i++){
3275  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3276  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3277  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3278  }
3279  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3280  &dmin, &next_block, 0, 0);
3281  }
3282  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3283  s->c.mv_dir = MV_DIR_BACKWARD;
3284  s->c.mv_type = MV_TYPE_FIELD;
3285  s->c.mb_intra = 0;
3286  for(i=0; i<2; i++){
3287  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3288  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3289  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3290  }
3291  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3292  &dmin, &next_block, 0, 0);
3293  }
3294  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3295  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3296  s->c.mv_type = MV_TYPE_FIELD;
3297  s->c.mb_intra = 0;
3298  for(dir=0; dir<2; dir++){
3299  for(i=0; i<2; i++){
3300  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3301  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3302  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3303  }
3304  }
3305  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3306  &dmin, &next_block, 0, 0);
3307  }
3308  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3309  s->c.mv_dir = 0;
3310  s->c.mv_type = MV_TYPE_16X16;
3311  s->c.mb_intra = 1;
3312  s->c.mv[0][0][0] = 0;
3313  s->c.mv[0][0][1] = 0;
3314  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3315  &dmin, &next_block, 0, 0);
3316  s->c.mbintra_table[xy] = 1;
3317  }
3318 
3319  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3320  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3321  const int last_qp = backup_s.c.qscale;
3322  int qpi, qp, dc[6];
3323  int16_t ac[6][16];
3324  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3325  static const int dquant_tab[4]={-1,1,-2,2};
3326  int storecoefs = s->c.mb_intra && s->c.dc_val;
3327 
3328  av_assert2(backup_s.dquant == 0);
3329 
3330  //FIXME intra
3331  s->c.mv_dir = best_s.c.mv_dir;
3332  s->c.mv_type = MV_TYPE_16X16;
3333  s->c.mb_intra = best_s.c.mb_intra;
3334  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3335  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3336  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3337  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3338 
3339  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3340  for(; qpi<4; qpi++){
3341  int dquant= dquant_tab[qpi];
3342  qp= last_qp + dquant;
3343  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3344  continue;
3345  backup_s.dquant= dquant;
3346  if(storecoefs){
3347  for(i=0; i<6; i++){
3348  dc[i] = s->c.dc_val[s->c.block_index[i]];
3349  memcpy(ac[i], s->c.ac_val[s->c.block_index[i]], sizeof(*s->c.ac_val));
3350  }
3351  }
3352 
3353  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3354  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3355  if (best_s.c.qscale != qp) {
3356  if(storecoefs){
3357  for(i=0; i<6; i++){
3358  s->c.dc_val[s->c.block_index[i]] = dc[i];
3359  memcpy(s->c.ac_val[s->c.block_index[i]], ac[i], sizeof(*s->c.ac_val));
3360  }
3361  }
3362  }
3363  }
3364  }
3365  }
3366  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3367  int mx= s->b_direct_mv_table[xy][0];
3368  int my= s->b_direct_mv_table[xy][1];
3369 
3370  backup_s.dquant = 0;
3371  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3372  s->c.mb_intra = 0;
3373  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3374  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3375  &dmin, &next_block, mx, my);
3376  }
3377  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3378  backup_s.dquant = 0;
3379  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3380  s->c.mb_intra = 0;
3381  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3382  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3383  &dmin, &next_block, 0, 0);
3384  }
3385  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3386  int coded=0;
3387  for(i=0; i<6; i++)
3388  coded |= s->c.block_last_index[i];
3389  if(coded){
3390  int mx,my;
3391  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3392  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3393  mx=my=0; //FIXME find the one we actually used
3394  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3395  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3396  mx = s->c.mv[1][0][0];
3397  my = s->c.mv[1][0][1];
3398  }else{
3399  mx = s->c.mv[0][0][0];
3400  my = s->c.mv[0][0][1];
3401  }
3402 
3403  s->c.mv_dir = best_s.c.mv_dir;
3404  s->c.mv_type = best_s.c.mv_type;
3405  s->c.mb_intra = 0;
3406 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3407  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3408  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3409  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3410  backup_s.dquant= 0;
3411  s->skipdct=1;
3412  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3413  &dmin, &next_block, mx, my);
3414  s->skipdct=0;
3415  }
3416  }
3417 
3418  store_context_after_encode(s, &best_s, s->c.data_partitioning);
3419 
3420  pb_bits_count= put_bits_count(&s->pb);
3421  flush_put_bits(&s->pb);
3422  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3423  s->pb= backup_s.pb;
3424 
3425  if (s->c.data_partitioning) {
3426  pb2_bits_count= put_bits_count(&s->pb2);
3427  flush_put_bits(&s->pb2);
3428  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3429  s->pb2= backup_s.pb2;
3430 
3431  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3432  flush_put_bits(&s->tex_pb);
3433  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3434  s->tex_pb= backup_s.tex_pb;
3435  }
3436  s->last_bits= put_bits_count(&s->pb);
3437 
3438  if (CONFIG_H263_ENCODER &&
3439  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3441 
3442  if(next_block==0){ //FIXME 16 vs linesize16
3443  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3444  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3445  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3446  }
3447 
3448  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3449  mpv_reconstruct_mb(s, s->c.block);
3450  } else {
3451  int motion_x = 0, motion_y = 0;
3452  s->c.mv_type = MV_TYPE_16X16;
3453  // only one MB-Type possible
3454 
3455  switch(mb_type){
3457  s->c.mv_dir = 0;
3458  s->c.mb_intra = 1;
3459  motion_x= s->c.mv[0][0][0] = 0;
3460  motion_y= s->c.mv[0][0][1] = 0;
3461  s->c.mbintra_table[xy] = 1;
3462  break;
3464  s->c.mv_dir = MV_DIR_FORWARD;
3465  s->c.mb_intra = 0;
3466  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3467  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3468  break;
3470  s->c.mv_dir = MV_DIR_FORWARD;
3471  s->c.mv_type = MV_TYPE_FIELD;
3472  s->c.mb_intra = 0;
3473  for(i=0; i<2; i++){
3474  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3475  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3476  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3477  }
3478  break;
3480  s->c.mv_dir = MV_DIR_FORWARD;
3481  s->c.mv_type = MV_TYPE_8X8;
3482  s->c.mb_intra = 0;
3483  for(i=0; i<4; i++){
3484  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3485  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3486  }
3487  break;
3489  if (CONFIG_MPEG4_ENCODER) {
3491  s->c.mb_intra = 0;
3492  motion_x=s->b_direct_mv_table[xy][0];
3493  motion_y=s->b_direct_mv_table[xy][1];
3494  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3495  }
3496  break;
3498  if (CONFIG_MPEG4_ENCODER) {
3500  s->c.mb_intra = 0;
3501  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3502  }
3503  break;
3505  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3506  s->c.mb_intra = 0;
3507  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3508  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3509  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3510  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3511  break;
3513  s->c.mv_dir = MV_DIR_BACKWARD;
3514  s->c.mb_intra = 0;
3515  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3516  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3517  break;
3519  s->c.mv_dir = MV_DIR_FORWARD;
3520  s->c.mb_intra = 0;
3521  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3522  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3523  break;
3525  s->c.mv_dir = MV_DIR_FORWARD;
3526  s->c.mv_type = MV_TYPE_FIELD;
3527  s->c.mb_intra = 0;
3528  for(i=0; i<2; i++){
3529  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3530  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3531  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3532  }
3533  break;
3535  s->c.mv_dir = MV_DIR_BACKWARD;
3536  s->c.mv_type = MV_TYPE_FIELD;
3537  s->c.mb_intra = 0;
3538  for(i=0; i<2; i++){
3539  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3540  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3541  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3542  }
3543  break;
3545  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3546  s->c.mv_type = MV_TYPE_FIELD;
3547  s->c.mb_intra = 0;
3548  for(dir=0; dir<2; dir++){
3549  for(i=0; i<2; i++){
3550  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3551  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3552  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3553  }
3554  }
3555  break;
3556  default:
3557  av_unreachable("There is a case for every CANDIDATE_MB_TYPE_* "
3558  "except CANDIDATE_MB_TYPE_SKIPPED which is never "
3559  "the only candidate (always coupled with INTER) "
3560  "so that it never reaches this switch");
3561  }
3562 
3563  encode_mb(s, motion_x, motion_y);
3564 
3565  // RAL: Update last macroblock type
3566  s->last_mv_dir = s->c.mv_dir;
3567 
3568  if (CONFIG_H263_ENCODER &&
3569  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3571 
3572  mpv_reconstruct_mb(s, s->c.block);
3573  }
3574 
3575  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3576 
3577  /* clean the MV table in IPS frames for direct mode in B-frames */
3578  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3579  s->p_mv_table[xy][0]=0;
3580  s->p_mv_table[xy][1]=0;
3581 #if CONFIG_H263_ENCODER
3582  } else if (s->c.h263_pred || s->c.h263_aic) {
3584 #endif
3585  }
3586 
3587  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3588  int w= 16;
3589  int h= 16;
3590 
3591  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3592  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3593 
3594  s->encoding_error[0] += sse(
3595  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3596  s->c.dest[0], w, h, s->c.linesize);
3597  s->encoding_error[1] += sse(
3598  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3599  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3600  s->encoding_error[2] += sse(
3601  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3602  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3603  }
3604  if (s->c.loop_filter) {
3605  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3606  ff_h263_loop_filter(&s->c);
3607  }
3608  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3609  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3610  }
3611  }
3612 
3613 #if CONFIG_MSMPEG4ENC
3614  //not beautiful here but we must write it before flushing so it has to be here
3615  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3616  s->c.pict_type == AV_PICTURE_TYPE_I)
3618 #endif
3619 
3620  write_slice_end(s);
3621 
3622  return 0;
3623 }
3624 
/* Helpers for folding per-slice statistics from a slice context (src)
 * into the main encoder context (dst). ADD simply accumulates; MERGE
 * accumulates and then zeroes the source field so a later merge cannot
 * count the same value twice. */
3625 #define ADD(field) dst->field += src->field;
3626 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the signature line (doxygen line 3627) was lost when this
 * listing was extracted. Based on the call site at doxygen line 3798
 * (merge_context_after_me(s, s->c.enc_contexts[i])) this is presumably
 * static void merge_context_after_me(MPVEncContext *dst, MPVEncContext *src)
 * — confirm against the upstream file. It accumulates only the
 * motion-estimation statistics gathered by the slice threads. */
3628 {
3629  ADD(me.scene_change_score);
3630  ADD(me.mc_mb_var_sum_temp);
3631  ADD(me.mb_var_sum_temp);
3632 }
3633 
/* NOTE(review): the signature line (doxygen line 3634) was lost in
 * extraction. Based on the call site at doxygen line 3951
 * (merge_context_after_encode(s, s->c.enc_contexts[i])) this is presumably
 * static void merge_context_after_encode(MPVEncContext *dst, MPVEncContext *src)
 * — confirm upstream. It folds a slice context's bit-accounting and error
 * statistics into the main context and appends the slice's (byte-aligned)
 * bitstream onto the main bit writer. */
3635 {
3636  int i;
3637 
3638  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3639  MERGE(dct_count[1]);
3640  ADD(mv_bits);
3641  ADD(i_tex_bits);
3642  ADD(p_tex_bits);
3643  ADD(i_count);
3644  ADD(misc_bits);
3645  ADD(encoding_error[0]);
3646  ADD(encoding_error[1]);
3647  ADD(encoding_error[2]);
3648 
    /* Denoise statistics exist only when the DCT error tables are
     * allocated; merge them element-wise (and clear the source). */
3649  if (dst->dct_error_sum) {
3650  for(i=0; i<64; i++){
3651  MERGE(dct_error_sum[0][i]);
3652  MERGE(dct_error_sum[1][i]);
3653  }
3654  }
3655 
    /* Both streams must be byte-aligned before concatenation. */
3656  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3657  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3658  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3659  flush_put_bits(&dst->pb);
3660 }
3661 
/**
 * Choose the quality (lambda/qscale) for the current picture.
 *
 * Priority: a pending next_lambda from rate control wins; otherwise, when
 * qscale is not fixed, ask the rate controller. With adaptive quantization
 * the per-MB qscale table is initialized and lambda is taken from the
 * table's first entry; otherwise lambda comes from the picture quality.
 *
 * @param m       main encoder context
 * @param dry_run nonzero for a trial estimate (next_lambda is then kept)
 * @return 0 on success, -1 if rate control produced a negative quality
 */
3662 static int estimate_qp(MPVMainEncContext *const m, int dry_run)
3663 {
3664  MPVEncContext *const s = &m->s;
3665 
3666  if (m->next_lambda){
3667  s->c.cur_pic.ptr->f->quality = m->next_lambda;
3668  if(!dry_run) m->next_lambda= 0;
3669  } else if (!m->fixed_qscale) {
3670  int quality = ff_rate_estimate_qscale(m, dry_run);
3671  s->c.cur_pic.ptr->f->quality = quality;
3672  if (s->c.cur_pic.ptr->f->quality < 0)
3673  return -1;
3674  }
3675 
3676  if(s->adaptive_quant){
3677  init_qscale_tab(s);
3678 
3679  switch (s->c.codec_id) {
3680  case AV_CODEC_ID_MPEG4:
3681  if (CONFIG_MPEG4_ENCODER)
    /* NOTE(review): the guarded call (doxygen line 3682, presumably a
     * codec-specific qscale clean-up such as ff_clean_mpeg4_qscales)
     * was lost in extraction — confirm against the upstream file. */
3683  break;
3684  case AV_CODEC_ID_H263:
3685  case AV_CODEC_ID_H263P:
3686  case AV_CODEC_ID_FLV1:
3687  if (CONFIG_H263_ENCODER)
    /* NOTE(review): the guarded call (doxygen line 3688) was likewise
     * lost in extraction. */
3689  break;
3690  }
3691 
3692  s->lambda = s->lambda_table[0];
3693  //FIXME broken
3694  }else
3695  s->lambda = s->c.cur_pic.ptr->f->quality;
3696  update_qscale(m);
3697  return 0;
3698 }
3699 
3700 /* must be called before writing the header */
/* NOTE(review): the signature line (doxygen line 3701) was lost in
 * extraction; presumably
 * static void set_frame_distances(MPVEncContext *const s) — confirm
 * upstream. Derives the current time from the frame pts and updates the
 * P-to-P (pp_time) and P-to-B (pb_time) temporal distances used by
 * B-frame prediction. */
3702 {
3703  av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3704  s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;
3705 
3706  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
    /* B-frame: distance from the previous reference to this frame. */
3707  s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
3708  av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
3709  }else{
    /* Reference frame: record the new reference-to-reference distance. */
3710  s->c.pp_time = s->c.time - s->c.last_non_b_time;
3711  s->c.last_non_b_time = s->c.time;
3712  av_assert1(s->c.picture_number == 0 || s->c.pp_time > 0);
3713  }
3714 }
3715 
/**
 * Encode one complete picture into the packet buffer.
 *
 * Sets up timing and rounding state, initializes one bit writer per slice
 * context over a proportional share of pkt's buffer, runs motion
 * estimation across all slice threads, merges their statistics, performs
 * scene-change detection, picks f_code/b_code and the quantiser, writes
 * the picture header and finally dispatches encode_thread() per slice,
 * merging the resulting bitstreams.
 *
 * NOTE(review): several statement lines (hyperlinked calls) were dropped
 * when this listing was extracted from doxygen HTML; they are marked
 * inline below — confirm each against the upstream file.
 *
 * @param m   main encoder context
 * @param pkt output packet whose data buffer receives the bitstream
 * @return 0 on success, a negative error code on failure
 */
3716 static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
3717 {
3718  MPVEncContext *const s = &m->s;
3719  int i, ret;
3720  int bits;
3721  int context_count = s->c.slice_context_count;
3722 
3723  /* we need to initialize some time vars before we can encode B-frames */
3724  // RAL: Condition added for MPEG1VIDEO
3725  if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
3727  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
    /* NOTE(review): the two guarded calls (doxygen lines 3726 and 3728,
     * presumably the frame-distance setup above and an MPEG-4 specific
     * time initialization) were lost in extraction. */
3729 
3730 // s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3731 
    /* Rounding mode: fixed for I frames, toggled per reference frame when
     * flipflop rounding is in use; B frames leave it untouched. */
3732  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3733  s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
3734  } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3735  s->c.no_rounding ^= s->c.flipflop_rounding;
3736  }
3737 
3738  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
3739  ret = estimate_qp(m, 1);
3740  if (ret < 0)
3741  return ret;
3742  ff_get_2pass_fcode(m);
3743  } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3744  if (s->c.pict_type == AV_PICTURE_TYPE_B)
3745  s->lambda = m->last_lambda_for[s->c.pict_type];
3746  else
3747  s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
3748  update_qscale(m);
3749  }
3750 
3751  s->c.mb_intra = 0; //for the rate distortion & bit compare functions
    /* Give each slice context a proportional window of the packet buffer
     * (by mb row share) and propagate lambda to the duplicate contexts. */
3752  for (int i = 0; i < context_count; i++) {
3753  MPVEncContext *const slice = s->c.enc_contexts[i];
3754  int h = s->c.mb_height;
3755  uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
3756  uint8_t *end = pkt->data + (int64_t)pkt->size * slice->c. end_mb_y / h;
3757 
3758  init_put_bits(&slice->pb, start, end - start);
3759 
3760  if (i) {
3761  ret = ff_update_duplicate_context(&slice->c, &s->c);
3762  if (ret < 0)
3763  return ret;
3764  slice->lambda = s->lambda;
3765  slice->lambda2 = s->lambda2;
3766  }
3767  slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
3768  ff_me_init_pic(slice);
3769  }
3770 
3771  /* Estimate motion for every MB */
3772  if (s->c.pict_type != AV_PICTURE_TYPE_I) {
3773  s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
3774  s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
3775  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3776  if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3777  m->me_pre == 2) {
3778  s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
3779  &s->c.enc_contexts[0], NULL,
3780  context_count, sizeof(void*));
3781  }
3782  }
3783 
3784  s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
3785  NULL, context_count, sizeof(void*));
3786  }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
3787  /* I-Frame */
3788  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3789  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3790 
3791  if (!m->fixed_qscale) {
3792  /* finding spatial complexity for I-frame rate control */
3793  s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
3794  NULL, context_count, sizeof(void*));
3795  }
3796  }
3797  for(i=1; i<context_count; i++){
3798  merge_context_after_me(s, s->c.enc_contexts[i]);
3799  }
3800  m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3801  m->mb_var_sum = s->me. mb_var_sum_temp;
3802  emms_c();
3803 
    /* Scene-change detection: promote a P frame to I when the ME pass
     * reports a score above the configured threshold. */
3804  if (s->me.scene_change_score > m->scenechange_threshold &&
3805  s->c.pict_type == AV_PICTURE_TYPE_P) {
3806  s->c.pict_type = AV_PICTURE_TYPE_I;
3807  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3808  s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
3809  if (s->c.msmpeg4_version >= MSMP4_V3)
3810  s->c.no_rounding = 1;
3811  ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3812  m->mb_var_sum, m->mc_mb_var_sum);
3813  }
3814 
    /* Pick f_code/b_code from the estimated MVs and clip overlong vectors
     * back into the representable range (not for H.263+ UMV). */
3815  if (!s->c.umvplus) {
3816  if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
3817  s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3818 
3819  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3820  int a,b;
3821  a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3822  b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3823  s->f_code = FFMAX3(s->f_code, a, b);
3824  }
3825 
    /* NOTE(review): a statement at doxygen line 3826 (presumably a
     * long-MV fix for the 4MV/P case) was lost in extraction. */
3827  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3828  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3829  int j;
3830  for(i=0; i<2; i++){
3831  for(j=0; j<2; j++)
3832  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3833  s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3834  }
3835  }
3836  } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3837  int a, b;
3838 
3839  a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3840  b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3841  s->f_code = FFMAX(a, b);
3842 
3843  a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3844  b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3845  s->b_code = FFMAX(a, b);
3846 
3847  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3848  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3849  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3850  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3851  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3852  int dir, j;
3853  for(dir=0; dir<2; dir++){
3854  for(i=0; i<2; i++){
3855  for(j=0; j<2; j++){
    /* NOTE(review): the declaration of `type` (doxygen lines
     * 3856-3857), used in the call below, was lost in extraction. */
3858  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3859  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3860  }
3861  }
3862  }
3863  }
3864  }
3865  }
3866 
3867  ret = estimate_qp(m, 0);
3868  if (ret < 0)
3869  return ret;
3870 
3871  if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
3872  s->c.pict_type == AV_PICTURE_TYPE_I &&
3873  !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
3874  s->c.qscale = 3; //reduce clipping problems
3875 
3876  if (s->c.out_format == FMT_MJPEG) {
    /* NOTE(review): the call whose arguments continue on the next line
     * (doxygen line 3877) was lost in extraction. */
3878  (7 + s->c.qscale) / s->c.qscale, 65535);
3879  if (ret < 0)
3880  return ret;
3881 
3882  if (s->c.codec_id != AV_CODEC_ID_AMV) {
3883  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3884  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3885 
3886  if (s->c.avctx->intra_matrix) {
3887  chroma_matrix =
3888  luma_matrix = s->c.avctx->intra_matrix;
3889  }
3890  if (s->c.avctx->chroma_intra_matrix)
3891  chroma_matrix = s->c.avctx->chroma_intra_matrix;
3892 
3893  /* for mjpeg, we do include qscale in the matrix */
3894  for (int i = 1; i < 64; i++) {
3895  int j = s->c.idsp.idct_permutation[i];
3896 
3897  s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
3898  s->c. intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->c.qscale) >> 3);
3899  }
3900  s->c.y_dc_scale_table =
3901  s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
3902  s->c.chroma_intra_matrix[0] =
3903  s->c.intra_matrix[0] = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision][8];
3904  } else {
3905  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3906  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3907  for (int i = 1; i < 64; i++) {
3908  int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];
3909 
3910  s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3911  s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3912  }
3913  s->c.y_dc_scale_table = y;
3914  s->c.c_dc_scale_table = c;
3915  s->c.intra_matrix[0] = 13;
3916  s->c.chroma_intra_matrix[0] = 14;
3917  }
3918  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3919  s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
3920  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3921  s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3922  s->c.qscale = 8;
3923  }
3924 
3925  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3926  s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3927  } else {
3928  s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3929  }
3930  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
3931 
3932  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3933  m->picture_in_gop_number = 0;
3934 
3935  s->c.mb_x = s->c.mb_y = 0;
3936  s->last_bits= put_bits_count(&s->pb);
3937  ret = m->encode_picture_header(m);
3938  if (ret < 0)
3939  return ret;
3940  bits= put_bits_count(&s->pb);
3941  m->header_bits = bits - s->last_bits;
3942 
    /* Run the slice encoders and concatenate their output; grow the main
     * writer's window when a following slice's buffer is adjacent. */
3943  for(i=1; i<context_count; i++){
3944  update_duplicate_context_after_me(s->c.enc_contexts[i], s);
3945  }
3946  s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
3947  NULL, context_count, sizeof(void*));
3948  for(i=1; i<context_count; i++){
3949  if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
3950  set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3951  merge_context_after_encode(s, s->c.enc_contexts[i]);
3952  }
3953  emms_c();
3954  return 0;
3955 }
3956 
3957 static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
3958 {
3959  const int intra = s->c.mb_intra;
3960  int i;
3961 
3962  s->dct_count[intra]++;
3963 
3964  for(i=0; i<64; i++){
3965  int level= block[i];
3966 
3967  if(level){
3968  if(level>0){
3969  s->dct_error_sum[intra][i] += level;
3970  level -= s->dct_offset[intra][i];
3971  if(level<0) level=0;
3972  }else{
3973  s->dct_error_sum[intra][i] -= level;
3974  level += s->dct_offset[intra][i];
3975  if(level>0) level=0;
3976  }
3977  block[i]= level;
3978  }
3979  }
3980 }
3981 
3983  int16_t *block, int n,
3984  int qscale, int *overflow){
3985  const int *qmat;
3986  const uint16_t *matrix;
3987  const uint8_t *scantable;
3988  const uint8_t *perm_scantable;
3989  int max=0;
3990  unsigned int threshold1, threshold2;
3991  int bias=0;
3992  int run_tab[65];
3993  int level_tab[65];
3994  int score_tab[65];
3995  int survivor[65];
3996  int survivor_count;
3997  int last_run=0;
3998  int last_level=0;
3999  int last_score= 0;
4000  int last_i;
4001  int coeff[2][64];
4002  int coeff_count[64];
4003  int qmul, qadd, start_i, last_non_zero, i, dc;
4004  const int esc_length= s->ac_esc_length;
4005  const uint8_t *length, *last_length;
4006  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4007  int mpeg2_qscale;
4008 
4009  s->fdsp.fdct(block);
4010 
4011  if(s->dct_error_sum)
4012  s->denoise_dct(s, block);
4013  qmul= qscale*16;
4014  qadd= ((qscale-1)|1)*8;
4015 
4016  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4017  else mpeg2_qscale = qscale << 1;
4018 
4019  if (s->c.mb_intra) {
4020  int q;
4021  scantable = s->c.intra_scantable.scantable;
4022  perm_scantable = s->c.intra_scantable.permutated;
4023  if (!s->c.h263_aic) {
4024  if (n < 4)
4025  q = s->c.y_dc_scale;
4026  else
4027  q = s->c.c_dc_scale;
4028  q = q << 3;
4029  } else{
4030  /* For AIC we skip quant/dequant of INTRADC */
4031  q = 1 << 3;
4032  qadd=0;
4033  }
4034 
4035  /* note: block[0] is assumed to be positive */
4036  block[0] = (block[0] + (q >> 1)) / q;
4037  start_i = 1;
4038  last_non_zero = 0;
4039  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4040  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
4041  if (s->mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
4042  bias= 1<<(QMAT_SHIFT-1);
4043 
4044  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4045  length = s->intra_chroma_ac_vlc_length;
4046  last_length= s->intra_chroma_ac_vlc_last_length;
4047  } else {
4048  length = s->intra_ac_vlc_length;
4049  last_length= s->intra_ac_vlc_last_length;
4050  }
4051  } else {
4052  scantable = s->c.inter_scantable.scantable;
4053  perm_scantable = s->c.inter_scantable.permutated;
4054  start_i = 0;
4055  last_non_zero = -1;
4056  qmat = s->q_inter_matrix[qscale];
4057  matrix = s->c.inter_matrix;
4058  length = s->inter_ac_vlc_length;
4059  last_length= s->inter_ac_vlc_last_length;
4060  }
4061  last_i= start_i;
4062 
4063  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4064  threshold2= (threshold1<<1);
4065 
4066  for(i=63; i>=start_i; i--) {
4067  const int j = scantable[i];
4068  int64_t level = (int64_t)block[j] * qmat[j];
4069 
4070  if(((uint64_t)(level+threshold1))>threshold2){
4071  last_non_zero = i;
4072  break;
4073  }
4074  }
4075 
4076  for(i=start_i; i<=last_non_zero; i++) {
4077  const int j = scantable[i];
4078  int64_t level = (int64_t)block[j] * qmat[j];
4079 
4080 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4081 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4082  if(((uint64_t)(level+threshold1))>threshold2){
4083  if(level>0){
4084  level= (bias + level)>>QMAT_SHIFT;
4085  coeff[0][i]= level;
4086  coeff[1][i]= level-1;
4087 // coeff[2][k]= level-2;
4088  }else{
4089  level= (bias - level)>>QMAT_SHIFT;
4090  coeff[0][i]= -level;
4091  coeff[1][i]= -level+1;
4092 // coeff[2][k]= -level+2;
4093  }
4094  coeff_count[i]= FFMIN(level, 2);
4095  av_assert2(coeff_count[i]);
4096  max |=level;
4097  }else{
4098  coeff[0][i]= (level>>31)|1;
4099  coeff_count[i]= 1;
4100  }
4101  }
4102 
4103  *overflow= s->max_qcoeff < max; //overflow might have happened
4104 
4105  if(last_non_zero < start_i){
4106  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4107  return last_non_zero;
4108  }
4109 
4110  score_tab[start_i]= 0;
4111  survivor[0]= start_i;
4112  survivor_count= 1;
4113 
4114  for(i=start_i; i<=last_non_zero; i++){
4115  int level_index, j, zero_distortion;
4116  int dct_coeff= FFABS(block[ scantable[i] ]);
4117  int best_score=256*256*256*120;
4118 
4119  if (s->fdsp.fdct == ff_fdct_ifast)
4120  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4121  zero_distortion= dct_coeff*dct_coeff;
4122 
4123  for(level_index=0; level_index < coeff_count[i]; level_index++){
4124  int distortion;
4125  int level= coeff[level_index][i];
4126  const int alevel= FFABS(level);
4127  int unquant_coeff;
4128 
4129  av_assert2(level);
4130 
4131  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4132  unquant_coeff= alevel*qmul + qadd;
4133  } else if (s->c.out_format == FMT_MJPEG) {
4134  j = s->c.idsp.idct_permutation[scantable[i]];
4135  unquant_coeff = alevel * matrix[j] * 8;
4136  }else{ // MPEG-1
4137  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4138  if (s->c.mb_intra) {
4139  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4140  unquant_coeff = (unquant_coeff - 1) | 1;
4141  }else{
4142  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4143  unquant_coeff = (unquant_coeff - 1) | 1;
4144  }
4145  unquant_coeff<<= 3;
4146  }
4147 
4148  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4149  level+=64;
4150  if((level&(~127)) == 0){
4151  for(j=survivor_count-1; j>=0; j--){
4152  int run= i - survivor[j];
4153  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4154  score += score_tab[i-run];
4155 
4156  if(score < best_score){
4157  best_score= score;
4158  run_tab[i+1]= run;
4159  level_tab[i+1]= level-64;
4160  }
4161  }
4162 
4163  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4164  for(j=survivor_count-1; j>=0; j--){
4165  int run= i - survivor[j];
4166  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4167  score += score_tab[i-run];
4168  if(score < last_score){
4169  last_score= score;
4170  last_run= run;
4171  last_level= level-64;
4172  last_i= i+1;
4173  }
4174  }
4175  }
4176  }else{
4177  distortion += esc_length*lambda;
4178  for(j=survivor_count-1; j>=0; j--){
4179  int run= i - survivor[j];
4180  int score= distortion + score_tab[i-run];
4181 
4182  if(score < best_score){
4183  best_score= score;
4184  run_tab[i+1]= run;
4185  level_tab[i+1]= level-64;
4186  }
4187  }
4188 
4189  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4190  for(j=survivor_count-1; j>=0; j--){
4191  int run= i - survivor[j];
4192  int score= distortion + score_tab[i-run];
4193  if(score < last_score){
4194  last_score= score;
4195  last_run= run;
4196  last_level= level-64;
4197  last_i= i+1;
4198  }
4199  }
4200  }
4201  }
4202  }
4203 
4204  score_tab[i+1]= best_score;
4205 
4206  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4207  if(last_non_zero <= 27){
4208  for(; survivor_count; survivor_count--){
4209  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4210  break;
4211  }
4212  }else{
4213  for(; survivor_count; survivor_count--){
4214  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4215  break;
4216  }
4217  }
4218 
4219  survivor[ survivor_count++ ]= i+1;
4220  }
4221 
4222  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4223  last_score= 256*256*256*120;
4224  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4225  int score= score_tab[i];
4226  if (i)
4227  score += lambda * 2; // FIXME more exact?
4228 
4229  if(score < last_score){
4230  last_score= score;
4231  last_i= i;
4232  last_level= level_tab[i];
4233  last_run= run_tab[i];
4234  }
4235  }
4236  }
4237 
4238  s->coded_score[n] = last_score;
4239 
4240  dc= FFABS(block[0]);
4241  last_non_zero= last_i - 1;
4242  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4243 
4244  if(last_non_zero < start_i)
4245  return last_non_zero;
4246 
4247  if(last_non_zero == 0 && start_i == 0){
4248  int best_level= 0;
4249  int best_score= dc * dc;
4250 
4251  for(i=0; i<coeff_count[0]; i++){
4252  int level= coeff[i][0];
4253  int alevel= FFABS(level);
4254  int unquant_coeff, score, distortion;
4255 
4256  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4257  unquant_coeff= (alevel*qmul + qadd)>>3;
4258  } else{ // MPEG-1
4259  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4260  unquant_coeff = (unquant_coeff - 1) | 1;
4261  }
4262  unquant_coeff = (unquant_coeff + 4) >> 3;
4263  unquant_coeff<<= 3 + 3;
4264 
4265  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4266  level+=64;
4267  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4268  else score= distortion + esc_length*lambda;
4269 
4270  if(score < best_score){
4271  best_score= score;
4272  best_level= level - 64;
4273  }
4274  }
4275  block[0]= best_level;
4276  s->coded_score[n] = best_score - dc*dc;
4277  if(best_level == 0) return -1;
4278  else return last_non_zero;
4279  }
4280 
4281  i= last_i;
4282  av_assert2(last_level);
4283 
4284  block[ perm_scantable[last_non_zero] ]= last_level;
4285  i -= last_run + 1;
4286 
4287  for(; i>start_i; i -= run_tab[i] + 1){
4288  block[ perm_scantable[i-1] ]= level_tab[i];
4289  }
4290 
4291  return last_non_zero;
4292 }
4293 
4294 static int16_t basis[64][64];
4295 
4296 static void build_basis(uint8_t *perm){
4297  int i, j, x, y;
4298  emms_c();
4299  for(i=0; i<8; i++){
4300  for(j=0; j<8; j++){
4301  for(y=0; y<8; y++){
4302  for(x=0; x<8; x++){
4303  double s= 0.25*(1<<BASIS_SHIFT);
4304  int index= 8*i + j;
4305  int perm_index= perm[index];
4306  if(i==0) s*= sqrt(0.5);
4307  if(j==0) s*= sqrt(0.5);
4308  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4309  }
4310  }
4311  }
4312  }
4313 }
4314 
/**
 * Refine an already-quantized 8x8 block (quantizer noise shaping):
 * greedily try changing single coefficients by +-1 and keep the one
 * change per iteration that lowers the combined
 * weighted-distortion + lambda * rate score, until no change helps.
 *
 * @param block  quantized coefficients, updated in place
 * @param weight per-coefficient visual weights, remapped in place to 16..63
 * @param orig   original (unquantized) pixel-domain reference block
 * @param n      block index (selects luma/chroma tables)
 * @return index of the last nonzero coefficient in scan order
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                               int16_t *block, int16_t *weight, int16_t *orig,
                               int n, int qscale){
    int16_t rem[64];                   // residual: reconstruction minus orig, in RECON_SHIFT domain
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];                   // zero-run lengths preceding each nonzero coeff, in scan order
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;             // VLC bit lengths for non-last (run, level) pairs
    const uint8_t *last_length;        // VLC bit lengths for the final (run, level) pair
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    // lazily build the DCT basis table on first use
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable      = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;    // DC is handled separately for intra blocks
//        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable      = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    // seed the residual with the (rounded) DC reconstruction minus the original pixels
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    // remap the raw weights into the 16..63 range and accumulate their
    // energy, which scales lambda below
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    // build the initial run-length table and add every dequantized AC
    // coefficient's basis contribution into the residual
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    // greedy refinement: each pass finds the single +-1 coefficient change
    // with the best score; stop when no change beats the current state
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            // weighted residual transformed to the DCT domain; its sign is
            // used below to reject changes that would grow the error
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        // intra DC is tried separately (it is not part of the AC RLE)
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            // below noise-shaping level 3, only positions up to just past
            // the last nonzero coefficient are considered
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;   // run2 counts down to the next nonzero coefficient
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                // below noise-shaping level 2, never grow a coefficient's magnitude
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        // coefficient stays nonzero: rate delta is just the
                        // VLC length difference for the new level
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        // zero becomes +-1: this splits a run, so the rate
                        // delta involves the following (run, level) pair too
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;  // out of VLC table range, treated as escape

                            if(next_i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                // previous last coefficient is no longer last
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    // +-1 becomes zero: the surrounding runs merge
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            // previous coefficient becomes the new last one
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                // add the distortion change this coefficient change would cause
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            // apply the winning change and update the bookkeeping
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                // the change may have zeroed the last coefficient; rescan backwards
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            // rebuild the run-length table for the next pass
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            // fold the dequantized delta into the residual
            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4630 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 * the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 * speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 * permutation up, the block is not (inverse) permutated
 * to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t scratch[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* gather the coefficients touched by the scan and clear them */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        scratch[pos]  = block[pos];
        block[pos]    = 0;
    }

    /* scatter them back to their permuted positions */
    for (int idx = 0; idx <= last; idx++) {
        const int pos    = scantable[idx];
        const int dst    = permutation[pos];
        block[dst]       = scratch[pos];
    }
}
4666 
4667 static int dct_quantize_c(MPVEncContext *const s,
4668  int16_t *block, int n,
4669  int qscale, int *overflow)
4670 {
4671  int i, last_non_zero, q, start_i;
4672  const int *qmat;
4673  const uint8_t *scantable;
4674  int bias;
4675  int max=0;
4676  unsigned int threshold1, threshold2;
4677 
4678  s->fdsp.fdct(block);
4679 
4680  if(s->dct_error_sum)
4681  s->denoise_dct(s, block);
4682 
4683  if (s->c.mb_intra) {
4684  scantable = s->c.intra_scantable.scantable;
4685  if (!s->c.h263_aic) {
4686  if (n < 4)
4687  q = s->c.y_dc_scale;
4688  else
4689  q = s->c.c_dc_scale;
4690  q = q << 3;
4691  } else
4692  /* For AIC we skip quant/dequant of INTRADC */
4693  q = 1 << 3;
4694 
4695  /* note: block[0] is assumed to be positive */
4696  block[0] = (block[0] + (q >> 1)) / q;
4697  start_i = 1;
4698  last_non_zero = 0;
4699  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4700  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4701  } else {
4702  scantable = s->c.inter_scantable.scantable;
4703  start_i = 0;
4704  last_non_zero = -1;
4705  qmat = s->q_inter_matrix[qscale];
4706  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4707  }
4708  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4709  threshold2= (threshold1<<1);
4710  for(i=63;i>=start_i;i--) {
4711  const int j = scantable[i];
4712  int64_t level = (int64_t)block[j] * qmat[j];
4713 
4714  if(((uint64_t)(level+threshold1))>threshold2){
4715  last_non_zero = i;
4716  break;
4717  }else{
4718  block[j]=0;
4719  }
4720  }
4721  for(i=start_i; i<=last_non_zero; i++) {
4722  const int j = scantable[i];
4723  int64_t level = (int64_t)block[j] * qmat[j];
4724 
4725 // if( bias+level >= (1<<QMAT_SHIFT)
4726 // || bias-level >= (1<<QMAT_SHIFT)){
4727  if(((uint64_t)(level+threshold1))>threshold2){
4728  if(level>0){
4729  level= (bias + level)>>QMAT_SHIFT;
4730  block[j]= level;
4731  }else{
4732  level= (bias - level)>>QMAT_SHIFT;
4733  block[j]= -level;
4734  }
4735  max |=level;
4736  }else{
4737  block[j]=0;
4738  }
4739  }
4740  *overflow= s->max_qcoeff < max; //overflow might have happened
4741 
4742  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4743  if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
4744  ff_block_permute(block, s->c.idsp.idct_permutation,
4745  scantable, last_non_zero);
4746 
4747  return last_non_zero;
4748 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1501
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3982
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1157
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1661
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:398
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:103
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:84
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:220
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:243
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1710
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:430
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:218
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:236
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:277
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2728
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:225
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:230
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2171
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:185
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
MAX_PB2_MB_SIZE
@ MAX_PB2_MB_SIZE
Definition: mpeg4videoenc.h:38
h263data.h
init_unquantize
static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:316
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:104
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2632
level
uint8_t level
Definition: svq3.c:208
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1493
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:45
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:526
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
ALIGN
#define ALIGN
Definition: hashtable.c:32
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:821
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:244
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:126
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1902
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2654
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:249
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2654
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:205
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1277
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3701
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1924
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:117
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2650
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3662
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:673
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:46
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2645
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:231
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:301
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:209
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4294
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:948
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1522
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:237
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:164
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2845
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:808
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:183
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2779
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:270
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:174
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:221
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:512
AVFrame::width
int width
Definition: frame.h:482
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2654
AVPacket::data
uint8_t * data
Definition: packet.h:535
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:377
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:110
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:51
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:195
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2892
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:210
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:511
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:553
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:287
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2827
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2261
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2660
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:936
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:55
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:182
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1241
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:245
mpegutils.h
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:575
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:590
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:862
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:227
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:281
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:182
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:51
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
update_mb_info
static void update_mb_info(MPVEncContext *const s, int startcode)
Definition: mpegvideo_enc.c:2932
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2654
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:186
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1675
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:356
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1177
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2657
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:878
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1941
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:315
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2648
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:57
init_slice_buffers
static av_cold int init_slice_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:505
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:204
faandct.h
Floating point AAN DCT.
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:198
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:54
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:830
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2651
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:722
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:305
fail
#define fail()
Definition: checkasm.h:196
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:58
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:274
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:109
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:960
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:272
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:188
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1209
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:290
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1259
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1286
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2801
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:217
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_mpeg1_encode_init
static void ff_mpeg1_encode_init(MPVEncContext *s)
Definition: mpeg12enc.h:33
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:379
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:274
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:279
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:222
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1459
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:234
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:184
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:96
MpegEncContext::ac_val
int16_t(* ac_val)[16]
used for H.263 AIC, MPEG-4 AC prediction
Definition: mpegvideo.h:151
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:119
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4296
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:202
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1245
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
Definition: rv10enc.c:34
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:122
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:302
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MPVEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:205
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2656
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:189
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:181
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:246
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3627
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:834
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:240
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:49
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:233
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:228
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:183
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1067
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:269
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1270
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4642
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1505
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:279
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:275
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2646
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:829
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2870
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:56
arg
const char * arg
Definition: jacosubdec.c:67
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:272
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:467
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1255
MECmpContext
Definition: me_cmp.h:50
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:196
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:280
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:124
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:212
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:597
run
uint8_t run
Definition: svq3.c:207
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:204
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:95
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:208
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:109
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:912
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:276
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:53
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:217
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:240
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:52
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:336
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1806
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:544
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:189
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3626
MAX_AC_TEX_MB_SIZE
@ MAX_AC_TEX_MB_SIZE
Definition: mpeg4videoenc.h:39
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:868
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1121
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2649
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:937
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2955
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:120
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:143
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1164
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1305
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1325
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2652
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:256
MPVMainEncContext
Definition: mpegvideoenc.h:178
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:186
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:815
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1318
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:842
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:818
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
ff_h263_clean_intra_table_entries
static void ff_h263_clean_intra_table_entries(MpegEncContext *s, int xy)
Definition: h263.h:47
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1312
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:238
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:502
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:82
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2227
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:536
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1005
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:161
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:44
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:216
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:54
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2646
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:215
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:532
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:382
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:125
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:284
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:269
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:98
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:232
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:289
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3716
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:289
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:62
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:442
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:77
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:534
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:378
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:185
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:188
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:541
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:294
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:242
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:265
denoise_dct_c
static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
Definition: mpegvideo_enc.c:3957
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:282
MBBackup
Definition: mpegvideo_enc.c:2642
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:271
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:303
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:309
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:388
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2647
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:110
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:301
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2644
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:528
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *m)
Definition: rv20enc.c:37
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2989
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:250
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2657
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:105
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:971
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:126
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:284
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:204
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:497
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:43
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:281
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:521
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:235
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:159
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:273
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:879
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:494
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:211
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:38
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:285
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:239
AVCodecContext::height
int height
Definition: avcodec.h:592
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:493
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1286
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:900
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1864
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2285
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:447
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, int bits_per_raw_sample)
Definition: pixblockdsp.c:87
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
ff_h263_mpeg4_reset_dc
void ff_h263_mpeg4_reset_dc(MPVEncContext *s)
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:213
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:300
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1357
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:275
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:97
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:111
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:199
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:304
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:836
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:224
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2655
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:482
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2648
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:212
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3625
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:232
mpeg4video.h
MBBackup::c
struct MBBackup::@211 c
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1234
AVRational::den
int den
Denominator.
Definition: rational.h:60
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2654
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:822
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:271
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:223
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:85
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:939
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:892
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:266
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:769
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4315
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:557
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1284
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:270
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1299
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3634
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:203
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:181
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1021
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:938
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:512
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:957
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:272
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:207
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:122
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:455
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: packet.c:610
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2648
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:191
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:880
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4667
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2657
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:288
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:115
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:198
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:179
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2912
MpegEncContext::dc_val
int16_t * dc_val
used for H.263 AIC/MPEG-4 DC prediction and ER
Definition: mpegvideo.h:150
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2654
pixblockdsp.h
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:955
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:911
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:300
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:104
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:711
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1605
intmath.h
MPVEncContext::mpeg_quant
int mpeg_quant
Definition: mpegvideoenc.h:150