FFmpeg
mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/mem.h"
35 
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "idctdsp.h"
39 #include "mathops.h"
40 #include "mpeg_er.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "refstruct.h"
45 
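/* A rough sketch of the arithmetic in the MPEG-1 intra path below (my reading
 * of the code, not normative text): the DC coefficient is scaled by the luma
 * or chroma DC scale, and each nonzero AC coefficient becomes approximately
 *     sign(level) * ((|level| * qscale * intra_matrix[j]) >> 3)
 * forced to an odd value via (x - 1) | 1, MPEG-1's oddification step for IDCT
 * mismatch control. Illustrative numbers (not taken from this file):
 * level = 3, qscale = 8, intra_matrix[j] = 16 -> (3*8*16) >> 3 = 48 -> 47. */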
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47  int16_t *block, int n, int qscale)
48 {
49  int i, level, nCoeffs;
50  const uint16_t *quant_matrix;
51 
52  nCoeffs= s->block_last_index[n];
53 
54  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
55  /* XXX: only MPEG-1 */
56  quant_matrix = s->intra_matrix;
57  for(i=1;i<=nCoeffs;i++) {
58  int j= s->intra_scantable.permutated[i];
59  level = block[j];
60  if (level) {
61  if (level < 0) {
62  level = -level;
63  level = (int)(level * qscale * quant_matrix[j]) >> 3;
64  level = (level - 1) | 1;
65  level = -level;
66  } else {
67  level = (int)(level * qscale * quant_matrix[j]) >> 3;
68  level = (level - 1) | 1;
69  }
70  block[j] = level;
71  }
72  }
73 }
74 
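/* Sketch of the MPEG-1 inter path below (illustrative reading of the code):
 * there is no special DC handling, and each nonzero coefficient becomes
 * roughly
 *     sign(level) * (((2*|level| + 1) * qscale * inter_matrix[j]) >> 4)
 * again forced odd with (x - 1) | 1 for mismatch control. */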
75 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
76  int16_t *block, int n, int qscale)
77 {
78  int i, level, nCoeffs;
79  const uint16_t *quant_matrix;
80 
81  nCoeffs= s->block_last_index[n];
82 
83  quant_matrix = s->inter_matrix;
84  for(i=0; i<=nCoeffs; i++) {
85  int j= s->intra_scantable.permutated[i];
86  level = block[j];
87  if (level) {
88  if (level < 0) {
89  level = -level;
90  level = (((level << 1) + 1) * qscale *
91  ((int) (quant_matrix[j]))) >> 4;
92  level = (level - 1) | 1;
93  level = -level;
94  } else {
95  level = (((level << 1) + 1) * qscale *
96  ((int) (quant_matrix[j]))) >> 4;
97  level = (level - 1) | 1;
98  }
99  block[j] = level;
100  }
101  }
102 }
103 
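/* MPEG-2 intra variant, as read from the code below: qscale is first mapped
 * through ff_mpeg2_non_linear_qscale[] when q_scale_type is set, otherwise
 * simply doubled; the per-coefficient arithmetic is then
 *     sign(level) * ((|level| * qscale * intra_matrix[j]) >> 4)
 * with no oddification -- MPEG-2 mismatch control is handled separately
 * (see the bitexact variant below, which toggles block[63]). */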
104 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
105  int16_t *block, int n, int qscale)
106 {
107  int i, level, nCoeffs;
108  const uint16_t *quant_matrix;
109 
110  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
111  else qscale <<= 1;
112 
113  nCoeffs= s->block_last_index[n];
114 
115  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
116  quant_matrix = s->intra_matrix;
117  for(i=1;i<=nCoeffs;i++) {
118  int j= s->intra_scantable.permutated[i];
119  level = block[j];
120  if (level) {
121  if (level < 0) {
122  level = -level;
123  level = (int)(level * qscale * quant_matrix[j]) >> 4;
124  level = -level;
125  } else {
126  level = (int)(level * qscale * quant_matrix[j]) >> 4;
127  }
128  block[j] = level;
129  }
130  }
131 }
132 
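/* Bitexact MPEG-2 intra variant: same arithmetic as above, but it also keeps
 * a running sum of the reconstructed coefficients and flips the LSB of
 * block[63] so that the total comes out odd -- the MPEG-2 mismatch-control
 * rule, which the plain variant above omits. */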
133 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
134  int16_t *block, int n, int qscale)
135 {
136  int i, level, nCoeffs;
137  const uint16_t *quant_matrix;
138  int sum=-1;
139 
140  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
141  else qscale <<= 1;
142 
143  nCoeffs= s->block_last_index[n];
144 
145  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
146  sum += block[0];
147  quant_matrix = s->intra_matrix;
148  for(i=1;i<=nCoeffs;i++) {
149  int j= s->intra_scantable.permutated[i];
150  level = block[j];
151  if (level) {
152  if (level < 0) {
153  level = -level;
154  level = (int)(level * qscale * quant_matrix[j]) >> 4;
155  level = -level;
156  } else {
157  level = (int)(level * qscale * quant_matrix[j]) >> 4;
158  }
159  block[j] = level;
160  sum+=level;
161  }
162  }
163  block[63]^=sum&1;
164 }
165 
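/* MPEG-2 inter variant: (2*|level| + 1) * qscale * inter_matrix[j] >> 5 per
 * nonzero coefficient, plus the same block[63] parity fix as the bitexact
 * intra path. */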
166 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
167  int16_t *block, int n, int qscale)
168 {
169  int i, level, nCoeffs;
170  const uint16_t *quant_matrix;
171  int sum=-1;
172 
173  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
174  else qscale <<= 1;
175 
176  nCoeffs= s->block_last_index[n];
177 
178  quant_matrix = s->inter_matrix;
179  for(i=0; i<=nCoeffs; i++) {
180  int j= s->intra_scantable.permutated[i];
181  level = block[j];
182  if (level) {
183  if (level < 0) {
184  level = -level;
185  level = (((level << 1) + 1) * qscale *
186  ((int) (quant_matrix[j]))) >> 5;
187  level = -level;
188  } else {
189  level = (((level << 1) + 1) * qscale *
190  ((int) (quant_matrix[j]))) >> 5;
191  }
192  block[j] = level;
193  sum+=level;
194  }
195  }
196  block[63]^=sum&1;
197 }
198 
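/* H.263-style intra dequant, per the code below: reconstruction is simply
 * level * 2*qscale +/- qadd with qadd = (qscale - 1) | 1 (0 in AIC mode,
 * where the DC is also left unscaled). With AC prediction enabled all 63 AC
 * positions are touched; otherwise the loop stops at raster_end[] of the
 * last coded coefficient. Illustrative numbers: level = -2, qscale = 5
 * gives -2*10 - 5 = -25. */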
199 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
200  int16_t *block, int n, int qscale)
201 {
202  int i, level, qmul, qadd;
203  int nCoeffs;
204 
205  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
206 
207  qmul = qscale << 1;
208 
209  if (!s->h263_aic) {
210  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
211  qadd = (qscale - 1) | 1;
212  }else{
213  qadd = 0;
214  }
215  if(s->ac_pred)
216  nCoeffs=63;
217  else
218  nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
219 
220  for(i=1; i<=nCoeffs; i++) {
221  level = block[i];
222  if (level) {
223  if (level < 0) {
224  level = level * qmul - qadd;
225  } else {
226  level = level * qmul + qadd;
227  }
228  block[i] = level;
229  }
230  }
231 }
232 
233 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
234  int16_t *block, int n, int qscale)
235 {
236  int i, level, qmul, qadd;
237  int nCoeffs;
238 
239  av_assert2(s->block_last_index[n]>=0);
240 
241  qadd = (qscale - 1) | 1;
242  qmul = qscale << 1;
243 
244  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
245 
246  for(i=0; i<=nCoeffs; i++) {
247  level = block[i];
248  if (level) {
249  if (level < 0) {
250  level = level * qmul - qadd;
251  } else {
252  level = level * qmul + qadd;
253  }
254  block[i] = level;
255  }
256  }
257 }
258 
259 
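/* gray16()/gray8() below simply fill a 16- or 8-pixel-wide block with the
 * value 128 (mid grey). dsp_init() swaps them in for the hpel put/avg
 * functions when FF_DEBUG_NOMC is set, so that motion-compensated output is
 * visibly blanked out for debugging. */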
260 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
261 {
262  while(h--)
263  memset(dst + h*linesize, 128, 16);
264 }
265 
266 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
267 {
268  while(h--)
269  memset(dst + h*linesize, 128, 8);
270 }
271 
272 /* init common dct for both encoder and decoder */
273 static av_cold void dsp_init(MpegEncContext *s)
274 {
275  ff_blockdsp_init(&s->bdsp);
276  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
277  ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
278 
279  if (s->avctx->debug & FF_DEBUG_NOMC) {
280  int i;
281  for (i=0; i<4; i++) {
282  s->hdsp.avg_pixels_tab[0][i] = gray16;
283  s->hdsp.put_pixels_tab[0][i] = gray16;
284  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
285 
286  s->hdsp.avg_pixels_tab[1][i] = gray8;
287  s->hdsp.put_pixels_tab[1][i] = gray8;
288  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
289  }
290  }
291 }
292 
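/* ff_init_scantable() below combines a scan order with the IDCT's coefficient
 * permutation: permutated[i] = permutation[scantable[i]], while raster_end[i]
 * records the highest permuted index seen up to position i. The H.263
 * unquantizers above use raster_end[block_last_index] as their loop bound. */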
293 av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
294  const uint8_t *src_scantable)
295 {
296  st->scantable = src_scantable;
297 
298  for (int i = 0, end = -1; i < 64; i++) {
299  int j = src_scantable[i];
300  st->permutated[i] = permutation[j];
301  if (permutation[j] > end)
302  end = permutation[j];
303  st->raster_end[i] = end;
304  }
305 }
306 
307 av_cold void ff_mpv_idct_init(MpegEncContext *s)
308 {
309  if (s->codec_id == AV_CODEC_ID_MPEG4)
310  s->idsp.mpeg4_studio_profile = s->studio_profile;
311  ff_idctdsp_init(&s->idsp, s->avctx);
312 
313  /* load & permutate scantables
314  * note: only wmv uses different ones
315  */
316  if (s->alternate_scan) {
317  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
318  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
319  } else {
320  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
321  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
322  }
323  ff_permute_scantable(s->permutated_intra_h_scantable, ff_alternate_horizontal_scan,
324  s->idsp.idct_permutation);
325  ff_permute_scantable(s->permutated_intra_v_scantable, ff_alternate_vertical_scan,
326  s->idsp.idct_permutation);
327 
328  s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
329  s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
330  s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
331  s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
332  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
333  if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
334  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
335  s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
336 
337 #if HAVE_INTRINSICS_NEON
338  ff_mpv_common_init_neon(s);
339 #endif
340
341 #if ARCH_ARM
342  ff_mpv_common_init_arm(s);
343 #elif ARCH_PPC
344  ff_mpv_common_init_ppc(s);
345 #elif ARCH_X86
346  ff_mpv_common_init_x86(s);
347 #elif ARCH_MIPS
348  ff_mpv_common_init_mips(s);
349 #endif
350 }
351 
352 static int init_duplicate_context(MpegEncContext *s)
353 {
354  if (s->encoding) {
355  s->me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*s->me.map));
356  if (!s->me.map)
357  return AVERROR(ENOMEM);
358  s->me.score_map = s->me.map + ME_MAP_SIZE;
359 
360  if (s->noise_reduction) {
361  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
362  return AVERROR(ENOMEM);
363  }
364  }
365  if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 1 + s->encoding))
366  return AVERROR(ENOMEM);
367  s->block = s->blocks[0];
368 
369  if (s->out_format == FMT_H263) {
370  int mb_height = s->msmpeg4_version == MSMP4_VC1 ?
371  FFALIGN(s->mb_height, 2) : s->mb_height;
372  int y_size = s->b8_stride * (2 * mb_height + 1);
373  int c_size = s->mb_stride * (mb_height + 1);
374  int yc_size = y_size + 2 * c_size;
375  /* ac values */
376  if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
377  return AVERROR(ENOMEM);
378  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
379  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
380  s->ac_val[2] = s->ac_val[1] + c_size;
381  }
382 
383  return 0;
384 }
385 
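/* The slice-threading setup below duplicates the context once per slice
 * thread and assigns each copy a band of macroblock rows; the boundaries are
 * (mb_height * i + nb_slices/2) / nb_slices, i.e. rows split as evenly as
 * integer rounding allows. Illustrative numbers: mb_height = 30 and
 * nb_slices = 4 give boundaries 0, 8, 15, 23, 30. */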
386 int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
387 {
388  int nb_slices = s->slice_context_count, ret;
389 
390  /* We initialize the copies before the original so that
391  * fields allocated in init_duplicate_context are NULL after
392  * copying. This prevents double-frees upon allocation error. */
393  for (int i = 1; i < nb_slices; i++) {
394  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
395  if (!s->thread_context[i])
396  return AVERROR(ENOMEM);
397  if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
398  return ret;
399  s->thread_context[i]->start_mb_y =
400  (s->mb_height * (i ) + nb_slices / 2) / nb_slices;
401  s->thread_context[i]->end_mb_y =
402  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
403  }
404  s->start_mb_y = 0;
405  s->end_mb_y = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
406  : s->mb_height;
407  return init_duplicate_context(s);
408 }
409 
410 static void free_duplicate_context(MpegEncContext *s)
411 {
412  if (!s)
413  return;
414 
415  av_freep(&s->sc.edge_emu_buffer);
416  av_freep(&s->sc.scratchpad_buf);
417  s->me.temp = s->me.scratchpad =
418  s->sc.obmc_scratchpad = NULL;
419  s->sc.linesize = 0;
420 
421  av_freep(&s->dct_error_sum);
422  av_freep(&s->me.map);
423  s->me.score_map = NULL;
424  av_freep(&s->blocks);
425  av_freep(&s->ac_val_base);
426  s->block = NULL;
427 }
428 
429 static void free_duplicate_contexts(MpegEncContext *s)
430 {
431  for (int i = 1; i < s->slice_context_count; i++) {
432  free_duplicate_context(s->thread_context[i]);
433  av_freep(&s->thread_context[i]);
434  }
435  free_duplicate_context(s);
436 }
437 
438 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
439 {
440 #define COPY(a) bak->a = src->a
441  COPY(sc);
442  COPY(me.map);
443  COPY(me.score_map);
444  COPY(blocks);
445  COPY(block);
446  COPY(start_mb_y);
447  COPY(end_mb_y);
448  COPY(me.map_generation);
449  COPY(dct_error_sum);
450  COPY(dct_count[0]);
451  COPY(dct_count[1]);
452  COPY(ac_val_base);
453  COPY(ac_val[0]);
454  COPY(ac_val[1]);
455  COPY(ac_val[2]);
456 #undef COPY
457 }
458 
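/* ff_update_duplicate_context() refreshes a per-thread copy from the main
 * context: the per-thread pointers listed in backup_duplicate_context() are
 * saved, the whole struct is copied over with memcpy(), and the saved
 * pointers are put back so each thread keeps its own scratch buffers. */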
459 int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
460 {
461  MpegEncContext bak;
462  int ret;
463  // FIXME copy only needed parts
464  backup_duplicate_context(&bak, dst);
465  memcpy(dst, src, sizeof(MpegEncContext));
466  backup_duplicate_context(dst, &bak);
467
468  ret = ff_mpv_framesize_alloc(dst->avctx, &dst->sc, dst->linesize);
469  if (ret < 0) {
470  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
471  "scratch buffers.\n");
472  return ret;
473  }
474  return 0;
475 }
476 
477 /**
478  * Set the given MpegEncContext to common defaults
479  * (same for encoding and decoding).
480  * The changed fields will not depend upon the
481  * prior state of the MpegEncContext.
482  */
483 void ff_mpv_common_defaults(MpegEncContext *s)
484 {
485  s->y_dc_scale_table =
486  s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
487  s->chroma_qscale_table = ff_default_chroma_qscale_table;
488  s->progressive_frame = 1;
489  s->progressive_sequence = 1;
490  s->picture_structure = PICT_FRAME;
491 
492  s->picture_number = 0;
493 
494  s->f_code = 1;
495  s->b_code = 1;
496 
497  s->slice_context_count = 1;
498 }
499 
500 static void free_buffer_pools(BufferPoolContext *pools)
501 {
502  ff_refstruct_pool_uninit(&pools->mbskip_table_pool);
503  ff_refstruct_pool_uninit(&pools->qscale_table_pool);
504  ff_refstruct_pool_uninit(&pools->mb_type_pool);
505  ff_refstruct_pool_uninit(&pools->motion_val_pool);
506  ff_refstruct_pool_uninit(&pools->ref_index_pool);
507  pools->alloc_mb_height = pools->alloc_mb_width = pools->alloc_mb_stride = 0;
508 }
509 
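/* ff_mpv_init_context_frame() below derives all resolution-dependent sizes.
 * Note the interlaced MPEG-2 case rounds mb_height up to an even number of
 * macroblock rows: (height + 31) / 32 * 2 instead of (height + 15) / 16.
 * Illustrative numbers: height = 720 gives 45 rows progressive but 46
 * interlaced. */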
510 int ff_mpv_init_context_frame(MpegEncContext *s)
511 {
512  BufferPoolContext *const pools = &s->buffer_pools;
513  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
514  int mb_height;
515 
516  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
517  s->mb_height = (s->height + 31) / 32 * 2;
518  else
519  s->mb_height = (s->height + 15) / 16;
520 
521  /* VC-1 can change from being progressive to interlaced on a per-frame
522  * basis. We therefore allocate certain buffers so big that they work
523  * in both instances. */
524  mb_height = s->msmpeg4_version == MSMP4_VC1 ?
525  FFALIGN(s->mb_height, 2) : s->mb_height;
526 
527  s->mb_width = (s->width + 15) / 16;
528  s->mb_stride = s->mb_width + 1;
529  s->b8_stride = s->mb_width * 2 + 1;
530  mb_array_size = mb_height * s->mb_stride;
531  mv_table_size = (mb_height + 2) * s->mb_stride + 1;
532 
533  /* set default edge pos, will be overridden
534  * in decode_header if needed */
535  s->h_edge_pos = s->mb_width * 16;
536  s->v_edge_pos = s->mb_height * 16;
537 
538  s->mb_num = s->mb_width * s->mb_height;
539 
540  s->block_wrap[0] =
541  s->block_wrap[1] =
542  s->block_wrap[2] =
543  s->block_wrap[3] = s->b8_stride;
544  s->block_wrap[4] =
545  s->block_wrap[5] = s->mb_stride;
546 
547  y_size = s->b8_stride * (2 * mb_height + 1);
548  c_size = s->mb_stride * (mb_height + 1);
549  yc_size = y_size + 2 * c_size;
550 
551  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
552  return AVERROR(ENOMEM);
553  for (y = 0; y < s->mb_height; y++)
554  for (x = 0; x < s->mb_width; x++)
555  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
556 
557  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
558 
559 #define ALLOC_POOL(name, size, flags) do { \
560  pools->name ##_pool = ff_refstruct_pool_alloc((size), (flags)); \
561  if (!pools->name ##_pool) \
562  return AVERROR(ENOMEM); \
563 } while (0)
564 
565  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
566  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
567  /* interlaced direct mode decoding tables */
568  int16_t (*tmp)[2] = av_calloc(mv_table_size, 4 * sizeof(*tmp));
569  if (!tmp)
570  return AVERROR(ENOMEM);
571  s->p_field_mv_table_base = tmp;
572  tmp += s->mb_stride + 1;
573  for (int i = 0; i < 2; i++) {
574  for (int j = 0; j < 2; j++) {
575  s->p_field_mv_table[i][j] = tmp;
576  tmp += mv_table_size;
577  }
578  }
579  if (s->codec_id == AV_CODEC_ID_MPEG4) {
580  ALLOC_POOL(mbskip_table, mb_array_size + 2,
581  !s->encoding ? FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME : 0);
582  if (!s->encoding) {
583  /* cbp, pred_dir */
584  if (!(s->cbp_table = av_mallocz(mb_array_size)) ||
585  !(s->pred_dir_table = av_mallocz(mb_array_size)))
586  return AVERROR(ENOMEM);
587  }
588  }
589  }
590 
591  if (s->msmpeg4_version >= MSMP4_V3) {
592  s->coded_block_base = av_mallocz(y_size);
593  if (!s->coded_block_base)
594  return AVERROR(ENOMEM);
595  s->coded_block = s->coded_block_base + s->b8_stride + 1;
596  }
597 
598  if (s->h263_pred || s->h263_plus || !s->encoding) {
599  /* dc values */
600  // MN: we need these for error resilience of intra-frames
601  if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
602  return AVERROR(ENOMEM);
603  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
604  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
605  s->dc_val[2] = s->dc_val[1] + c_size;
606  for (i = 0; i < yc_size; i++)
607  s->dc_val_base[i] = 1024;
608  }
609 
610  // Note the + 1 is for a quicker MPEG-4 slice_end detection
611  if (!(s->mbskip_table = av_mallocz(mb_array_size + 2)) ||
612  /* which mb is an intra block, init macroblock skip table */
613  !(s->mbintra_table = av_malloc(mb_array_size)))
614  return AVERROR(ENOMEM);
615  memset(s->mbintra_table, 1, mb_array_size);
616 
617  ALLOC_POOL(qscale_table, mv_table_size, 0);
618  ALLOC_POOL(mb_type, mv_table_size * sizeof(uint32_t), 0);
619 
620  if (s->out_format == FMT_H263 || s->encoding ||
621  (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS)) {
622  const int b8_array_size = s->b8_stride * mb_height * 2;
623  int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
624  int ref_index_size = 4 * mb_array_size;
625 
626  /* FIXME: The output of H.263 with OBMC depends upon
627  * the earlier content of the buffer; therefore we set
628  * the flags to always reset returned buffers here. */
629  ALLOC_POOL(motion_val, mv_size, FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME);
630  ALLOC_POOL(ref_index, ref_index_size, 0);
631  }
632 #undef ALLOC_POOL
633  pools->alloc_mb_width = s->mb_width;
634  pools->alloc_mb_height = mb_height;
635  pools->alloc_mb_stride = s->mb_stride;
636 
637  return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
638 }
639 
640 static void clear_context(MpegEncContext *s)
641 {
642  memset(&s->buffer_pools, 0, sizeof(s->buffer_pools));
643  memset(&s->next_pic, 0, sizeof(s->next_pic));
644  memset(&s->last_pic, 0, sizeof(s->last_pic));
645  memset(&s->cur_pic, 0, sizeof(s->cur_pic));
646 
647  memset(s->thread_context, 0, sizeof(s->thread_context));
648 
649  s->me.map = NULL;
650  s->me.score_map = NULL;
651  s->dct_error_sum = NULL;
652  s->block = NULL;
653  s->blocks = NULL;
654  s->ac_val_base = NULL;
655  s->ac_val[0] =
656  s->ac_val[1] =
657  s->ac_val[2] =NULL;
658  s->me.scratchpad = NULL;
659  s->me.temp = NULL;
660  memset(&s->sc, 0, sizeof(s->sc));
661 
662 
663  s->bitstream_buffer = NULL;
664  s->allocated_bitstream_buffer_size = 0;
665  s->p_field_mv_table_base = NULL;
666  for (int i = 0; i < 2; i++)
667  for (int j = 0; j < 2; j++)
668  s->p_field_mv_table[i][j] = NULL;
669 
670  s->dc_val_base = NULL;
671  s->coded_block_base = NULL;
672  s->mbintra_table = NULL;
673  s->cbp_table = NULL;
674  s->pred_dir_table = NULL;
675 
676  s->mbskip_table = NULL;
677 
678  s->er.error_status_table = NULL;
679  s->er.er_temp_buffer = NULL;
680  s->mb_index2xy = NULL;
681 }
682 
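/* Rough outline of ff_mpv_common_init() below: clear_context() resets all
 * owned pointers, the pixel format and dimensions are validated, dsp_init()
 * and the chroma-shift computation run, ff_mpv_init_context_frame()
 * allocates the resolution-dependent tables, and finally the slice thread
 * contexts are set up via ff_mpv_init_duplicate_contexts(). */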
683 /**
684  * init common structure for both encoder and decoder.
685  * this assumes that some variables like width/height are already set
686  */
687 av_cold int ff_mpv_common_init(MpegEncContext *s)
688 {
689  int nb_slices = (HAVE_THREADS &&
690  s->avctx->active_thread_type & FF_THREAD_SLICE) ?
691  s->avctx->thread_count : 1;
692  int ret;
693 
694  clear_context(s);
695 
696  if (s->encoding && s->avctx->slices)
697  nb_slices = s->avctx->slices;
698 
699  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
700  av_log(s->avctx, AV_LOG_ERROR,
701  "decoding to AV_PIX_FMT_NONE is not supported.\n");
702  return AVERROR(EINVAL);
703  }
704 
705  if ((s->width || s->height) &&
706  av_image_check_size(s->width, s->height, 0, s->avctx))
707  return AVERROR(EINVAL);
708 
709  dsp_init(s);
710 
711  /* set chroma shifts */
712  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
713  &s->chroma_x_shift,
714  &s->chroma_y_shift);
715  if (ret)
716  return ret;
717 
718  if ((ret = ff_mpv_init_context_frame(s)))
719  goto fail;
720 
721  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
722  int max_slices;
723  if (s->mb_height)
724  max_slices = FFMIN(MAX_THREADS, s->mb_height);
725  else
726  max_slices = MAX_THREADS;
727  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
728  " reducing to %d\n", nb_slices, max_slices);
729  nb_slices = max_slices;
730  }
731 
732  s->context_initialized = 1;
733  memset(s->thread_context, 0, sizeof(s->thread_context));
734  s->thread_context[0] = s;
735  s->slice_context_count = nb_slices;
736 
737 // if (s->width && s->height) {
738  ret = ff_mpv_init_duplicate_contexts(s);
739  if (ret < 0)
740  goto fail;
741 // }
742 
743  return 0;
744  fail:
745  ff_mpv_common_end(s);
746  return ret;
747 }
748 
749 void ff_mpv_free_context_frame(MpegEncContext *s)
750 {
751  free_duplicate_contexts(s);
752
753  free_buffer_pools(&s->buffer_pools);
754  av_freep(&s->p_field_mv_table_base);
755  for (int i = 0; i < 2; i++)
756  for (int j = 0; j < 2; j++)
757  s->p_field_mv_table[i][j] = NULL;
758 
759  av_freep(&s->dc_val_base);
760  av_freep(&s->coded_block_base);
761  av_freep(&s->mbintra_table);
762  av_freep(&s->cbp_table);
763  av_freep(&s->pred_dir_table);
764 
765  av_freep(&s->mbskip_table);
766 
767  av_freep(&s->er.error_status_table);
768  av_freep(&s->er.er_temp_buffer);
769  av_freep(&s->mb_index2xy);
770 
771  s->linesize = s->uvlinesize = 0;
772 }
773 
774 void ff_mpv_common_end(MpegEncContext *s)
775 {
776  ff_mpv_free_context_frame(s);
777  if (s->slice_context_count > 1)
778  s->slice_context_count = 1;
779 
780  av_freep(&s->bitstream_buffer);
781  s->allocated_bitstream_buffer_size = 0;
782 
783  ff_mpv_unref_picture(&s->last_pic);
784  ff_mpv_unref_picture(&s->cur_pic);
785  ff_mpv_unref_picture(&s->next_pic);
786 
787  s->context_initialized = 0;
788  s->context_reinit = 0;
789  s->linesize = s->uvlinesize = 0;
790 }
791 
792 
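/* In ff_clean_intra_table_entries() below, the DC predictors of the four
 * luma blocks and the two chroma blocks of the current MB are reset to 1024
 * (128 << 3, the usual "no prediction" value), the stored AC coefficients
 * for those blocks are zeroed, and mbintra_table[] is cleared, so that a
 * following intra MB does not predict from non-intra data. */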
793 /**
794  * Clean dc, ac for the current non-intra MB.
795  */
796 void ff_clean_intra_table_entries(MpegEncContext *s)
797 {
798  int wrap = s->b8_stride;
799  int xy = s->block_index[0];
800 
801  s->dc_val[0][xy ] =
802  s->dc_val[0][xy + 1 ] =
803  s->dc_val[0][xy + wrap] =
804  s->dc_val[0][xy + 1 + wrap] = 1024;
805  /* ac pred */
806  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
807  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
808  /* chroma */
809  wrap = s->mb_stride;
810  xy = s->mb_x + s->mb_y * wrap;
811  s->dc_val[1][xy] =
812  s->dc_val[2][xy] = 1024;
813  /* ac pred */
814  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
815  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
816 
817  s->mbintra_table[xy]= 0;
818 }
819 
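/* ff_init_block_index() precomputes, for the current mb_x/mb_y, the indices
 * of the four luma 8x8 blocks (in b8_stride units) and the two chroma blocks
 * (in mb_stride units), plus the dest[] pointers into the current picture.
 * The shifts depend on lowres and on bits_per_raw_sample (> 8 bit means two
 * bytes per sample, hence the extra +1 in width_of_mb). */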
820 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
821  const int linesize = s->cur_pic.linesize[0]; //not s->linesize as this would be wrong for field pics
822  const int uvlinesize = s->cur_pic.linesize[1];
823  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
824  const int height_of_mb = 4 - s->avctx->lowres;
825 
826  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
827  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
828  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
829  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
830  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
831  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
832  //block_index is not used by mpeg2, so it is not affected by chroma_format
833 
834  s->dest[0] = s->cur_pic.data[0] + (int)((s->mb_x - 1U) << width_of_mb);
835  s->dest[1] = s->cur_pic.data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
836  s->dest[2] = s->cur_pic.data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
837 
838  if (s->picture_structure == PICT_FRAME) {
839  s->dest[0] += s->mb_y * linesize << height_of_mb;
840  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
841  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
842  } else {
843  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
844  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
845  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
846  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
847  }
848 }
849 
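/* ff_set_qscale() clamps qscale to the 1..31 range used by these codecs and
 * refreshes the values derived from it: the chroma quantizer (via
 * chroma_qscale_table[]) and the luma/chroma DC scale factors. */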
850 /**
851  * set qscale and update qscale dependent variables.
852  */
853 void ff_set_qscale(MpegEncContext * s, int qscale)
854 {
855  if (qscale < 1)
856  qscale = 1;
857  else if (qscale > 31)
858  qscale = 31;
859 
860  s->qscale = qscale;
861  s->chroma_qscale= s->chroma_qscale_table[qscale];
862 
863  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
864  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
865 }