static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h*linesize, 128, 16);
}
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h*linesize, 128, 8);
}
        s->hdsp.avg_pixels_tab[0][i]        = gray16;
        s->hdsp.put_pixels_tab[0][i]        = gray16;
        s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

        s->hdsp.avg_pixels_tab[1][i]        = gray8;
        s->hdsp.put_pixels_tab[1][i]        = gray8;
        s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
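        /* Editor's note (assumption, not visible in this fragment): these
         * assignments live in dsp_init() and replace the normal half-pel
         * put/avg functions with the gray16()/gray8() stubs above, so every
         * "motion compensated" block is filled with mid-gray (128). In FFmpeg
         * this is typically gated on a debug flag such as FF_DEBUG_NOMC,
         * which disables motion compensation for debugging. */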

av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
                               const uint8_t *src_scantable)
{
    st->scantable = src_scantable;

    for (int i = 0, end = -1; i < 64; i++) {
        int j = src_scantable[i];
        st->permutated[i] = permutation[j];
        if (permutation[j] > end)
            end = permutation[j];
        st->raster_end[i] = end;
    }
}
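/* Illustrative sketch (editor's addition, not part of this file): the loop
 * above keeps a running maximum of the permuted coefficient positions, so
 * raster_end[i] records the highest raster index reached after the first
 * i+1 scan positions. The same idea in isolation, with hypothetical names: */
static void raster_end_example(const uint8_t scan[64], const uint8_t perm[64],
                               uint8_t raster_end[64])
{
    int end = -1;
    for (int i = 0; i < 64; i++) {
        if (perm[scan[i]] > end)
            end = perm[scan[i]];
        raster_end[i] = end;
    }
}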

    s->idsp.mpeg4_studio_profile = s->studio_profile;

    /* load & permute the scantables; the permutation comes from the selected IDCT */
    if (s->alternate_scan) {
        ff_permute_scantable(s->permutated_intra_h_scantable, ff_alternate_vertical_scan,
                             s->idsp.idct_permutation);
        ff_permute_scantable(s->permutated_intra_v_scantable, ff_alternate_vertical_scan,
                             s->idsp.idct_permutation);

    s->block = s->blocks[0];

/**
 * Initialize an MpegEncContext's thread contexts.
 */
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
{
    int nb_slices = s->slice_context_count, ret;
    size_t slice_size = s->slice_ctx_size ? s->slice_ctx_size : sizeof(*s);

    for (int i = 1; i < nb_slices; i++) {
        s->thread_context[i] = av_memdup(s, slice_size);
        if (!s->thread_context[i])
            return AVERROR(ENOMEM);
        if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
            return ret;
        s->thread_context[i]->start_mb_y =
            (s->mb_height * (i    ) + nb_slices / 2) / nb_slices;
        s->thread_context[i]->end_mb_y   =
            (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    }
    s->start_mb_y = 0;
    s->end_mb_y   = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
                                  : s->mb_height;
    return init_duplicate_context(s);
}
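/* Worked example (editor's illustration, not part of this file): with the
 * rounded division above, mb_height = 36 and nb_slices = 4 give row
 * boundaries 0, 9, 18, 27, 36, i.e. four slices of nine macroblock rows. */
static void slice_bounds_example(int mb_height, int nb_slices,
                                 int start_mb_y[], int end_mb_y[])
{
    for (int i = 0; i < nb_slices; i++) {
        start_mb_y[i] = (mb_height *  i      + nb_slices / 2) / nb_slices;
        end_mb_y[i]   = (mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    }
}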

    s->sc.obmc_scratchpad = NULL;

static av_cold void free_duplicate_contexts(MpegEncContext *s)
{
    for (int i = 1; i < s->slice_context_count; i++) {
        free_duplicate_context(s->thread_context[i]);
        av_freep(&s->thread_context[i]);
    }
    free_duplicate_context(s);
}

    M(ScratchpadContext, sc) \
    M(int16_t*, dc_val) \

#define BACKUP(T, member) T member = dst->member;
    COPY(BACKUP)
    memcpy(dst, src, sizeof(MpegEncContext));
#define RESTORE(T, member) dst->member = member;
    COPY(RESTORE)
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");

    s->progressive_frame    = 1;
    s->progressive_sequence = 1;

    s->picture_number       = 0;

    s->slice_context_count  = 1;

/**
 * Initialize and allocate MpegEncContext fields dependent on the resolution.
 */
av_cold int ff_mpv_init_context_frame(MpegEncContext *s)
{
    BufferPoolContext *const pools = &s->buffer_pools;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;
    int mb_height;
    int y_size, c_size, yc_size, mb_array_size, mv_table_size, x, y;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;
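    /* Worked example (editor's illustration): for interlaced MPEG-2 the
     * height is rounded up to a whole number of 32-pixel macroblock pairs.
     * With height = 240, (240 + 31) / 32 * 2 = 16 MB rows, whereas the
     * progressive formula (240 + 15) / 16 gives 15; the extra row keeps the
     * row count even. */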

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    s->slice_context_count = nb_slices;

    /* VC-1 can change from progressive to interlaced on a per-frame basis,
     * so allocate the tables as for interlaced content. */
    mb_height = s->msmpeg4_version == MSMP4_VC1 ? FFALIGN(s->mb_height, 2)
                                                : s->mb_height;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = mb_height * s->mb_stride;
    mv_table_size = (mb_height + 2) * s->mb_stride + 1;

    s->h_edge_pos = s->mb_width  * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * mb_height + 1);
    c_size  = s->mb_stride * (mb_height + 1);
    yc_size = y_size + 2 * c_size;

    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width;
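    /* Editor's note (illustration, not in the original): the table built
     * above is equivalent to the closed form
     *     mb_index2xy[i] = (i % mb_width) + (i / mb_width) * (mb_width + 1)
     * i.e. the dense macroblock index 0..mb_num-1 is re-spread over rows of
     * mb_stride = mb_width + 1 entries, skipping the one guard slot at the
     * end of each row. */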

#define ALLOC_POOL(name, size, flags) do { \
    pools->name ##_pool = av_refstruct_pool_alloc((size), (flags)); \
    if (!pools->name ##_pool) \
        return AVERROR(ENOMEM); \
} while (0)

        s->p_field_mv_table_base = tmp;
        tmp += s->mb_stride + 1;
        for (int i = 0; i < 2; i++) {
            for (int j = 0; j < 2; j++) {
                s->p_field_mv_table[i][j] = tmp;
                tmp += mv_table_size;
            }
        }

    if (s->msmpeg4_version >= MSMP4_V3) {
        s->coded_block_base = av_mallocz(y_size);
        if (!s->coded_block_base)
            return AVERROR(ENOMEM);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;
    }

    if (s->h263_pred || s->h263_aic || !s->encoding) {
        /* When encoding, each slice (thread) gets its own ac_val and dc_val buffers. */
        size_t allslice_yc_size = yc_size * (s->encoding ? nb_slices : 1);

        /* ac values */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, allslice_yc_size))
            return AVERROR(ENOMEM);
        s->ac_val = s->ac_val_base + s->b8_stride + 1;

        /* dc values: allocate one extra element per encoder slice so that
         * each slice's dc_val can be aligned. */
        allslice_yc_size += s->encoding * nb_slices;
        if (!FF_ALLOC_TYPED_ARRAY(s->dc_val_base, allslice_yc_size))
            return AVERROR(ENOMEM);
        s->dc_val = s->dc_val_base + s->b8_stride + 1;
        for (size_t i = 0; i < allslice_yc_size; ++i)
            s->dc_val_base[i] = 1024;
    }
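    /* Editor's note (assumption): 1024 acts as the "no predictor available"
     * reset value for H.263/MPEG-4 style DC prediction; the same value is
     * written back by ff_clean_intra_table_entries() further below. */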

    if (!(s->mbskip_table  = av_mallocz(mb_array_size + 2)) ||
        !(s->mbintra_table = av_mallocz(mb_array_size)))
        return AVERROR(ENOMEM);
    memset(s->mbintra_table, 1, mb_array_size);

    ALLOC_POOL(qscale_table, mv_table_size, 0);
    ALLOC_POOL(mb_type, mv_table_size * sizeof(uint32_t), 0);

    if (s->out_format == FMT_H263 || s->encoding ||
        (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS)) {
        const int b8_array_size = s->b8_stride * mb_height * 2;
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        ALLOC_POOL(motion_val, mv_size, s->out_format == FMT_H263 ?
                   AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME : 0);
        ALLOC_POOL(ref_index, ref_index_size, 0);
    }

    pools->alloc_mb_width  = s->mb_width;
    pools->alloc_mb_height = mb_height;
    pools->alloc_mb_stride = s->mb_stride;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return AVERROR(EINVAL);
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR(EINVAL);

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            s->p_field_mv_table[i][j] = NULL;

    s->linesize = s->uvlinesize = 0;

    if (s->slice_context_count > 1)
        s->slice_context_count = 1;

    s->context_initialized = 0;
    s->context_reinit      = 0;
    s->linesize = s->uvlinesize = 0;

/**
 * Clean dc, ac for the current non-intra MB.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy   = s->block_index[0];
    /* chroma */
    unsigned uxy = s->block_index[4];
    unsigned vxy = s->block_index[5];
    int16_t *dc_val = s->dc_val;

    /* dc */
    AV_WN32A(dc_val + xy,        1024 << 16 | 1024);
    AV_WN32A(dc_val + xy + wrap, 1024 << 16 | 1024);
    dc_val[uxy] =
    dc_val[vxy] = 1024;

    /* ac pred */
    int16_t (*ac_val)[16] = s->ac_val;
    memset(ac_val[xy + 1],    0,     sizeof(*ac_val));
    memset(ac_val[xy + wrap], 0, 2 * sizeof(*ac_val));
    memset(ac_val[uxy], 0, sizeof(*ac_val));
    memset(ac_val[vxy], 0, sizeof(*ac_val));
}

void ff_init_block_index(MpegEncContext *s)
{
    const int linesize   = s->cur_pic.linesize[0]; /* not s->linesize, which would be wrong for field pictures */
    const int uvlinesize = s->cur_pic.linesize[1];
    const int width_of_mb  = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
    const int height_of_mb =  4 - s->avctx->lowres;

    s->block_index[0] = s->b8_stride * (s->mb_y * 2    ) - 2 + s->mb_x * 2;
    s->block_index[1] = s->b8_stride * (s->mb_y * 2    ) - 1 + s->mb_x * 2;
    s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1) - 2 + s->mb_x * 2;
    s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) - 1 + s->mb_x * 2;
    s->block_index[4] = s->mb_stride * (s->mb_y + 1)                + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    s->block_index[5] = s->mb_stride * (s->mb_y + s->mb_height + 2) + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
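    /* Layout note (editor's assumption): block_index[0..3] address the four
     * luma 8x8 blocks of the current macroblock in the b8_stride grid
     * (top-left, top-right, bottom-left, bottom-right), while block_index[4]
     * and [5] address the Cb and Cr blocks in a separate mb_stride grid that
     * is stored after the luma part of the prediction tables. */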

    s->dest[0] = s->cur_pic.data[0] + (int)((s->mb_x - 1U) <<  width_of_mb);
    s->dest[1] = s->cur_pic.data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
    s->dest[2] = s->cur_pic.data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));

    if (s->picture_structure == PICT_FRAME) {
        s->dest[0] += s->mb_y *   linesize <<  height_of_mb;
        s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
    } else {
        s->dest[0] += (s->mb_y >> 1) *   linesize <<  height_of_mb;
        s->dest[1] += (s->mb_y >> 1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
        s->dest[2] += (s->mb_y >> 1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
        av_assert1((s->mb_y & 1) == (s->picture_structure == PICT_BOTTOM_FIELD));
    }
}

void ff_set_qscale(MpegEncContext *s, int qscale)
{
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;

    s->qscale        = qscale;
    s->chroma_qscale = s->chroma_qscale_table[qscale];

    s->y_dc_scale = s->y_dc_scale_table[qscale];
    s->c_dc_scale = s->c_dc_scale_table[s->chroma_qscale];
}
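/* Usage sketch (editor's illustration, not part of this file): values outside
 * the 1..31 range valid for H.263/MPEG-style quantizers are clamped before
 * the table lookups above. */
static void set_qscale_example(MpegEncContext *s)
{
    ff_set_qscale(s, 40); /* stored as s->qscale == 31 */
    ff_set_qscale(s, 0);  /* stored as s->qscale == 1  */
}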