static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
            level = (int)(level * qscale * quant_matrix[j]) >> 3;   /* level < 0 branch */
            level = (int)(level * qscale * quant_matrix[j]) >> 3;   /* level > 0 branch */
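
For reference, the reconstruction rule the excerpt above implements can be written as a small standalone routine. The sketch below is not the FFmpeg function itself: it assumes the coefficients and the quantization matrix are already indexed in the same (raster) order, takes the DC scale factor as a parameter instead of reading y_dc_scale/c_dc_scale, and spells out the MPEG-1 "force odd" step that the surviving lines do not show.

#include <stdint.h>

/* Standalone sketch of MPEG-1 intra dequantization (illustrative only).
 * block      : 64 coefficients, DC at index 0
 * matrix     : 64-entry intra quantization matrix, same indexing as block
 * dc_scale   : y_dc_scale or c_dc_scale, depending on the component
 * last_index : index of the last nonzero coefficient */
static void mpeg1_intra_dequant_sketch(int16_t block[64], const uint16_t matrix[64],
                                       int qscale, int dc_scale, int last_index)
{
    block[0] *= dc_scale;                            /* DC is scaled separately */
    for (int i = 1; i <= last_index; i++) {
        int level = block[i];
        if (!level)
            continue;
        int neg = level < 0;
        if (neg)
            level = -level;
        level = (level * qscale * matrix[i]) >> 3;   /* same >> 3 as in the excerpt */
        level = (level - 1) | 1;                     /* MPEG-1: force the result odd */
        block[i] = neg ? -level : level;
    }
}
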
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
            level = (((level << 1) + 1) * qscale *
                     ((int) (quant_matrix[j]))) >> 4;   /* level < 0 branch */
            level = (((level << 1) + 1) * qscale *
                     ((int) (quant_matrix[j]))) >> 4;   /* level > 0 branch */
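
The inter (non-intra) path above differs from the intra one in that there is no DC special case and the reconstruction uses 2*level + 1 before scaling. A minimal sketch under the same simplifying assumptions as before (flat 64-entry arrays, illustrative names):

#include <stdint.h>

/* Standalone sketch of MPEG-1 inter dequantization (illustrative only). */
static void mpeg1_inter_dequant_sketch(int16_t block[64], const uint16_t matrix[64],
                                       int qscale, int last_index)
{
    for (int i = 0; i <= last_index; i++) {
        int level = block[i];
        if (!level)
            continue;
        int neg = level < 0;
        if (neg)
            level = -level;
        level = (((level << 1) + 1) * qscale * matrix[i]) >> 4;  /* (2*level + 1) rule */
        level = (level - 1) | 1;                                 /* force the result odd */
        block[i] = neg ? -level : level;
    }
}
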
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
            level = (int)(level * qscale * quant_matrix[j]) >> 4;   /* level < 0 branch */
            level = (int)(level * qscale * quant_matrix[j]) >> 4;   /* level > 0 branch */
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
            level = (int)(level * qscale * quant_matrix[j]) >> 4;   /* level < 0 branch */
            level = (int)(level * qscale * quant_matrix[j]) >> 4;   /* level > 0 branch */
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
            level = (((level << 1) + 1) * qscale *
                     ((int) (quant_matrix[j]))) >> 5;   /* level < 0 branch */
            level = (((level << 1) + 1) * qscale *
                     ((int) (quant_matrix[j]))) >> 5;   /* level > 0 branch */
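
The three MPEG-2 routines above follow the same pattern as the MPEG-1 ones but drop the per-coefficient "force odd" step; instead, MPEG-2 applies mismatch control once per block, folding the parity of the coefficient sum into the last coefficient (the inter and bitexact intra variants keep a running sum for this purpose). A hedged sketch of that final step:

#include <stdint.h>

/* Sketch of MPEG-2 mismatch control: if the sum of all reconstructed
 * coefficients is even, flip the least significant bit of block[63]. */
static void mpeg2_mismatch_control_sketch(int16_t block[64])
{
    int sum = 0;
    for (int i = 0; i < 64; i++)
        sum += block[i];
    if (!(sum & 1))
        block[63] ^= 1;
}
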
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    qadd = (qscale - 1) | 1;

    nCoeffs = s->intra_scantable.raster_end[s->block_last_index[n]];

    for (i = 1; i <= nCoeffs; i++) {
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
    qadd = (qscale - 1) | 1;

    nCoeffs = s->inter_scantable.raster_end[s->block_last_index[n]];

    for (i = 0; i <= nCoeffs; i++) {
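
Both H.263 routines build the same pair of constants: qmul = 2*qscale and the qadd = (qscale - 1) | 1 seen above (qscale if it is odd, qscale - 1 if it is even), and each nonzero coefficient is reconstructed as level*qmul + qadd, with qadd following the sign of level. A minimal sketch, again assuming a flat 64-entry block:

#include <stdint.h>

/* Standalone sketch of H.263-style inverse quantization (illustrative only). */
static void h263_dequant_sketch(int16_t block[64], int qscale, int last_index)
{
    const int qmul = qscale << 1;
    const int qadd = (qscale - 1) | 1;   /* qscale if odd, qscale - 1 if even */

    for (int i = 0; i <= last_index; i++) {
        int level = block[i];
        if (!level)
            continue;
        block[i] = level < 0 ? level * qmul - qadd
                             : level * qmul + qadd;
    }
}
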
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
        memset(dst + h * linesize, 128, 16);

static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
        memset(dst + h * linesize, 128, 8);
    for (i = 0; i < 4; i++) {
        s->hdsp.avg_pixels_tab[0][i]        = gray16;
        s->hdsp.put_pixels_tab[0][i]        = gray16;
        s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

        s->hdsp.avg_pixels_tab[1][i]        = gray8;
        s->hdsp.put_pixels_tab[1][i]        = gray8;
        s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
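
gray16() and gray8() ignore their source operand and overwrite a 16- or 8-pixel-wide block with the value 128 (mid grey); the row loop around the memset did not survive in the excerpt. Installing them into every put/avg half-pel slot, as the loop above does, makes all motion-compensated prediction produce flat grey blocks. A self-contained version of the same idea, using a hypothetical function-pointer type shaped like the HpelDSP entries:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical stand-in for an hpel pixel-op pointer: dst, src, stride, rows. */
typedef void (*pixels_op)(uint8_t *dst, const uint8_t *src,
                          ptrdiff_t linesize, int h);

/* Fill an 8-pixel-wide block with mid grey instead of copying from src. */
static void fill_gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    (void)src;                       /* the source block is deliberately ignored */
    while (h--)
        memset(dst + h * linesize, 128, 8);
}

/* Replacing the table entries routes every 8-wide prediction through the filler. */
static void install_gray_sketch(pixels_op tab[4])
{
    for (int i = 0; i < 4; i++)
        tab[i] = fill_gray8;
}
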
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
                               const uint8_t *src_scantable)
    for (int i = 0, end = -1; i < 64; i++) {
        int j = src_scantable[i];
        if (permutation[j] > end)
            end = permutation[j];
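
The loop above derives two tables from a scan pattern: permutated[] remaps each scan position through the IDCT coefficient permutation, and raster_end[] records the highest permuted index reached so far, which lets later code know how far into the permuted block the last nonzero coefficient can be. A standalone sketch with plain arrays and hypothetical names:

#include <stdint.h>

/* Sketch of scan-table setup: permute a scan order and track its running maximum. */
static void build_scantable_sketch(uint8_t permutated[64], uint8_t raster_end[64],
                                   const uint8_t scan[64], const uint8_t permutation[64])
{
    int end = -1;
    for (int i = 0; i < 64; i++) {
        int j = scan[i];
        permutated[i] = permutation[j];
        if (permutation[j] > end)
            end = permutation[j];
        raster_end[i] = end;       /* highest permuted position seen up to i */
    }
}
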
    s->idsp.mpeg4_studio_profile = s->studio_profile;

    if (s->alternate_scan) {
                             s->idsp.idct_permutation);
                             s->idsp.idct_permutation);
337 #if HAVE_INTRINSICS_NEON
    if (s->noise_reduction) {
    s->block = s->blocks[0];

    int mb_height = s->msmpeg4_version == MSMP4_VC1 ?
    int y_size  = s->b8_stride * (2 * mb_height + 1);
    int c_size  = s->mb_stride * (mb_height + 1);
    int yc_size = y_size + 2 * c_size;

    s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
    s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
    s->ac_val[2] = s->ac_val[1] + c_size;
    int nb_slices = s->slice_context_count, ret;

    for (int i = 1; i < nb_slices; i++) {
        if (!s->thread_context[i])
        s->thread_context[i]->start_mb_y =
            (s->mb_height * (i    ) + nb_slices / 2) / nb_slices;
        s->thread_context[i]->end_mb_y   =
            (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    s->end_mb_y = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
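
The start_mb_y/end_mb_y assignments above divide the macroblock rows of the frame among nb_slices slice contexts with rounded integer division, so consecutive contexts cover contiguous, nearly equal row ranges. A quick check of the arithmetic (the 45-row example corresponds to a 720-pixel-high frame and is only illustrative):

#include <stdio.h>

/* Print the macroblock-row range each slice context would get,
 * using the same rounded division as the excerpt above. */
int main(void)
{
    const int mb_height = 45;    /* e.g. 720 / 16 rows, illustrative */
    const int nb_slices = 4;

    for (int i = 0; i < nb_slices; i++) {
        int start = (mb_height *  i      + nb_slices / 2) / nb_slices;
        int end   = (mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        printf("slice %d: mb rows [%d, %d)\n", i, start, end);
    }
    return 0;
}
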
    s->me.temp            = s->me.scratchpad  =
    s->sc.obmc_scratchpad = NULL;

    s->me.score_map = NULL;

    for (int i = 1; i < s->slice_context_count; i++) {

#define COPY(a) bak->a = src->a

           "scratch buffers.\n");
    s->y_dc_scale_table =
    s->c_dc_scale_table = ff_mpeg1_dc_scale_table;

    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    s->picture_number = 0;

    s->slice_context_count = 1;
int ff_mpv_init_context_frame(MpegEncContext *s)
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

        s->mb_height = (s->height + 31) / 32 * 2;   /* field sequences: two MB rows per unit */
        s->mb_height = (s->height + 15) / 16;       /* progressive */

    mb_height = s->msmpeg4_version == MSMP4_VC1 ?

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = mb_height * s->mb_stride;
    mv_table_size = (mb_height + 2) * s->mb_stride + 1;

    s->h_edge_pos = s->mb_width  * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * mb_height + 1);
    c_size  = s->mb_stride * (mb_height + 1);
    yc_size = y_size + 2 * c_size;

    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width;

#define ALLOC_POOL(name, size, flags) do { \
    pools->name ##_pool = ff_refstruct_pool_alloc((size), (flags)); \
    if (!pools->name ##_pool) \
        return AVERROR(ENOMEM); \
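
The geometry set up above is the heart of ff_mpv_init_context_frame(): the frame is rounded up to whole 16x16 macroblocks (with field sequences rounding the height to a multiple of two macroblock rows), and mb_stride/b8_stride add one extra column so that neighbour lookups at the edges stay inside the allocated tables. A small worked example of those formulas:

#include <stdio.h>

/* Worked example of the macroblock geometry computed above. */
int main(void)
{
    const int width = 176, height = 144;   /* QCIF, illustrative */

    int mb_width  = (width  + 15) / 16;    /* 11 macroblocks per row */
    int mb_height = (height + 15) / 16;    /*  9 macroblock rows */
    int mb_stride = mb_width + 1;          /* +1 column of padding */
    int b8_stride = mb_width * 2 + 1;      /* stride in 8x8 block units */

    printf("mb_width=%d mb_height=%d mb_stride=%d b8_stride=%d mb_num=%d\n",
           mb_width, mb_height, mb_stride, b8_stride, mb_width * mb_height);
    return 0;
}
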
    s->p_field_mv_table_base = tmp;
    tmp += s->mb_stride + 1;
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
            s->p_field_mv_table[i][j] = tmp;
            tmp += mv_table_size;

    if (s->msmpeg4_version >= MSMP4_V3) {
        if (!s->coded_block_base)
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;

    if (!(s->mbskip_table  = av_mallocz(mb_array_size + 2)) ||
        !(s->mbintra_table = av_malloc(mb_array_size)))
    memset(s->mbintra_table, 1, mb_array_size);

    ALLOC_POOL(mb_type, mv_table_size * sizeof(uint32_t), 0);

    if (s->out_format == FMT_H263 || s->encoding ||
        const int b8_array_size = s->b8_stride * mb_height * 2;
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;
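
The dc_val (and, in init_duplicate_context(), ac_val) pointers above are not separate allocations: one buffer of yc_size entries is carved into a luma plane of y_size entries followed by two chroma planes of c_size entries each, offset by one padding row and column, and every entry is seeded with 1024 so prediction from outside the coded area behaves as if the neighbour held the mid-grey DC reset value. A sketch of the same carving with hypothetical names:

#include <stdint.h>
#include <stdlib.h>

/* Carve one allocation into luma + two chroma DC-predictor planes,
 * mirroring the pointer arithmetic in the excerpt above (illustrative). */
static int alloc_dc_planes_sketch(int16_t *dc[3], int16_t **base,
                                  int b8_stride, int mb_stride, int mb_height)
{
    const int y_size  = b8_stride * (2 * mb_height + 1);
    const int c_size  = mb_stride * (mb_height + 1);
    const int yc_size = y_size + 2 * c_size;

    *base = malloc(yc_size * sizeof(**base));
    if (!*base)
        return -1;

    dc[0] = *base + b8_stride + 1;             /* luma, past the padding row/column */
    dc[1] = *base + y_size + mb_stride + 1;    /* Cb */
    dc[2] = dc[1] + c_size;                    /* Cr */

    for (int i = 0; i < yc_size; i++)
        (*base)[i] = 1024;                     /* DC predictor reset value */
    return 0;
}
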
    memset(&s->buffer_pools, 0, sizeof(s->buffer_pools));
    memset(&s->next_pic, 0, sizeof(s->next_pic));
    memset(&s->last_pic, 0, sizeof(s->last_pic));
    memset(&s->cur_pic,  0, sizeof(s->cur_pic));

    memset(s->thread_context, 0, sizeof(s->thread_context));

    s->me.score_map  = NULL;
    s->dct_error_sum = NULL;
    s->ac_val_base   = NULL;
    s->me.scratchpad = NULL;
    memset(&s->sc, 0, sizeof(s->sc));

    s->bitstream_buffer = NULL;
    s->allocated_bitstream_buffer_size = 0;
    s->p_field_mv_table_base = NULL;
    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            s->p_field_mv_table[i][j] = NULL;

    s->dc_val_base      = NULL;
    s->coded_block_base = NULL;
    s->mbintra_table    = NULL;
    s->pred_dir_table   = NULL;
    s->mbskip_table     = NULL;

    s->er.error_status_table = NULL;
    s->er.er_temp_buffer     = NULL;
    s->mb_index2xy           = NULL;
av_cold int ff_mpv_common_init(MpegEncContext *s)
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

           "decoding to AV_PIX_FMT_NONE is not supported.\n");

    if ((s->width || s->height) &&

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;

    s->context_initialized = 1;

    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;
    s->slice_context_count = nb_slices;
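
The checks above clamp the slice-context count: a user-requested value wins for encoding, but the result may never exceed MAX_THREADS and, for short frames, never exceed the number of macroblock rows. A hedged sketch of that clamping (names hypothetical):

/* Sketch of the slice-count clamp described above (illustrative only). */
static int clamp_nb_slices_sketch(int requested, int mb_height, int max_threads)
{
    int nb_slices = requested > 0 ? requested : 1;

    if (nb_slices > max_threads)
        nb_slices = max_threads;
    if (mb_height && nb_slices > mb_height)
        nb_slices = mb_height;       /* at least one MB row per slice context */
    return nb_slices;
}
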
    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            s->p_field_mv_table[i][j] = NULL;

    s->linesize = s->uvlinesize = 0;

    if (s->slice_context_count > 1)
        s->slice_context_count = 1;

    s->allocated_bitstream_buffer_size = 0;

    s->context_initialized = 0;
    s->context_reinit      = 0;
    s->linesize = s->uvlinesize = 0;
void ff_clean_intra_table_entries(MpegEncContext *s)
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy     + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;

    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));

    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[2][xy] = 1024;

    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy] = 0;
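
ff_clean_intra_table_entries() runs after a macroblock that was not intra coded: the DC predictors of the surrounding 8x8 positions are reset to 1024, the cached AC prediction rows/columns are zeroed, and the macroblock is marked non-intra in mbintra_table so later intra blocks do not predict from it. A tiny sketch of the per-position reset, assuming a hypothetical layout of one DC value plus 16 AC values per position:

#include <stdint.h>
#include <string.h>

/* Reset the DC predictor and AC prediction buffer of one 8x8 position. */
static void reset_pred_entry_sketch(int16_t *dc_plane, int16_t (*ac_plane)[16], int xy)
{
    dc_plane[xy] = 1024;                           /* "no neighbour" DC value */
    memset(ac_plane[xy], 0, sizeof(ac_plane[xy]));
}
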
void ff_init_block_index(MpegEncContext *s)
    const int linesize   = s->cur_pic.linesize[0];
    const int uvlinesize = s->cur_pic.linesize[1];
    const int width_of_mb  = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
    const int height_of_mb =  4 - s->avctx->lowres;

    s->block_index[0] = s->b8_stride * (s->mb_y * 2    ) - 2 + s->mb_x * 2;
    s->block_index[1] = s->b8_stride * (s->mb_y * 2    ) - 1 + s->mb_x * 2;
    s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1) - 2 + s->mb_x * 2;
    s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) - 1 + s->mb_x * 2;
    s->block_index[4] = s->mb_stride * (s->mb_y + 1)                + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    s->block_index[5] = s->mb_stride * (s->mb_y + s->mb_height + 2) + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;

    s->dest[0] = s->cur_pic.data[0] + (int)((s->mb_x - 1U) <<  width_of_mb);
    s->dest[1] = s->cur_pic.data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
    s->dest[2] = s->cur_pic.data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));

        s->dest[0] += s->mb_y * linesize   <<  height_of_mb;                          /* frame picture */
        s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);

        s->dest[0] += (s->mb_y >> 1) * linesize   <<  height_of_mb;                   /* field picture */
        s->dest[1] += (s->mb_y >> 1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
        s->dest[2] += (s->mb_y >> 1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
void ff_set_qscale(MpegEncContext *s, int qscale)
    else if (qscale > 31)

    s->chroma_qscale = s->chroma_qscale_table[qscale];

    s->y_dc_scale = s->y_dc_scale_table[qscale];
    s->c_dc_scale = s->c_dc_scale_table[s->chroma_qscale];
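
ff_set_qscale() clamps qscale to the legal 1..31 range (the surviving "else if (qscale > 31)" line is half of that clamp) and then refreshes the cached chroma_qscale, y_dc_scale and c_dc_scale from their lookup tables so the rest of the codec can read the fields directly. A sketch of the clamp:

/* Sketch of the qscale clamp that precedes the table lookups above. */
static int clamp_qscale_sketch(int qscale)
{
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;
    return qscale;
}
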
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
av_cold int ff_mpv_common_init(MpegEncContext *s)
Initialize the common structure for both encoder and decoder.
#define AV_LOG_WARNING
Something somehow does not look correct.
static void free_duplicate_contexts(MpegEncContext *s)
int ff_mpv_init_context_frame(MpegEncContext *s)
Initializes and allocates the MpegEncContext fields that depend on the resolution.
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
const uint8_t ff_mpeg2_non_linear_qscale[32]
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac for the current non-intra MB.
#define PICT_BOTTOM_FIELD
static int init_duplicate_context(MpegEncContext *s)
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
void ff_init_block_index(MpegEncContext *s)
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
static void free_duplicate_context(MpegEncContext *s)
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w and log2_chroma_h from the pixel format AVPixFmtDescriptor.
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
struct FFRefStructPool * mb_type_pool
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
int alloc_mb_stride
mb_stride used to allocate tables
void ff_mpv_common_end(MpegEncContext *s)
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
av_cold void ff_blockdsp_init(BlockDSPContext *c)
int ff_mpv_framesize_alloc(AVCodecContext *avctx, ScratchpadContext *sc, int linesize)
static const uint8_t *const ff_mpeg1_dc_scale_table
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
#define ALLOC_POOL(name, size, flags)
const uint8_t * scantable
struct FFRefStructPool * motion_val_pool
struct FFRefStructPool * ref_index_pool
struct FFRefStructPool * mbskip_table_pool
int alloc_mb_height
mb_height used to allocate tables
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
void ff_mpv_unref_picture(MPVWorkPicture *pic)
av_cold void ff_mpv_idct_init(MpegEncContext *s)
void ff_set_qscale(MpegEncContext *s, int qscale)
Set qscale and update the qscale-dependent variables.
const uint8_t ff_alternate_horizontal_scan[64]
static void free_buffer_pools(BufferPoolContext *pools)
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
int ff_mpeg_er_init(MpegEncContext *s)
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
static void clear_context(MpegEncContext *s)
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
#define FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
If this flag is set, the entries will be zeroed before being returned to the user (after the init or ...
struct FFRefStructPool * qscale_table_pool
#define av_assert2(cond)
assert() equivalent that does lie in speed-critical code.
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
const uint8_t ff_alternate_vertical_scan[64]
static av_cold void dsp_init(MpegEncContext *s)
#define av_assert1(cond)
assert() equivalent that does not lie in speed-critical code.
void ff_mpv_common_init_ppc(MpegEncContext *s)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
void * av_calloc(size_t nmemb, size_t size)
const uint8_t ff_zigzag_direct[64]
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
const uint8_t ff_default_chroma_qscale_table[32]
#define AV_CODEC_EXPORT_DATA_MVS
Export motion vectors through frame side data.
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
int alloc_mb_width
mb_width used to allocate tables
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)