#include "config_components.h"

#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER

typedef struct SpriteData {
    int coefs[2][7];                            // sprite transform coefficients, 16.16 fixed point
    int effect_type, effect_flag;
    int effect_pcount1, effect_pcount2;         // number of effect parameters stored below
    int effect_params1[15], effect_params2[10]; // effect parameters, 16.16 fixed point
} SpriteData;
static void vc1_sprite_parse_transform(GetBitContext *gb, int c[7])
{
    c[1] = c[3] = 0;

    switch (get_bits(gb, 2)) {
    case 0:
        c[0] = 1 << 16;
        c[2] = get_fp_val(gb);
        c[4] = 1 << 16;
        break;
    case 1:
        c[0] = c[4] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        break;
    case 2:
        c[0] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
        break;
    case 3:
        c[0] = get_fp_val(gb);
        c[1] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[3] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
        break;
    }
    c[5] = get_fp_val(gb);
    if (get_bits1(gb))
        c[6] = get_fp_val(gb);
    else
        c[6] = 1 << 16;
}
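The coefficients above are read as 16.16 fixed-point values. As a small illustration of the integer/milli-fraction split that the debug logging further down relies on, here is a self-contained sketch (fp16_print is a hypothetical helper, not part of the decoder):

#include <stdio.h>
#include <stdlib.h>

/* Print a 16.16 fixed-point value as "int.milli", mirroring the
 * "%d.%.3d" idiom used by the sprite debug logging in this file. */
static void fp16_print(int v)
{
    printf("%d.%.3d", v / (1 << 16), (abs(v) & 0xFFFF) * 1000 / (1 << 16));
}

int main(void)
{
    fp16_print((3 << 16) | 0x8000); /* 3.5 prints as "3.500" */
    putchar('\n');
    return 0;
}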
    for (sprite = 0; sprite <= v->two_sprites; sprite++) {
        vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
        if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
            avpriv_request_sample(avctx, "Non-zero rotation coefficients");
        av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
        for (i = 0; i < 7; i++)
            av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
                   sd->coefs[sprite][i] / (1 << 16),
                   (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
        av_log(avctx, AV_LOG_DEBUG, "\n");
    }
    switch (sd->effect_pcount1 = get_bits(gb, 4)) {
    case 7:
        vc1_sprite_parse_transform(gb, sd->effect_params1);
        break;
    case 14:
        vc1_sprite_parse_transform(gb, sd->effect_params1);
        vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
        break;
    default:
        for (i = 0; i < sd->effect_pcount1; i++)
            sd->effect_params1[i] = get_fp_val(gb);
    }
    if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
        /* effect 13 is simple alpha blending and matches the opacity above */
        av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
        for (i = 0; i < sd->effect_pcount1; i++)
            av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
                   sd->effect_params1[i] / (1 << 16),
                   (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
        av_log(avctx, AV_LOG_DEBUG, "\n");
    }
    sd->effect_pcount2 = get_bits(gb, 16);
    if (sd->effect_pcount2 > 10) {
        av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
        return AVERROR_INVALIDDATA;
    } else if (sd->effect_pcount2) {
        i = -1;
        av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
        while (++i < sd->effect_pcount2) {
            sd->effect_params2[i] = get_fp_val(gb);
            av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
                   sd->effect_params2[i] / (1 << 16),
                   (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
        }
        av_log(avctx, AV_LOG_DEBUG, "\n");
    }
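In the drawing code that follows, coefs[i][0] and coefs[i][2] act as the horizontal advance and offset, coefs[i][4] and coefs[i][5] as the vertical ones, all in 16.16 fixed point: the high 16 bits of xoff + xadv * x select the integer source column and the low 16 bits are the interpolation fraction. A small stand-alone sketch of that mapping (the values are only illustrative, this is not decoder API):

#include <stdio.h>

int main(void)
{
    int xoff = 10 << 16; /* start 10 pixels into the sprite        */
    int xadv = 1 << 15;  /* advance half a source pixel per output */

    for (int x = 0; x < 4; x++) {
        int coord = xoff + xadv * x; /* 16.16 source position */
        printf("out %d -> src col %d, frac %d/65536\n",
               x, coord >> 16, coord & 0xFFFF);
    }
    return 0;
}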
static void vc1_draw_sprites(VC1Context *v, SpriteData *sd)
{
    int i, plane, row, sprite;
    int ysub[2];
    int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
    uint8_t *src_h[2][2];
    int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
    MpegEncContext *s = &v->s;

    for (i = 0; i <= v->two_sprites; i++) {
        /* ... clip the offsets and advances against the sprite dimensions ... */
        xadv[i] = sd->coefs[i][0];
    }
    for (plane = 0; plane < (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY ? 1 : 3); plane++) {
        int width = v->output_width >> !!plane;

        for (row = 0; row < v->output_height >> !!plane; row++) {
            uint8_t *dst = v->sprite_output_frame->data[plane] +
                           v->sprite_output_frame->linesize[plane] * row;

            for (sprite = 0; sprite <= v->two_sprites; sprite++) {
                uint8_t *iplane = s->current_picture.f->data[plane];
                int      iline  = s->current_picture.f->linesize[plane];
                int      ycoord = yoff[sprite] + yadv[sprite] * row;
                int      yline  = ycoord >> 16;
                int      next_line;
                ysub[sprite] = ycoord & 0xFFFF;
                if (sprite) {
                    iplane = s->last_picture.f->data[plane];
                    iline  = s->last_picture.f->linesize[plane];
                }
                next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;

                if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
                    /* no horizontal scaling: point straight into the source plane */
                    src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
                    if (ysub[sprite])
                        src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
                } else {
                    /* horizontal resizing: (re)fill the two cached rows as needed */
                    if (sr_cache[sprite][0] != yline) {
                        if (sr_cache[sprite][1] == yline) {
                            FFSWAP(uint8_t *, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
                            FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
                        } else {
                            v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline,
                                               xoff[sprite], xadv[sprite], width);
                            sr_cache[sprite][0] = yline;
                        }
                    }
                    if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
                        v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
                                           iplane + next_line, xoff[sprite],
                                           xadv[sprite], width);
                        sr_cache[sprite][1] = yline + 1;
                    }
                    src_h[sprite][0] = v->sr_rows[sprite][0];
                    src_h[sprite][1] = v->sr_rows[sprite][1];
                }
            }

            if (!v->two_sprites) {
                if (ysub[0])
                    v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
                else
                    memcpy(dst, src_h[0][0], width);
            } else {
                if (ysub[0] && ysub[1]) {
                    v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], src_h[1][1], ysub[1],
                                                       alpha, width);
                } else if (ysub[0]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], alpha, width);
                } else if (ysub[1]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
                                                       src_h[0][0], (1 << 16) - 1 - alpha, width);
                } else {
                    v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
                }
            }
        }
    }
}
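A brief note on the caching above: the horizontal resizer (vc1dsp.sprite_h) is run at most once per source line, so each sprite keeps two resized lines in sr_rows[] and remembers which source line each slot holds in sr_cache[]. When the next output row needs a line that already sits in the other slot, the slots are swapped instead of recomputed. A simplified, self-contained sketch of that idea (hypothetical helper names, not the decoder's API):

typedef struct RowCache {
    unsigned char *row[2]; /* horizontally resized source lines            */
    int line[2];           /* which source line each slot holds, -1 = none */
} RowCache;

static void fetch_row(RowCache *c, int slot, int line,
                      void (*resize)(unsigned char *dst, int line))
{
    if (c->line[slot] == line)
        return;                       /* already cached, nothing to do      */
    if (c->line[!slot] == line) {     /* other slot has it: swap, no rework */
        unsigned char *tr = c->row[slot];
        int            tl = c->line[slot];
        c->row[slot]  = c->row[!slot];  c->row[!slot]  = tr;
        c->line[slot] = c->line[!slot]; c->line[!slot] = tl;
        return;
    }
    resize(c->row[slot], line);       /* run the horizontal resizer once    */
    c->line[slot] = line;
}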
static int vc1_decode_sprites(VC1Context *v, GetBitContext *gb)
{
    MpegEncContext *s     = &v->s;
    AVCodecContext *avctx = s->avctx;
    SpriteData sd;
    int ret;

    memset(&sd, 0, sizeof(sd));
    ret = vc1_parse_sprites(v, gb, &sd);
    if (ret < 0)
        return ret;
    if (!s->current_picture.f || !s->current_picture.f->data[0]) {
        av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
        return AVERROR_UNKNOWN;
    }
    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
        av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
        v->two_sprites = 0;
    }
    vc1_draw_sprites(v, &sd);
    return 0;
}
    for (plane = 0; plane < (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY ? 1 : 3); plane++)
        for (i = 0; i < v->sprite_height >> !!plane; i++)
            memset(f->data[plane] + i * f->linesize[plane],
                   plane ? 128 : 0, f->linesize[plane]);
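The flush path above clears the missing sprite plane by plane; in 8-bit YUV, black is luma 0 with both chroma planes at 128, which is what the plane ? 128 : 0 argument encodes. A minimal stand-alone version of the same fill, assuming caller-provided plane pointers, strides and heights:

#include <string.h>

/* Fill an 8-bit planar YUV image with black: Y = 0, Cb = Cr = 128. */
static void clear_to_black(unsigned char *data[3], const int linesize[3],
                           const int height[3])
{
    for (int plane = 0; plane < 3; plane++)
        for (int i = 0; i < height[plane]; i++)
            memset(data[plane] + i * linesize[plane],
                   plane ? 128 : 0, linesize[plane]);
}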
av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int i, ret;
    int mb_height = FFALIGN(s->mb_height, 2);

    /* allocate memory to store the field MVs and MB types */
    v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) +
                                   s->mb_stride * (mb_height + 1) * 2));
    v->mv_f[1]      = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) +
                                    s->mb_stride * (mb_height + 1) * 2);
    v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) +
                                         s->mb_stride * (mb_height + 1) * 2);

    /* sprite resizer line caches */
    for (i = 0; i < 4; i++)
        if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width)))
            goto error;

    ret = ff_intrax8_common_init(s->avctx, &v->x8, &s->idsp,
                                 s->block, s->block_last_index,
                                 s->mb_width, s->mb_height);

av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
{
    int i;

    for (i = 0; i < 64; i++) {
#define transpose(x) (((x) >> 3) | (((x) & 7) << 3))
        v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
        /* ... remaining transposed zigzag tables ... */
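The transpose(x) macro above swaps the 3-bit row and column fields of a scan-table entry, turning an index row * 8 + col into col * 8 + row; applying it to every entry of an 8x8 zigzag table yields the transposed scan order. A quick self-contained check:

#include <assert.h>

#define transpose(x) (((x) >> 3) | (((x) & 7) << 3))

int main(void)
{
    /* index 19 is row 2, column 3; its transpose is row 3, column 2 = 26 */
    assert(transpose(19) == 26);
    /* the macro is its own inverse on 6-bit indices */
    for (int i = 0; i < 64; i++)
        assert(transpose(transpose(i)) == i);
    return 0;
}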
    } else if (count < 0) {
        av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
    }

    uint8_t *buf2 = NULL;
    int seq_initialized = 0, ep_initialized = 0;

    for (; next < end; start = next) {
        next = find_next_marker(start + 4, end);
        size = next - start - 4;
        if (size <= 0)
            continue;
    }
    if (!seq_initialized || !ep_initialized) {
        av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
        return AVERROR_INVALIDDATA;
    }
    for (i = 0; i < 4; i++)
        av_freep(&v->sr_rows[i >> 1][i & 1]);
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size, n_slices = 0, i, ret;
    uint8_t *buf2 = NULL;
    const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
    int mb_height, n_slices1 = -1;
    struct {
        uint8_t *buf;
        GetBitContext gb;
        int mby_start;
        const uint8_t *rawbuf;
        int raw_size;
    } *slices = NULL, *tmp;

    /* no new frame data: output the delayed frame that is still buffered */
    if (s->low_delay == 0 && s->next_picture_ptr) {
        if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
            return ret;
        s->next_picture_ptr = NULL;
        *got_frame = 1;
    }
    if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
        const uint8_t *start, *end, *next;
        int size;

        next = buf;
        for (start = buf, end = buf + buf_size; next < end; start = next) {
            next = find_next_marker(start + 4, end);
            size = next - start - 4;
            if (size <= 0)
                continue;
            switch (AV_RB32(start)) {
            case VC1_CODE_FIELD: {
                int buf_size3;
                buf_start_second_field = start;
                /* ... grow slices[] and allocate slices[n_slices].buf ... */
                if (!slices[n_slices].buf) {
                    ret = AVERROR(ENOMEM);
                    goto err;
                }
                buf_size3 = v->vc1dsp.vc1_unescape_buffer(start + 4, size,
                                                          slices[n_slices].buf);
                init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
                              buf_size3 << 3);
                slices[n_slices].mby_start = avctx->coded_height + 31 >> 5;
                slices[n_slices].rawbuf    = start;
                slices[n_slices].raw_size  = size + 4;
                /* index of the last slice of the first field */
                n_slices1 = n_slices - 1;
                n_slices++;
                break;
            }
            case VC1_CODE_SLICE: {
                int buf_size3;
                /* ... grow slices[] and allocate slices[n_slices].buf ... */
                if (!slices[n_slices].buf) {
                    ret = AVERROR(ENOMEM);
                    goto err;
                }
                buf_size3 = v->vc1dsp.vc1_unescape_buffer(start + 4, size,
                                                          slices[n_slices].buf);
                init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
                              buf_size3 << 3);
                slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
                slices[n_slices].rawbuf    = start;
                slices[n_slices].raw_size  = size + 4;
                n_slices++;
                break;
            }
            }
        }
    } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WMV3 interlaced fields */
        const uint8_t *divider;

        divider = find_next_marker(buf, buf + buf_size);
        buf_start_second_field = divider;
        /* ... grow slices[] and allocate slices[n_slices].buf ... */
        if (!slices[n_slices].buf) {
            ret = AVERROR(ENOMEM);
            goto err;
        }
        slices[n_slices].mby_start = s->mb_height + 1 >> 1;
        slices[n_slices].rawbuf    = divider;
        slices[n_slices].raw_size  = buf + buf_size - divider;
        n_slices1 = n_slices - 1;
        n_slices++;
    }
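The splitting above relies on find_next_marker() to locate the next 00 00 01 xx start code and on the fourth byte to tell frame, field and slice units apart. A simplified, self-contained scan in the same spirit (not the decoder's implementation; the unit values in the sample buffer are only illustrative):

#include <stdint.h>
#include <stdio.h>

/* Return a pointer to the next 00 00 01 xx start code in [p, end), or end. */
static const uint8_t *next_marker(const uint8_t *p, const uint8_t *end)
{
    for (; p + 4 <= end; p++)
        if (p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x01)
            return p;
    return end;
}

int main(void)
{
    static const uint8_t es[] = {
        0x00, 0x00, 0x01, 0x0D, 0xAA, 0xBB, /* first unit  */
        0x00, 0x00, 0x01, 0x0B, 0xCC,       /* second unit */
    };
    const uint8_t *end = es + sizeof(es);

    for (const uint8_t *start = next_marker(es, end); start < end; ) {
        const uint8_t *next = next_marker(start + 4, end);
        printf("unit 0x%02X, %d payload bytes\n", start[3], (int)(next - start - 4));
        start = next;
    }
    return 0;
}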
    if (s->context_initialized &&
        (s->width != avctx->coded_width || s->height != avctx->coded_height)) {
        ff_vc1_decode_end(avctx);
    }

    if (!s->context_initialized) {
        /* ... pick the pixel format and initialize the MPEG context ... */
    }

    s->current_picture.f->pict_type = s->pict_type;

    /* pulldown flags from the picture header */
    s->current_picture_ptr->f->repeat_pict = 0;
    if (v->rff)
        s->current_picture_ptr->f->repeat_pict = 1;
    else if (v->rptfrm)
        s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;

    s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
    s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;

    if (v->field_mode && buf_start_second_field) {
        /* hardware-accelerated field pictures: first field, then its slices */
        if (n_slices1 == -1) {
            /* no slices: the whole first field is submitted as one unit */
        } else {
            for (i = 0; i < n_slices1 + 1; i++) {
                s->gb   = slices[i].gb;
                s->mb_y = slices[i].mby_start;
                /* ... submit slices[i].rawbuf to the hwaccel ... */
            }
        }

        /* switch to the bit reader of the second field */
        s->gb   = slices[n_slices1 + 1].gb;
        s->mb_y = slices[n_slices1 + 1].mby_start;
        if ((ret = avctx->hwaccel->start_frame(avctx, buf_start_second_field,
                                               (buf + buf_size) - buf_start_second_field)) < 0)
            goto err;

        if (n_slices - n_slices1 == 2) {
            /* no slices in the second field: submit it as a single unit */
            if ((ret = avctx->hwaccel->decode_slice(avctx, buf_start_second_field,
                                                    (buf + buf_size) - buf_start_second_field)) < 0)
                goto err;
        } else {
            /* submit the field header, then each of its slices */
            if ((ret = avctx->hwaccel->decode_slice(avctx, buf_start_second_field,
                                                    slices[n_slices1 + 2].rawbuf - buf_start_second_field)) < 0)
                goto err;

            for (i = n_slices1 + 2; i < n_slices; i++) {
                s->gb   = slices[i].gb;
                s->mb_y = slices[i].mby_start;
                /* ... submit slices[i].rawbuf to the hwaccel ... */
            }
        }
            for (i = 0; i < n_slices; i++) {
                s->gb   = slices[i].gb;
                s->mb_y = slices[i].mby_start;
                /* ... submit slices[i].rawbuf to the hwaccel ... */
            }
        if (v->field_mode) {
            /* decode each field with doubled strides */
            s->current_picture.f->linesize[0] <<= 1;
            s->current_picture.f->linesize[1] <<= 1;
            s->current_picture.f->linesize[2] <<= 1;
            s->linesize   <<= 1;
            s->uvlinesize <<= 1;
        }
        mb_height = s->mb_height >> v->field_mode;

        for (i = 0; i <= n_slices; i++) {
            if (i > 0 && slices[i - 1].mby_start >= mb_height) {
                if (v->field_mode <= 0) {
                    av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
                           "picture boundary (%d >= %d)\n", i,
                           slices[i - 1].mby_start, mb_height);
                    continue;
                }
                v->second_field = 1;
                v->blocks_off   = s->b8_stride * (s->mb_height & ~1);
                v->mb_off       = s->mb_stride * s->mb_height >> 1;
            } else {
                v->second_field = 0;
                v->blocks_off   = 0;
                v->mb_off       = 0;
            }

            s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i - 1].mby_start % mb_height);
            if (!v->field_mode || v->second_field) {
                s->end_mb_y = (i == n_slices) ? mb_height
                                              : FFMIN(mb_height, slices[i].mby_start % mb_height);
            } else {
                if (i >= n_slices) {
                    av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
                    continue;
                }
                s->end_mb_y = (i == n_slices1 + 1) ? mb_height
                                                   : FFMIN(mb_height, slices[i].mby_start % mb_height);
            }
            if (s->end_mb_y <= s->start_mb_y) {
                av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n",
                       s->end_mb_y, s->start_mb_y);
                continue;
            }

            ff_vc1_decode_blocks(v);
            if (i != n_slices)
                s->gb = slices[i].gb;
        }

        if (v->field_mode) {
            /* restore the frame strides */
            s->current_picture.f->linesize[0] >>= 1;
            s->current_picture.f->linesize[1] >>= 1;
            s->current_picture.f->linesize[2] >>= 1;
            s->linesize   >>= 1;
            s->uvlinesize >>= 1;
        }
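The stride doubling and halving above is how a single field of an interleaved frame is addressed: doubling the line size makes every second line visible, and the bottom field is reached by starting one original line further down. A tiny stand-alone illustration of that addressing trick:

#include <stdio.h>

int main(void)
{
    unsigned char frame[8 * 4]; /* 8 interleaved lines, 4 pixels wide */
    int linesize = 4;

    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 4; x++)
            frame[y * linesize + x] = (unsigned char)y; /* tag each line */

    unsigned char *top    = frame;            /* top field: lines 0, 2, 4, 6    */
    unsigned char *bottom = frame + linesize; /* bottom field: lines 1, 3, 5, 7 */
    linesize <<= 1;                           /* doubled stride skips a line    */

    for (int y = 0; y < 4; y++)
        printf("top line %d, bottom line %d\n",
               top[y * linesize], bottom[y * linesize]);
    return 0;
}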
        ff_dlog(s->avctx, "Consumed %i/%i bits\n",
                get_bits_count(&s->gb), s->gb.size_in_bits);

#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
        if ((ret = vc1_decode_sprites(v, &s->gb)) < 0)
            goto err;
#endif
    } else if (s->last_picture_ptr) {
        if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
            goto err;
        ff_print_debug_info(s, s->last_picture_ptr, pict);
        *got_frame = 1;
    }

end:
    av_free(buf2);
    for (i = 0; i < n_slices; i++)
        av_free(slices[i].buf);
    av_free(slices);
    return buf_size;

err:
    av_free(buf2);
    for (i = 0; i < n_slices; i++)
        av_free(slices[i].buf);
    av_free(slices);
    return ret;
}
#if CONFIG_VC1_DXVA2_HWACCEL
#if CONFIG_VC1_D3D11VA_HWACCEL
#if CONFIG_VC1_NVDEC_HWACCEL
#if CONFIG_VC1_VAAPI_HWACCEL
#if CONFIG_VC1_VDPAU_HWACCEL
#if CONFIG_VC1_DXVA2_HWACCEL
#if CONFIG_VC1_D3D11VA_HWACCEL
#if CONFIG_VC1_D3D11VA2_HWACCEL
#if CONFIG_VC1_NVDEC_HWACCEL
#if CONFIG_VC1_VAAPI_HWACCEL
#if CONFIG_VC1_VDPAU_HWACCEL
#if CONFIG_WMV3_DECODER
#if CONFIG_WMV3_DXVA2_HWACCEL
#if CONFIG_WMV3_D3D11VA_HWACCEL
#if CONFIG_WMV3_D3D11VA2_HWACCEL
#if CONFIG_WMV3_NVDEC_HWACCEL
#if CONFIG_WMV3_VAAPI_HWACCEL
#if CONFIG_WMV3_VDPAU_HWACCEL
#if CONFIG_WMV3IMAGE_DECODER
const FFCodec ff_wmv3image_decoder = {
    .p.name = "wmv3image",
    /* ... */
    .flush  = vc1_sprite_flush,
#if CONFIG_VC1IMAGE_DECODER
const FFCodec ff_vc1image_decoder = {
    .p.name = "vc1image",
    /* ... */
    .flush  = vc1_sprite_flush,
static void error(const char *err)
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
uint8_t zz_8x8[4][64]
Zigzag table for TT_8x8, permuted for IDCT.
#define AV_LOG_WARNING
Something somehow does not look correct.
int new_sprite
Frame decoding info for sprite modes.
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
AVPixelFormat
Pixel format.
@ PROGRESSIVE
in the bitstream is reported as 00b
enum AVColorSpace colorspace
YUV colorspace type.
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
static enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[]
const FFCodec ff_vc1image_decoder
int end_mb_x
Horizontal macroblock limit (used only by mss2)
int interlace
Progressive/interlaced (RPTFTM syntax element)
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64]
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
static int get_bits_count(const GetBitContext *s)
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
int field_picture
whether or not the picture was encoded in separate fields
This structure describes decoded (raw) audio or video data.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
#define HWACCEL_DXVA2(codec)
int top_field_first
If the content is interlaced, is top field displayed first.
#define HWACCEL_D3D11VA2(codec)
uint8_t * mv_type_mb_plane
bitplane for mv_type == (4MV)
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
#define PICT_BOTTOM_FIELD
struct AVCodecContext * avctx
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define FF_DEBUG_PICT_INFO
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static const av_always_inline uint8_t * find_next_marker(const uint8_t *src, const uint8_t *end)
Find VC-1 marker in buffer.
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
static void skip_bits(GetBitContext *s, int n)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
AVCodec p
The public AVCodec.
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
const struct AVCodec * codec
enum AVDiscard skip_frame
Skip decoding for selected frames.
static av_cold int vc1_decode_init(AVCodecContext *avctx)
Initialize a VC1/WMV3 decoder.
int first_pic_header_flag
int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Decode Simple/Main Profiles sequence header.
int flags
AV_CODEC_FLAG_*.
#define HWACCEL_VDPAU(codec)
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
void ff_mpv_common_end(MpegEncContext *s)
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
av_cold int ff_intrax8_common_init(AVCodecContext *avctx, IntraX8Context *w, IDCTDSPContext *idsp, int16_t(*block)[64], int block_last_index[12], int mb_width, int mb_height)
Initialize IntraX8 frame decoder.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
int has_b_frames
Size of the frame reordering buffer in the decoder.
const FFCodec ff_vc1_decoder
#define FF_CODEC_DECODE_CB(func)
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
int res_sprite
Simple/Main Profile sequence header.
int res_fasttx
reserved, always 1
uint8_t * mv_f[2]
0: MV obtained from same field, 1: opposite field
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
uint8_t * over_flags_plane
Overflags bitplane.
void ff_mpeg_er_frame_start(MpegEncContext *s)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
int top_blk_sh
Either 3 or 0, positions of l/t in blk[].
@ AVDISCARD_ALL
discard all
const FFCodec ff_wmv3_decoder
av_cold void ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
uint8_t * forward_mb_plane
bitplane for "forward" MBs
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
uint8_t * direct_mb_plane
bitplane for "direct" MBs
av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
int field_mode
1 for interlaced field pictures
void ff_vc1_decode_blocks(VC1Context *v)
uint8_t * blk_mv_type_base
av_cold void ff_intrax8_common_end(IntraX8Context *w)
Destroy IntraX8 frame structure.
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
@ AV_PICTURE_TYPE_I
Intra.
static unsigned int get_bits1(GetBitContext *s)
int16_t(* luma_mv_base)[2]
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
#define AV_EF_EXPLODE
abort decoding on minor error detection
@ AVCOL_RANGE_UNSPECIFIED
H264ChromaContext h264chroma
const uint8_t ff_vc1_adv_interlaced_8x8_zz[64]
@ AVDISCARD_NONKEY
discard all frames except keyframes
enum AVPictureType pict_type
Picture type of the frame.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
const FFCodec ff_wmv3image_decoder
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
void ff_mpeg_flush(AVCodecContext *avctx)
#define HWACCEL_D3D11VA(codec)
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
#define HWACCEL_NVDEC(codec)
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
uint8_t * sr_rows[2][2]
Sprite resizer line cache.
#define AV_LOG_INFO
Standard information.
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
int interlaced_frame
The content of the picture is interlaced.
Picture * current_picture_ptr
pointer to the current picture
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
int(* vc1_unescape_buffer)(const uint8_t *src, int size, uint8_t *dst)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb)
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
int * ttblk
Transform type at the block level.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
#define FFSWAP(type, a, b)
AVFrame * sprite_output_frame
int color_prim
8 bits, chroma coordinates of the color primaries
#define AV_INPUT_BUFFER_PADDING_SIZE
main external API structure.
VLC * cbpcy_vlc
CBPCY VLC table.
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags.
void ff_mpv_frame_end(MpegEncContext *s)
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
const AVProfile ff_vc1_profiles[]
int matrix_coef
8 bits, Color primaries->YCbCr transform matrix
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
int coded_width
Bitstream width / height, may be different from width/height e.g.
enum FrameCodingMode fcm
Frame decoding info for Advanced profile.
@ AV_PICTURE_TYPE_P
Predicted.
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
#define avpriv_request_sample(...)
static const int16_t alpha[]
int level
Advanced Profile.
This structure stores compressed data.
uint8_t * acpred_plane
AC prediction flags bitplane.
void ff_er_frame_end(ERContext *s)
@ AV_PICTURE_TYPE_BI
BI type.
#define HWACCEL_VAAPI(codec)
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
int transfer_char
8 bits, Opto-electronic transfer characteristics
av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_frame, AVPacket *avpkt)
Decode a VC1/WMV3 frame.
uint8_t * blk_mv_type
0: frame MV, 1: field MV (interlaced frame)
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
@ AVDISCARD_NONREF
discard all non reference
av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)