eatgq.c
/*
 * Electronic Arts TGQ Video Decoder
 * Copyright (c) 2007-2008 Peter Ross <pross@xvid.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Electronic Arts TGQ Video Decoder
 * @author Peter Ross <pross@xvid.org>
 *
 * Technical details here:
 * http://wiki.multimedia.cx/index.php?title=Electronic_Arts_TGQ
 */

#define BITSTREAM_READER_LE

#include "libavutil/mem_internal.h"

#include "aandcttab.h"
#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "eaidct.h"
#include "get_bits.h"

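/* Decoder context: output dimensions, the current dequantization table and
 * scratch space for the six 8x8 coefficient blocks of one macroblock. */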
typedef struct TgqContext {
    AVCodecContext *avctx;
    int width, height;
    int qtable[64];
    DECLARE_ALIGNED(16, int16_t, block)[6][64];
} TgqContext;

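/* Initialize the decoder: TGQ output is reported as fixed 15 fps YUV 4:2:0. */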
static av_cold int tgq_decode_init(AVCodecContext *avctx)
{
    TgqContext *s = avctx->priv_data;
    s->avctx = avctx;
    avctx->framerate = (AVRational){ 15, 1 };
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    return 0;
}

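/* Decode and dequantize one 8x8 block of coefficients. The DC value is an
 * explicit signed 8-bit level; the AC coefficients follow in zigzag order,
 * coded with a short prefix that selects zero runs, +/-1 levels or an
 * explicit level (with an escape to 8 bits). */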
static int tgq_decode_block(TgqContext *s, int16_t block[64], GetBitContext *gb)
{
    const uint8_t *scantable = ff_zigzag_direct;
    int i, j, value;
    block[0] = get_sbits(gb, 8) * s->qtable[0];
    for (i = 1; i < 64;) {
        switch (show_bits(gb, 3)) {
        case 4: // two zero coefficients
            if (i >= 63)
                return AVERROR_INVALIDDATA;
            block[scantable[i++]] = 0;
            /* fall through */
        case 0: // one zero coefficient
            block[scantable[i++]] = 0;
            skip_bits(gb, 3);
            break;
        case 5:
        case 1: // run of zero coefficients
            skip_bits(gb, 2);
            value = get_bits(gb, 6);
            if (value > 64 - i)
                return AVERROR_INVALIDDATA;
            for (j = 0; j < value; j++)
                block[scantable[i++]] = 0;
            break;
        case 6: // level -1
            skip_bits(gb, 3);
            block[scantable[i]] = -s->qtable[scantable[i]];
            i++;
            break;
        case 2: // level +1
            skip_bits(gb, 3);
            block[scantable[i]] = s->qtable[scantable[i]];
            i++;
            break;
        case 7: // 111b
        case 3: // 011b: explicit level, 0x3F escapes to an 8-bit level
            skip_bits(gb, 2);
            if (show_bits(gb, 6) == 0x3F) {
                skip_bits(gb, 6);
                block[scantable[i]] = get_sbits(gb, 8) * s->qtable[scantable[i]];
            } else {
                block[scantable[i]] = get_sbits(gb, 6) * s->qtable[scantable[i]];
            }
            i++;
            break;
        }
    }
    block[0] += 128 << 4;
    return 0;
}

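/* Run the EA IDCT over the six blocks of a macroblock and write the result
 * to the frame at (mb_x, mb_y); chroma is skipped in grayscale mode. */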
static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64], AVFrame *frame,
                            int mb_x, int mb_y)
{
    ptrdiff_t linesize = frame->linesize[0];
    uint8_t *dest_y  = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
    uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;

    ff_ea_idct_put_c(dest_y, linesize, block[0]);
    ff_ea_idct_put_c(dest_y + 8, linesize, block[1]);
    ff_ea_idct_put_c(dest_y + 8 * linesize, linesize, block[2]);
    ff_ea_idct_put_c(dest_y + 8 * linesize + 8, linesize, block[3]);
    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]);
        ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]);
    }
}

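/* Fill an 8x8 block with the flat value of a DC-only coefficient; the 2056
 * constant folds in the 128 pixel bias (128 << 4) plus rounding. */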
static inline void tgq_dconly(TgqContext *s, unsigned char *dst,
                              ptrdiff_t dst_stride, int dc)
{
    int level = av_clip_uint8((dc * s->qtable[0] + 2056) >> 4);
    int j;
    for (j = 0; j < 8; j++)
        memset(dst + j * dst_stride, level, 8);
}

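/* Render a whole macroblock from six DC values only, without an IDCT. */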
static void tgq_idct_put_mb_dconly(TgqContext *s, AVFrame *frame,
                                   int mb_x, int mb_y, const int8_t *dc)
{
    ptrdiff_t linesize = frame->linesize[0];
    uint8_t *dest_y  = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
    uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
    tgq_dconly(s, dest_y, linesize, dc[0]);
    tgq_dconly(s, dest_y + 8, linesize, dc[1]);
    tgq_dconly(s, dest_y + 8 * linesize, linesize, dc[2]);
    tgq_dconly(s, dest_y + 8 * linesize + 8, linesize, dc[3]);
    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        tgq_dconly(s, dest_cb, frame->linesize[1], dc[4]);
        tgq_dconly(s, dest_cr, frame->linesize[2], dc[5]);
    }
}

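/* Decode one macroblock. The leading mode byte either gives the size of a
 * bitstream payload holding six fully coded blocks (mode > 12), or selects
 * one of the DC-only layouts (modes 3, 6 and 12). */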
static int tgq_decode_mb(TgqContext *s, GetByteContext *gbyte,
                         AVFrame *frame, int mb_y, int mb_x)
{
    int mode;
    int i;
    int8_t dc[6];

    mode = bytestream2_get_byte(gbyte);
    if (mode > 12) {
        GetBitContext gb;
        int ret = init_get_bits8(&gb, gbyte->buffer, FFMIN(bytestream2_get_bytes_left(gbyte), mode));
        if (ret < 0)
            return ret;

        for (i = 0; i < 6; i++) {
            int ret = tgq_decode_block(s, s->block[i], &gb);
            if (ret < 0)
                return ret;
        }
        tgq_idct_put_mb(s, s->block, frame, mb_x, mb_y);
        bytestream2_skip(gbyte, mode);
    } else {
        if (mode == 3) {
            memset(dc, bytestream2_get_byte(gbyte), 4);
            dc[4] = bytestream2_get_byte(gbyte);
            dc[5] = bytestream2_get_byte(gbyte);
        } else if (mode == 6) {
            bytestream2_get_buffer(gbyte, dc, 6);
        } else if (mode == 12) {
            for (i = 0; i < 6; i++) {
                dc[i] = bytestream2_get_byte(gbyte);
                bytestream2_skip(gbyte, 1);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
            return -1;
        }
        tgq_idct_put_mb_dconly(s, frame, mb_x, mb_y, dc);
    }
    return 0;
}

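/* Build the dequantization table from the frame quantizer, folding in the
 * inverse AAN scale factors used by the IDCT. */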
static void tgq_calculate_qtable(TgqContext *s, int quant)
{
    int i, j;
    const int a = (14 * (100 - quant)) / 100 + 1;
    const int b = (11 * (100 - quant)) / 100 + 4;
    for (j = 0; j < 8; j++)
        for (i = 0; i < 8; i++)
            s->qtable[j * 8 + i] = ((a * (j + i) / (7 + 7) + b) *
                                    ff_inv_aanscales[j * 8 + i]) >> (14 - 4);
}

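/* Decode a complete frame: read the dimensions (byte order inferred from the
 * 32-bit value at offset 4) and the quantizer from the header, then decode
 * macroblocks in raster order. */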
static int tgq_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    TgqContext *s = avctx->priv_data;
    GetByteContext gbyte;
    int x, y, ret;
    int big_endian;

    if (buf_size < 16) {
        av_log(avctx, AV_LOG_WARNING, "truncated header\n");
        return AVERROR_INVALIDDATA;
    }
    big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
    bytestream2_init(&gbyte, buf + 8, buf_size - 8);
    if (big_endian) {
        s->width  = bytestream2_get_be16u(&gbyte);
        s->height = bytestream2_get_be16u(&gbyte);
    } else {
        s->width  = bytestream2_get_le16u(&gbyte);
        s->height = bytestream2_get_le16u(&gbyte);
    }

    ret = ff_set_dimensions(s->avctx, s->width, s->height);
    if (ret < 0)
        return ret;

    tgq_calculate_qtable(s, bytestream2_get_byteu(&gbyte));
    bytestream2_skipu(&gbyte, 3);

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
        for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
            if (tgq_decode_mb(s, &gbyte, frame, y, x) < 0)
                return AVERROR_INVALIDDATA;

    *got_frame = 1;

    return avpkt->size;
}

const FFCodec ff_eatgq_decoder = {
    .p.name         = "eatgq",
    CODEC_LONG_NAME("Electronic Arts TGQ video"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_TGQ,
    .priv_data_size = sizeof(TgqContext),
    .init           = tgq_decode_init,
    FF_CODEC_DECODE_CB(tgq_decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
};