FFmpeg
rv34.c
Go to the documentation of this file.
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/mem_internal.h"
32 #include "libavutil/thread.h"
33 
34 #include "avcodec.h"
35 #include "decode.h"
36 #include "error_resilience.h"
37 #include "mpegutils.h"
38 #include "mpegvideo.h"
39 #include "mpegvideodec.h"
40 #include "golomb.h"
41 #include "mathops.h"
42 #include "mpeg_er.h"
43 #include "qpeldsp.h"
44 #include "rectangle.h"
45 #include "thread.h"
46 #include "threadprogress.h"
47 
48 #include "rv34vlc.h"
49 #include "rv34data.h"
50 #include "rv34.h"
51 
/** Clear an 8-pixel-wide, 2-row area (two MV cells) via two 4-byte columns. */
static inline void ZERO8x2(void *dst, int stride)
{
    uint8_t *p = dst;

    for (int half = 0; half < 2; half++)
        fill_rectangle(p + 4 * half, 1, 2, stride, 0, 4);
}
57 
58 /** translation of RV30/40 macroblock types to lavc ones */
59 static const int rv34_mb_type_to_lavc[12] = {
72 };
73 
74 
76 
77 static int rv34_decode_mv(RV34DecContext *r, int block_type);
78 
79 /**
80  * @name RV30/40 VLC generating functions
81  * @{
82  */
83 
84 static VLCElem table_data[117592];
85 
86 /**
87  * Generate VLC from codeword lengths.
88  * @param bits codeword lengths (zeroes are accepted)
89  * @param size length of input data
90  * @param vlc output VLC
91  * @param insyms symbols for input codes (NULL for default ones)
92  * @param num VLC table number (for static initialization)
93  */
94 static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc,
95  const uint8_t *syms, int mod_three_bits_offset, int *offset)
96 {
97  int counts[17] = {0}, codes[17];
98  int maxbits;
99 
100  av_assert1(size > 0);
101 
102  for (int i = 0; i < size; i++)
103  counts[bits[i]]++;
104 
105  /* bits[0] is zero for some tables, i.e. syms actually starts at 1.
106  * So we reset it here. The code assigned to this element is 0x00. */
107  codes[0] = counts[0] = 0;
108  for (int i = 0; i < 16; i++) {
109  codes[i+1] = (codes[i] + counts[i]) << 1;
110  if (counts[i])
111  maxbits = i;
112  }
113 
114  uint16_t symbols[MAX_VLC_SIZE];
115  uint16_t cw[MAX_VLC_SIZE];
116  const void *symp = syms;
117  int symbol_size;
118 
119  if (mod_three_bits_offset > 0) {
120  symp = symbols;
121  symbol_size = 2;
122 
123  for (int i = 0, mask = (1 << mod_three_bits_offset) - 1; i < size; ++i) {
124  cw[i] = codes[bits[i]]++;
125  symbols[i] = (modulo_three_table[i >> mod_three_bits_offset] << mod_three_bits_offset) | (i & mask);
126  }
127  } else {
128  if (!mod_three_bits_offset)
129  symp = modulo_three_table;
130 
131  symbol_size = !!symp;
132  for (int i = 0; i < size; ++i)
133  cw[i] = codes[bits[i]]++;
134  }
135 
136  vlc->table = &table_data[*offset];
138  ff_vlc_init_sparse(vlc, FFMIN(maxbits, 9), size,
139  bits, 1, 1,
140  cw, 2, 2,
141  symp, symbol_size, symbol_size, VLC_INIT_STATIC_OVERLONG);
142  *offset += vlc->table_size;
143 }
144 
145 static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp,
146  int mod_three_bits_offset, int *offset)
147 {
148  VLC vlc = { 0 };
149  rv34_gen_vlc_ext(bits, size, &vlc, NULL, mod_three_bits_offset, offset);
150  *vlcp = vlc.table;
151 }
152 
153 /**
154  * Initialize all tables.
155  */
156 static av_cold void rv34_init_tables(void)
157 {
158  int i, j, k, offset = 0;
159 
160  for(i = 0; i < NUM_INTRA_TABLES; i++){
161  for(j = 0; j < 2; j++){
163  &intra_vlcs[i].cbppattern[j], 4, &offset);
165  &intra_vlcs[i].second_pattern[j], 0, &offset);
167  &intra_vlcs[i].third_pattern[j], 0, &offset);
168  for(k = 0; k < 4; k++){
170  &intra_vlcs[i].cbp[j][k], rv34_cbp_code, -1, &offset);
171  }
172  }
173  for(j = 0; j < 4; j++){
175  &intra_vlcs[i].first_pattern[j], 3, &offset);
176  }
178  &intra_vlcs[i].coefficient, -1, &offset);
179  }
180 
181  for(i = 0; i < NUM_INTER_TABLES; i++){
183  &inter_vlcs[i].cbppattern[0], 4, &offset);
184  for(j = 0; j < 4; j++){
186  &inter_vlcs[i].cbp[0][j], rv34_cbp_code, -1, &offset);
187  }
188  for(j = 0; j < 2; j++){
190  &inter_vlcs[i].first_pattern[j], 3, &offset);
192  &inter_vlcs[i].second_pattern[j], 0, &offset);
194  &inter_vlcs[i].third_pattern[j], 0, &offset);
195  }
197  &inter_vlcs[i].coefficient, -1, &offset);
198  }
199 }
200 
201 /** @} */ // vlc group
202 
203 /**
204  * @name RV30/40 4x4 block decoding functions
205  * @{
206  */
207 
208 /**
209  * Decode coded block pattern.
210  */
211 static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
212 {
213  int pattern, code, cbp=0;
214  int ones;
215  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
216  static const int shifts[4] = { 0, 2, 8, 10 };
217  const int *curshift = shifts;
218  int i, t, mask;
219 
220  code = get_vlc2(gb, vlc->cbppattern[table], 9, 2);
221  pattern = code & 0xF;
222  code >>= 4;
223 
224  ones = rv34_count_ones[pattern];
225 
226  for(mask = 8; mask; mask >>= 1, curshift++){
227  if(pattern & mask)
228  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
229  }
230 
231  for(i = 0; i < 4; i++){
232  t = (code >> (6 - 2*i)) & 3;
233  if(t == 1)
234  cbp |= cbp_masks[get_bits1(gb)] << i;
235  if(t == 2)
236  cbp |= cbp_masks[2] << i;
237  }
238  return cbp;
239 }
240 
241 /**
242  * Get one coefficient value from the bitstream and store it.
243  */
244 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb,
245  const VLCElem *vlc, int q)
246 {
247  if(coef){
248  if(coef == esc){
249  coef = get_vlc2(gb, vlc, 9, 2);
250  if(coef > 23){
251  coef -= 23;
252  coef = 22 + ((1 << coef) | get_bits(gb, coef));
253  }
254  coef += esc;
255  }
256  if(get_bits1(gb))
257  coef = -coef;
258  *dst = (coef*q + 8) >> 4;
259  }
260 }
261 
262 /**
263  * Decode 2x2 subblock of coefficients.
264  */
265 static inline void decode_subblock(int16_t *dst, int flags, const int is_block2,
266  GetBitContext *gb, const VLCElem *vlc, int q)
267 {
268  decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
269  if(is_block2){
270  decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
271  decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
272  }else{
273  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
274  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
275  }
276  decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
277 }
278 
279 /**
280  * Decode a single coefficient.
281  */
282 static inline void decode_subblock1(int16_t *dst, int flags, GetBitContext *gb,
283  const VLCElem *vlc, int q)
284 {
285  int coeff = flags >> 6;
286  decode_coeff(dst, coeff, 3, gb, vlc, q);
287 }
288 
289 static inline void decode_subblock3(int16_t *dst, int flags, GetBitContext *gb,
290  const VLCElem *vlc,
291  int q_dc, int q_ac1, int q_ac2)
292 {
293  decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
294  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
295  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
296  decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
297 }
298 
299 /**
300  * Decode coefficients for 4x4 block.
301  *
302  * This is done by filling 2x2 subblocks with decoded coefficients
303  * in this order (the same for subblocks and subblock coefficients):
304  * o--o
305  * /
306  * /
307  * o--o
308  */
309 
310 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc,
311  int fc, int sc, int q_dc, int q_ac1, int q_ac2)
312 {
313  int flags = get_vlc2(gb, rvlc->first_pattern[fc], 9, 2);
314 
315  int pattern = flags & 0x7;
316 
317  flags >>= 3;
318 
319  if (flags & 0x3F) {
320  decode_subblock3(dst, flags, gb, rvlc->coefficient, q_dc, q_ac1, q_ac2);
321  } else {
322  decode_subblock1(dst, flags, gb, rvlc->coefficient, q_dc);
323  if (!pattern)
324  return 0;
325  }
326 
327  if(pattern & 4){
328  flags = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
329  decode_subblock(dst + 4*0+2, flags, 0, gb, rvlc->coefficient, q_ac2);
330  }
331  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
332  flags = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
333  decode_subblock(dst + 4*2+0, flags, 1, gb, rvlc->coefficient, q_ac2);
334  }
335  if(pattern & 1){
336  flags = get_vlc2(gb, rvlc->third_pattern[sc], 9, 2);
337  decode_subblock(dst + 4*2+2, flags, 0, gb, rvlc->coefficient, q_ac2);
338  }
339  return 1;
340 }
341 
342 /**
343  * @name RV30/40 bitstream parsing
344  * @{
345  */
346 
347 /**
348  * Decode starting slice position.
349  * @todo Maybe replace with ff_h263_decode_mba() ?
350  */
352 {
353  int i;
354  for(i = 0; i < 5; i++)
355  if(rv34_mb_max_sizes[i] >= mb_size - 1)
356  break;
357  return get_bits(gb, rv34_mb_bits_sizes[i]);
358 }
359 
360 /**
361  * Select VLC set for decoding from current quantizer, modifier and frame type.
362  */
363 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
364 {
365  if(mod == 2 && quant < 19) quant += 10;
366  else if(mod && quant < 26) quant += 5;
367  av_assert2(quant >= 0 && quant < 32);
370 }
371 
372 /**
373  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
374  */
375 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
376 {
377  MpegEncContext *s = &r->s;
378  GetBitContext *const gb = &r->gb;
379  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
380  int t;
381 
382  r->is16 = get_bits1(gb);
383  if(r->is16){
384  s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA16x16;
385  r->block_type = RV34_MB_TYPE_INTRA16x16;
386  t = get_bits(gb, 2);
387  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
388  r->luma_vlc = 2;
389  }else{
390  if(!r->rv30){
391  if(!get_bits1(gb))
392  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
393  }
394  s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA;
395  r->block_type = RV34_MB_TYPE_INTRA;
396  if(r->decode_intra_types(r, gb, intra_types) < 0)
397  return -1;
398  r->luma_vlc = 1;
399  }
400 
401  r->chroma_vlc = 0;
402  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
403 
404  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
405 }
406 
407 /**
408  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
409  */
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
{
    MpegEncContext *s = &r->s;
    GetBitContext *const gb = &r->gb;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int i, t;

    // macroblock type comes from a per-flavour callback (RV30 vs RV40)
    r->block_type = r->decode_mb_info(r);
    if(r->block_type == -1)
        return -1;
    s->cur_pic.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
    r->mb_type[mb_pos] = r->block_type;
    // a skipped MB borrows the motion scheme implied by the picture type:
    // plain 16x16 MC in P-frames, direct mode in B-frames
    if(r->block_type == RV34_MB_SKIP){
        if(s->pict_type == AV_PICTURE_TYPE_P)
            r->mb_type[mb_pos] = RV34_MB_P_16x16;
        if(s->pict_type == AV_PICTURE_TYPE_B)
            r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
    }
    r->is16 = !!IS_INTRA16x16(s->cur_pic.mb_type[mb_pos]);
    // decode MV differences and perform prediction + motion compensation
    if (rv34_decode_mv(r, r->block_type) < 0)
        return -1;
    if(r->block_type == RV34_MB_SKIP){
        // skipped MB carries no residual; clear intra types for neighbours
        fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
        return 0;
    }
    r->chroma_vlc = 1;
    r->luma_vlc = 0;

    if (IS_INTRA(s->cur_pic.mb_type[mb_pos])) {
        if(r->is16){
            // one 2-bit prediction mode for the whole 16x16 block
            t = get_bits(gb, 2);
            fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
            r->luma_vlc = 2;
        }else{
            // per-4x4 intra prediction modes
            if(r->decode_intra_types(r, gb, intra_types) < 0)
                return -1;
            r->luma_vlc = 1;
        }
        r->chroma_vlc = 0;
        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
    }else{
        // inter MB: reset all stored intra prediction types
        for(i = 0; i < 16; i++)
            intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
        if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
            // mixed MB: luma residual coded like intra 16x16
            r->is16 = 1;
            r->chroma_vlc = 1;
            r->luma_vlc = 2;
            r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
        }
    }

    return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
}
464 
465 /** @} */ //bitstream functions
466 
467 /**
468  * @name motion vector related code (prediction, reconstruction, motion compensation)
469  * @{
470  */
471 
/** macroblock partition width in 8x8 blocks, indexed by RV34_MB_* type */
static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };

/** macroblock partition height in 8x8 blocks, indexed by RV34_MB_* type */
static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };

/** availability index for subblocks (positions inside the 4-wide avail_cache) */
static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
480 
481 /**
482  * motion vector prediction
483  *
484  * Motion prediction performed for the block by using median prediction of
485  * motion vectors from the left, top and right top blocks but in corner cases
486  * some other vectors may be used instead.
487  */
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
{
    MpegEncContext *s = &r->s;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2] = {0}, B[2], C[2];  /* left / top / top-right candidate MVs */
    int i, j;
    int mx, my;
    int* avail = r->avail_cache + avail_indexes[subblock_no];
    int c_off = part_sizes_w[block_type];  /* horizontal offset of the top-right candidate */
    int16_t (*motion_val)[2] = s->cur_pic.motion_val[0];

    mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
    if(subblock_no == 3)
        c_off = -1;  /* bottom-right subblock: use the top-left candidate instead */

    if(avail[-1]){  /* left neighbour */
        A[0] = motion_val[mv_pos-1][0];
        A[1] = motion_val[mv_pos-1][1];
    }
    if(avail[-4]){  /* top neighbour */
        B[0] = motion_val[mv_pos-s->b8_stride][0];
        B[1] = motion_val[mv_pos-s->b8_stride][1];
    }else{
        B[0] = A[0];
        B[1] = A[1];
    }
    if(!avail[c_off-4]){  /* top-right (or top-left) candidate unavailable */
        if(avail[-4] && (avail[-1] || r->rv30)){
            /* fall back to the top-left neighbour */
            C[0] = motion_val[mv_pos-s->b8_stride-1][0];
            C[1] = motion_val[mv_pos-s->b8_stride-1][1];
        }else{
            C[0] = A[0];
            C[1] = A[1];
        }
    }else{
        C[0] = motion_val[mv_pos-s->b8_stride+c_off][0];
        C[1] = motion_val[mv_pos-s->b8_stride+c_off][1];
    }
    /* median prediction plus the decoded MV difference */
    mx = mid_pred(A[0], B[0], C[0]);
    my = mid_pred(A[1], B[1], C[1]);
    mx += r->dmv[dmv_no][0];
    my += r->dmv[dmv_no][1];
    /* propagate the reconstructed MV to every 8x8 unit of this partition */
    for(j = 0; j < part_sizes_h[block_type]; j++){
        for(i = 0; i < part_sizes_w[block_type]; i++){
            motion_val[mv_pos + i + j*s->b8_stride][0] = mx;
            motion_val[mv_pos + i + j*s->b8_stride][1] = my;
        }
    }
}
537 
538 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
539 
540 /**
541  * Calculate motion vector component that should be added for direct blocks.
542  */
543 static int calc_add_mv(RV34DecContext *r, int dir, int val)
544 {
545  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
546 
547  return (int)(val * (SUINT)mul + 0x2000) >> 14;
548 }
549 
550 /**
551  * Predict motion vector for B-frame macroblock.
552  */
/**
 * Predict motion vector for a B-frame macroblock.
 *
 * With all three candidates present the median is used; otherwise the
 * available candidates are averaged (missing ones contribute zero).
 */
static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
                                      int A_avail, int B_avail, int C_avail,
                                      int *mx, int *my)
{
    const int navail = A_avail + B_avail + C_avail;

    if (navail == 3) {
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else {
        *mx = A[0] + B[0] + C[0];
        *my = A[1] + B[1] + C[1];
        if (navail == 2) {
            *mx /= 2;
            *my /= 2;
        }
    }
}
569 
570 /**
571  * motion vector prediction for B-frames
572  */
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
{
    MpegEncContext *s = &r->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };  /* left/top/top-right candidates */
    int has_A = 0, has_B = 0, has_C = 0;
    int mx, my;
    int i, j;
    MPVWorkPicture *cur_pic = &s->cur_pic;
    /* only neighbours carrying an MV in the requested direction qualify */
    const int mask = dir ? MB_TYPE_BACKWARD_MV : MB_TYPE_FORWARD_MV;
    int type = cur_pic->mb_type[mb_pos];

    if((r->avail_cache[6-1] & type) & mask){  /* left */
        A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
        A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
        has_A = 1;
    }
    if((r->avail_cache[6-4] & type) & mask){  /* top */
        B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
        B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
        has_B = 1;
    }
    if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){  /* top-right */
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
        has_C = 1;
    }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
        /* at the right picture edge fall back to the top-left neighbour */
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
        has_C = 1;
    }

    rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);

    /* add the decoded MV difference for this direction */
    mx += r->dmv[dir][0];
    my += r->dmv[dir][1];

    /* store the same MV in all four 8x8 units of the macroblock */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
        }
    }
    if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
        /* single-direction MB: clear the MVs of the unused direction */
        ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
    }
}
621 
622 /**
623  * motion vector prediction - RV3 version
624  */
/* NOTE(review): block_type and dir parameters are unused in this RV30 variant. */
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
{
    MpegEncContext *s = &r->s;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2] = {0}, B[2], C[2];  /* left/top/top-right candidate MVs */
    int i, j, k;
    int mx, my;
    int* avail = r->avail_cache + avail_indexes[0];

    if(avail[-1]){  /* left neighbour */
        A[0] = s->cur_pic.motion_val[0][mv_pos - 1][0];
        A[1] = s->cur_pic.motion_val[0][mv_pos - 1][1];
    }
    if(avail[-4]){  /* top neighbour */
        B[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride][0];
        B[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride][1];
    }else{
        B[0] = A[0];
        B[1] = A[1];
    }
    if(!avail[-4 + 2]){  /* top-right neighbour unavailable */
        if(avail[-4] && (avail[-1])){
            /* fall back to the top-left neighbour */
            C[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride - 1][0];
            C[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride - 1][1];
        }else{
            C[0] = A[0];
            C[1] = A[1];
        }
    }else{
        C[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride + 2][0];
        C[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride + 2][1];
    }
    /* median prediction plus the decoded MV difference */
    mx = mid_pred(A[0], B[0], C[0]);
    my = mid_pred(A[1], B[1], C[1]);
    mx += r->dmv[0][0];
    my += r->dmv[0][1];
    /* store the result in both MV lists for all four 8x8 units */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            for(k = 0; k < 2; k++){
                s->cur_pic.motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
                s->cur_pic.motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
            }
        }
    }
}
670 
671 static const int chroma_coeffs[3] = { 0, 3, 5 };
672 
673 /**
674  * generic motion compensation function
675  *
676  * @param r decoder context
677  * @param block_type type of the current block
678  * @param xoff horizontal offset from the start of the current block
679  * @param yoff vertical offset from the start of the current block
680  * @param mv_off offset to the motion vector information
681  * @param width width of the current partition in 8x8 blocks
682  * @param height height of the current partition in 8x8 blocks
683  * @param dir motion compensation direction (i.e. from the last or the next reference frame)
684  * @param thirdpel motion vectors are specified in 1/3 of pixel
685  * @param qpel_mc a set of functions used to perform luma motion compensation
686  * @param chroma_mc a set of functions used to perform chroma motion compensation
687  */
688 static inline void rv34_mc(RV34DecContext *r, const int block_type,
689  const int xoff, const int yoff, int mv_off,
690  const int width, const int height, int dir,
691  const int thirdpel, int weighted,
692  qpel_mc_func (*qpel_mc)[16],
694 {
695  MpegEncContext *s = &r->s;
696  uint8_t *Y, *U, *V;
697  const uint8_t *srcY, *srcU, *srcV;
698  int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
699  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
700  int is16x16 = 1;
701  int emu = 0;
702  int16_t *motion_val = s->cur_pic.motion_val[dir][mv_pos];
703 
704  if(thirdpel){
705  int chroma_mx, chroma_my;
706  mx = (motion_val[0] + (3 << 24)) / 3 - (1 << 24);
707  my = (motion_val[1] + (3 << 24)) / 3 - (1 << 24);
708  lx = (motion_val[0] + (3 << 24)) % 3;
709  ly = (motion_val[1] + (3 << 24)) % 3;
710  chroma_mx = motion_val[0] / 2;
711  chroma_my = motion_val[1] / 2;
712  umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
713  umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
714  uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
715  uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
716  }else{
717  int cx, cy;
718  mx = motion_val[0] >> 2;
719  my = motion_val[1] >> 2;
720  lx = motion_val[0] & 3;
721  ly = motion_val[1] & 3;
722  cx = motion_val[0] / 2;
723  cy = motion_val[1] / 2;
724  umx = cx >> 2;
725  umy = cy >> 2;
726  uvmx = (cx & 3) << 1;
727  uvmy = (cy & 3) << 1;
728  //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3
729  if(uvmx == 6 && uvmy == 6)
730  uvmx = uvmy = 4;
731  }
732 
733  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
734  /* wait for the referenced mb row to be finished */
735  int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
736  const ThreadProgress *p = dir ? &s->next_pic.ptr->progress : &s->last_pic.ptr->progress;
737  ff_thread_progress_await(p, mb_row);
738  }
739 
740  dxy = ly*4 + lx;
741  srcY = dir ? s->next_pic.data[0] : s->last_pic.data[0];
742  srcU = dir ? s->next_pic.data[1] : s->last_pic.data[1];
743  srcV = dir ? s->next_pic.data[2] : s->last_pic.data[2];
744  src_x = s->mb_x * 16 + xoff + mx;
745  src_y = s->mb_y * 16 + yoff + my;
746  uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
747  uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
748  srcY += src_y * s->linesize + src_x;
749  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
750  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
751  if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
752  (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
753  (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
754  srcY -= 2 + 2*s->linesize;
755  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
756  s->linesize, s->linesize,
757  (width << 3) + 6, (height << 3) + 6,
758  src_x - 2, src_y - 2,
759  s->h_edge_pos, s->v_edge_pos);
760  srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
761  emu = 1;
762  }
763  if(!weighted){
764  Y = s->dest[0] + xoff + yoff *s->linesize;
765  U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
766  V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
767  }else{
768  Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
769  U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
770  V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
771  }
772 
773  if(block_type == RV34_MB_P_16x8){
774  qpel_mc[1][dxy](Y, srcY, s->linesize);
775  Y += 8;
776  srcY += 8;
777  }else if(block_type == RV34_MB_P_8x16){
778  qpel_mc[1][dxy](Y, srcY, s->linesize);
779  Y += 8 * s->linesize;
780  srcY += 8 * s->linesize;
781  }
782  is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
783  qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
784  if (emu) {
785  uint8_t *uvbuf = s->sc.edge_emu_buffer;
786 
787  s->vdsp.emulated_edge_mc(uvbuf, srcU,
788  s->uvlinesize, s->uvlinesize,
789  (width << 2) + 1, (height << 2) + 1,
790  uvsrc_x, uvsrc_y,
791  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
792  srcU = uvbuf;
793  uvbuf += 9*s->uvlinesize;
794 
795  s->vdsp.emulated_edge_mc(uvbuf, srcV,
796  s->uvlinesize, s->uvlinesize,
797  (width << 2) + 1, (height << 2) + 1,
798  uvsrc_x, uvsrc_y,
799  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
800  srcV = uvbuf;
801  }
802  chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
803  chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
804 }
805 
/**
 * Motion compensation for a single-MV partition: plain put (no averaging,
 * no B-frame weighting), using third-pel MC for RV30 and quarter-pel for RV40.
 */
static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
                        const int xoff, const int yoff, int mv_off,
                        const int width, const int height, int dir)
{
    rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
            r->rdsp.put_pixels_tab,
            r->rdsp.put_chroma_pixels_tab);
}
814 
816 {
817  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
818  r->tmp_b_block_y[0],
819  r->tmp_b_block_y[1],
820  r->weight1,
821  r->weight2,
822  r->s.linesize);
823  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
824  r->tmp_b_block_uv[0],
825  r->tmp_b_block_uv[2],
826  r->weight1,
827  r->weight2,
828  r->s.uvlinesize);
829  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
830  r->tmp_b_block_uv[1],
831  r->tmp_b_block_uv[3],
832  r->weight1,
833  r->weight2,
834  r->s.uvlinesize);
835 }
836 
837 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
838 {
839  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
840 
841  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
842  r->rdsp.put_pixels_tab,
843  r->rdsp.put_chroma_pixels_tab);
844  if(!weighted){
845  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
846  r->rdsp.avg_pixels_tab,
847  r->rdsp.avg_chroma_pixels_tab);
848  }else{
849  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
850  r->rdsp.put_pixels_tab,
851  r->rdsp.put_chroma_pixels_tab);
852  rv4_weight(r);
853  }
854 }
855 
857 {
858  int i, j;
859  int weighted = !r->rv30 && r->weight1 != 8192;
860 
861  for(j = 0; j < 2; j++)
862  for(i = 0; i < 2; i++){
863  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
864  weighted,
865  r->rdsp.put_pixels_tab,
866  r->rdsp.put_chroma_pixels_tab);
867  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
868  weighted,
869  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
870  weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
871  }
872  if(weighted)
873  rv4_weight(r);
874 }
875 
/** number of explicitly coded motion vectors per macroblock type (indexed by RV34_MB_*) */
static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
878 
879 /**
880  * Decode motion vector differences
881  * and perform motion vector reconstruction and motion compensation.
882  */
883 static int rv34_decode_mv(RV34DecContext *r, int block_type)
884 {
885  MpegEncContext *s = &r->s;
886  GetBitContext *const gb = &r->gb;
887  int i, j, k, l;
888  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
889  int next_bt;
890 
891  memset(r->dmv, 0, sizeof(r->dmv));
892  for(i = 0; i < num_mvs[block_type]; i++){
893  r->dmv[i][0] = get_interleaved_se_golomb(gb);
894  r->dmv[i][1] = get_interleaved_se_golomb(gb);
895  if (r->dmv[i][0] == INVALID_VLC ||
896  r->dmv[i][1] == INVALID_VLC) {
897  r->dmv[i][0] = r->dmv[i][1] = 0;
898  return AVERROR_INVALIDDATA;
899  }
900  }
901  switch(block_type){
902  case RV34_MB_TYPE_INTRA:
904  ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
905  return 0;
906  case RV34_MB_SKIP:
907  if(s->pict_type == AV_PICTURE_TYPE_P){
908  ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
909  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
910  break;
911  }
912  case RV34_MB_B_DIRECT:
913  //surprisingly, it uses motion scheme from next reference frame
914  /* wait for the current mb row to be finished */
915  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
916  ff_thread_progress_await(&s->next_pic.ptr->progress, FFMAX(0, s->mb_y-1));
917 
918  next_bt = s->next_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride];
919  if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
920  ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
921  ZERO8x2(s->cur_pic.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
922  }else
923  for(j = 0; j < 2; j++)
924  for(i = 0; i < 2; i++)
925  for(k = 0; k < 2; k++)
926  for(l = 0; l < 2; l++)
927  s->cur_pic.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_pic.motion_val[0][mv_pos + i + j*s->b8_stride][k]);
928  if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
929  rv34_mc_2mv(r, block_type);
930  else
932  ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
933  break;
934  case RV34_MB_P_16x16:
935  case RV34_MB_P_MIX16x16:
936  rv34_pred_mv(r, block_type, 0, 0);
937  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
938  break;
939  case RV34_MB_B_FORWARD:
940  case RV34_MB_B_BACKWARD:
941  r->dmv[1][0] = r->dmv[0][0];
942  r->dmv[1][1] = r->dmv[0][1];
943  if(r->rv30)
944  rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
945  else
946  rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
947  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
948  break;
949  case RV34_MB_P_16x8:
950  case RV34_MB_P_8x16:
951  rv34_pred_mv(r, block_type, 0, 0);
952  rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
953  if(block_type == RV34_MB_P_16x8){
954  rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
955  rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
956  }
957  if(block_type == RV34_MB_P_8x16){
958  rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
959  rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
960  }
961  break;
962  case RV34_MB_B_BIDIR:
963  rv34_pred_mv_b (r, block_type, 0);
964  rv34_pred_mv_b (r, block_type, 1);
965  rv34_mc_2mv (r, block_type);
966  break;
967  case RV34_MB_P_8x8:
968  for(i=0;i< 4;i++){
969  rv34_pred_mv(r, block_type, i, i);
970  rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
971  }
972  break;
973  }
974 
975  return 0;
976 }
977 /** @} */ // mv group
978 
979 /**
980  * @name Macroblock reconstruction functions
981  * @{
982  */
983 /** mapping of RV30/40 intra prediction types to standard H.264 types */
984 static const int ittrans[9] = {
987 };
988 
989 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
990 static const int ittrans16[4] = {
992 };
993 
994 /**
995  * Perform 4x4 intra prediction.
996  */
/* Run one 4x4 intra prediction into dst.  The requested mode (itype, already
 * translated to an H.264 pred4x4 index) is remapped to a fallback mode when
 * the neighbours it needs (up/left/down/right availability flags) are absent.
 * NOTE(review): original lines 1010 and 1013 are missing from this extraction
 * (additional mode remappings) — confirm against the upstream file. */
 997 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
 998 {
 999  uint8_t *prev = dst - stride + 4;
 1000  uint32_t topleft;
 1001 
 1002  if(!up && !left)
 1003  itype = DC_128_PRED;
 1004  else if(!up){
 1005  if(itype == VERT_PRED) itype = HOR_PRED;
 1006  if(itype == DC_PRED) itype = LEFT_DC_PRED;
 1007  }else if(!left){
 1008  if(itype == HOR_PRED) itype = VERT_PRED;
 1009  if(itype == DC_PRED) itype = TOP_DC_PRED;
 1011  }
 1012  if(!down){
 1014  if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
 1015  if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
 1016  }
/* Without a top-right neighbour, replicate the last available top pixel
 * four times and point the "above-right" source at that temporary. */
 1017  if(!right && up){
 1018  topleft = dst[-stride + 3] * 0x01010101u;
 1019  prev = (uint8_t*)&topleft;
 1020  }
 1021  r->h.pred4x4[itype](dst, prev, stride);
 1022 }
1023 
1024 static inline int adjust_pred16(int itype, int up, int left)
1025 {
1026  if(!up && !left)
1027  itype = DC_128_PRED8x8;
1028  else if(!up){
1029  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
1030  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1031  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1032  }else if(!left){
1033  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1034  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1035  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1036  }
1037  return itype;
1038 }
1039 
/* Decode one 4x4 coefficient block from the bitstream and add the result of
 * its inverse transform to pdst.  When the block has AC coefficients the full
 * IDCT-add is used; otherwise the cheaper DC-only path is taken and the DC
 * slot in the scratch buffer is cleared again for the next block.
 * NOTE(review): the first line of the signature (original line 1040,
 * presumably "static void rv34_process_block(RV34DecContext *r,") is missing
 * from this extraction — confirm against the upstream file. */
 1041  uint8_t *pdst, int stride,
 1042  int fc, int sc, int q_dc, int q_ac)
 1043 {
 1044  int16_t *const ptr = r->block;
 1045  int has_ac = rv34_decode_block(ptr, &r->gb, r->cur_vlcs,
 1046  fc, sc, q_dc, q_ac, q_ac);
 1047  if(has_ac){
 1048  r->rdsp.rv34_idct_add(pdst, stride, ptr);
 1049  }else{
 1050  r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
 1051  ptr[0] = 0;
 1052  }
 1053 }
1054 
/* Reconstruct an intra 16x16 macroblock: decode the separate 4x4 DC block,
 * run 16x16 luma prediction, then decode/add the 16 luma 4x4 blocks (DC taken
 * from block16) and the 8 chroma 4x4 blocks.  The order of the
 * rv34_decode_block() calls is bitstream-mandated and must not change. */
 1055 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
 1056 {
 1057  LOCAL_ALIGNED_16(int16_t, block16, [16]);
 1058  MpegEncContext *s = &r->s;
 1059  GetBitContext *const gb = &r->gb;
 1060  int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
 1061  q_ac = rv34_qscale_tab[s->qscale];
 1062  uint8_t *dst = s->dest[0];
 1063  int16_t *const ptr = r->block;
 1064  int i, j, itype, has_ac;
 1065 
 1066  memset(block16, 0, 16 * sizeof(*block16));
 1067 
/* The 16 luma DC coefficients are coded as one extra 4x4 block (table 3). */
 1068  has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
 1069  if(has_ac)
 1070  r->rdsp.rv34_inv_transform(block16);
 1071  else
 1072  r->rdsp.rv34_inv_transform_dc(block16);
 1073 
 1074  itype = ittrans16[intra_types[0]];
 1075  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
 1076  r->h.pred16x16[itype](dst, s->linesize);
 1077 
/* Luma: one cbp bit per 4x4 block; DC always comes from block16. */
 1078  for(j = 0; j < 4; j++){
 1079  for(i = 0; i < 4; i++, cbp >>= 1){
 1080  int dc = block16[i + j*4];
 1081 
 1082  if(cbp & 1){
 1083  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
 1084  }else
 1085  has_ac = 0;
 1086 
 1087  if(has_ac){
 1088  ptr[0] = dc;
 1089  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
 1090  }else
 1091  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
 1092  }
 1093 
 1094  dst += 4*s->linesize;
 1095  }
 1096 
/* Chroma uses the 8x8 predictors; PLANE is not available here. */
 1097  itype = ittrans16[intra_types[0]];
 1098  if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
 1099  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
 1100 
 1101  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
 1102  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
 1103 
 1104  for(j = 1; j < 3; j++){
 1105  dst = s->dest[j];
 1106  r->h.pred8x8[itype](dst, s->uvlinesize);
 1107  for(i = 0; i < 4; i++, cbp >>= 1){
 1108  uint8_t *pdst;
 1109  if(!(cbp & 1)) continue;
 1110  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
 1111 
 1112  rv34_process_block(r, pdst, s->uvlinesize,
 1113  r->chroma_vlc, 1, q_dc, q_ac);
 1114  }
 1115  }
 1116 }
1117 
/* Reconstruct a non-16x16 intra macroblock: per-4x4 prediction with its own
 * availability map for luma, then the same for the two chroma planes using
 * the avail_cache scratch area.  Prediction and residual decode are
 * interleaved per block, so statement order is bitstream-mandated. */
 1118 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
 1119 {
 1120  MpegEncContext *s = &r->s;
 1121  uint8_t *dst = s->dest[0];
/* avail[] is a 6x8 map of 4x4-block availability; row 0 holds the
 * neighbours above, column 0 the neighbours to the left. */
 1122  int avail[6*8] = {0};
 1123  int i, j, k;
 1124  int idx, q_ac, q_dc;
 1125 
 1126  // Set neighbour information.
 1127  if(r->avail_cache[1])
 1128  avail[0] = 1;
 1129  if(r->avail_cache[2])
 1130  avail[1] = avail[2] = 1;
 1131  if(r->avail_cache[3])
 1132  avail[3] = avail[4] = 1;
 1133  if(r->avail_cache[4])
 1134  avail[5] = 1;
 1135  if(r->avail_cache[5])
 1136  avail[8] = avail[16] = 1;
 1137  if(r->avail_cache[9])
 1138  avail[24] = avail[32] = 1;
 1139 
/* Luma: predict each 4x4 block, mark it available, then add its residual
 * if the corresponding cbp bit is set. */
 1140  q_ac = rv34_qscale_tab[s->qscale];
 1141  for(j = 0; j < 4; j++){
 1142  idx = 9 + j*8;
 1143  for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
 1144  rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
 1145  avail[idx] = 1;
 1146  if(!(cbp & 1)) continue;
 1147 
 1148  rv34_process_block(r, dst, s->linesize,
 1149  r->luma_vlc, 0, q_ac, q_ac);
 1150  }
 1151  dst += s->linesize * 4 - 4*4;
 1152  intra_types += r->intra_types_stride;
 1153  }
 1154 
 1155  intra_types -= r->intra_types_stride * 4;
 1156 
 1157  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
 1158  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
 1159 
/* Chroma: 2x2 grid of 4x4 blocks per plane, reusing avail_cache as the
 * local availability map (cleared per plane). */
 1160  for(k = 0; k < 2; k++){
 1161  dst = s->dest[1+k];
 1162  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
 1163 
 1164  for(j = 0; j < 2; j++){
 1165  int* acache = r->avail_cache + 6 + j*4;
 1166  for(i = 0; i < 2; i++, cbp >>= 1, acache++){
 1167  int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
 1168  rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
 1169  acache[0] = 1;
 1170 
 1171  if(!(cbp&1)) continue;
 1172 
 1173  rv34_process_block(r, dst + 4*i, s->uvlinesize,
 1174  r->chroma_vlc, 1, q_dc, q_ac);
 1175  }
 1176 
 1177  dst += 4*s->uvlinesize;
 1178  }
 1179  }
 1180 }
1181 
/**
 * Check whether the motion vector at motion_val[0] differs from the one at
 * motion_val[-step] by more than 3 (quarter-pel units) in either component.
 *
 * @return 1 if the difference exceeds 3 in x or y, 0 otherwise
 */
static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
{
    for (int comp = 0; comp < 2; comp++) {
        int diff = motion_val[0][comp] - motion_val[-step][comp];
        if (diff < -3 || diff > 3)
            return 1;
    }
    return 0;
}
1193 
/* Build the per-macroblock deblocking mask from motion vector differences:
 * bit set = edge of that 4x4 block needs filtering.  vmvmask marks vertical
 * edges (left neighbour), hmvmask horizontal edges (top neighbour).
 * NOTE(review): the signature line (original 1194) is missing here; per the
 * cross-reference index it is
 * "static int rv34_set_deblock_coef(RV34DecContext *r)". */
 1195 {
 1196  MpegEncContext *s = &r->s;
 1197  int hmvmask = 0, vmvmask = 0, i, j;
 1198  int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
 1199  int16_t (*motion_val)[2] = &s->cur_pic.motion_val[0][midx];
/* Each 8x8 subblock contributes 2 bits per direction; j steps the two
 * 8-pixel rows of the MB, i the two 8x8 columns. */
 1200  for(j = 0; j < 16; j += 8){
 1201  for(i = 0; i < 2; i++){
 1202  if(is_mv_diff_gt_3(motion_val + i, 1))
 1203  vmvmask |= 0x11 << (j + i*2);
 1204  if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
 1205  hmvmask |= 0x03 << (j + i*2);
 1206  }
 1207  motion_val += s->b8_stride;
 1208  }
/* No filtering across picture/slice borders. */
 1209  if(s->first_slice_line)
 1210  hmvmask &= ~0x000F;
 1211  if(!s->mb_x)
 1212  vmvmask &= ~0x1111;
 1213  if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
 1214  vmvmask |= (vmvmask & 0x4444) >> 1;
 1215  hmvmask |= (hmvmask & 0x0F00) >> 4;
 1216  if(s->mb_x)
 1217  r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
 1218  if(!s->first_slice_line)
 1219  r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
 1220  }
 1221  return hmvmask | vmvmask;
 1222 }
1223 
1224 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1225 {
1226  MpegEncContext *s = &r->s;
1227  GetBitContext *const gb = &r->gb;
1228  uint8_t *dst = s->dest[0];
1229  int16_t *const ptr = r->block;
1230  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1231  int cbp, cbp2;
1232  int q_dc, q_ac, has_ac;
1233  int i, j;
1234  int dist;
1235 
1236  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1237  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1238  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1239  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1240  if(s->mb_x && dist)
1241  r->avail_cache[5] =
1242  r->avail_cache[9] = s->cur_pic.mb_type[mb_pos - 1];
1243  if(dist >= s->mb_width)
1244  r->avail_cache[2] =
1245  r->avail_cache[3] = s->cur_pic.mb_type[mb_pos - s->mb_stride];
1246  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1247  r->avail_cache[4] = s->cur_pic.mb_type[mb_pos - s->mb_stride + 1];
1248  if(s->mb_x && dist > s->mb_width)
1249  r->avail_cache[1] = s->cur_pic.mb_type[mb_pos - s->mb_stride - 1];
1250 
1251  s->qscale = r->si.quant;
1252  cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1253  r->cbp_luma [mb_pos] = cbp;
1254  r->cbp_chroma[mb_pos] = cbp >> 16;
1255  r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1256  s->cur_pic.qscale_table[mb_pos] = s->qscale;
1257 
1258  if(cbp == -1)
1259  return -1;
1260 
1261  if (IS_INTRA(s->cur_pic.mb_type[mb_pos])) {
1262  if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1263  else rv34_output_intra(r, intra_types, cbp);
1264  return 0;
1265  }
1266 
1267  if(r->is16){
1268  // Only for RV34_MB_P_MIX16x16
1269  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1270  memset(block16, 0, 16 * sizeof(*block16));
1271  q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1272  q_ac = rv34_qscale_tab[s->qscale];
1273  if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1274  r->rdsp.rv34_inv_transform(block16);
1275  else
1276  r->rdsp.rv34_inv_transform_dc(block16);
1277 
1278  q_ac = rv34_qscale_tab[s->qscale];
1279 
1280  for(j = 0; j < 4; j++){
1281  for(i = 0; i < 4; i++, cbp >>= 1){
1282  int dc = block16[i + j*4];
1283 
1284  if(cbp & 1){
1285  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1286  }else
1287  has_ac = 0;
1288 
1289  if(has_ac){
1290  ptr[0] = dc;
1291  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1292  }else
1293  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1294  }
1295 
1296  dst += 4*s->linesize;
1297  }
1298 
1299  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1300  }else{
1301  q_ac = rv34_qscale_tab[s->qscale];
1302 
1303  for(j = 0; j < 4; j++){
1304  for(i = 0; i < 4; i++, cbp >>= 1){
1305  if(!(cbp & 1)) continue;
1306 
1307  rv34_process_block(r, dst + 4*i, s->linesize,
1308  r->luma_vlc, 0, q_ac, q_ac);
1309  }
1310  dst += 4*s->linesize;
1311  }
1312  }
1313 
1314  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1315  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1316 
1317  for(j = 1; j < 3; j++){
1318  dst = s->dest[j];
1319  for(i = 0; i < 4; i++, cbp >>= 1){
1320  uint8_t *pdst;
1321  if(!(cbp & 1)) continue;
1322  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1323 
1324  rv34_process_block(r, pdst, s->uvlinesize,
1325  r->chroma_vlc, 1, q_dc, q_ac);
1326  }
1327  }
1328 
1329  return 0;
1330 }
1331 
1332 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1333 {
1334  MpegEncContext *s = &r->s;
1335  int cbp, dist;
1336  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1337 
1338  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1339  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1340  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1341  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1342  if(s->mb_x && dist)
1343  r->avail_cache[5] =
1344  r->avail_cache[9] = s->cur_pic.mb_type[mb_pos - 1];
1345  if(dist >= s->mb_width)
1346  r->avail_cache[2] =
1347  r->avail_cache[3] = s->cur_pic.mb_type[mb_pos - s->mb_stride];
1348  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1349  r->avail_cache[4] = s->cur_pic.mb_type[mb_pos - s->mb_stride + 1];
1350  if(s->mb_x && dist > s->mb_width)
1351  r->avail_cache[1] = s->cur_pic.mb_type[mb_pos - s->mb_stride - 1];
1352 
1353  s->qscale = r->si.quant;
1354  cbp = rv34_decode_intra_mb_header(r, intra_types);
1355  r->cbp_luma [mb_pos] = cbp;
1356  r->cbp_chroma[mb_pos] = cbp >> 16;
1357  r->deblock_coefs[mb_pos] = 0xFFFF;
1358  s->cur_pic.qscale_table[mb_pos] = s->qscale;
1359 
1360  if(cbp == -1)
1361  return -1;
1362 
1363  if(r->is16){
1364  rv34_output_i16x16(r, intra_types, cbp);
1365  return 0;
1366  }
1367 
1368  rv34_output_intra(r, intra_types, cbp);
1369  return 0;
1370 }
1371 
/* Decide whether the current slice is finished: past the last MB row, no MBs
 * left, or too few meaningful bits remaining (fewer than 8 bits that are all
 * zero count as padding).  Returns 1 at slice end, 0 to keep decoding.
 * NOTE(review): the signature line (original 1372) is missing here; per the
 * cross-reference index it is
 * "static int check_slice_end(RV34DecContext *r, MpegEncContext *s)". */
 1373 {
 1374  int bits;
 1375  if(s->mb_y >= s->mb_height)
 1376  return 1;
 1377  if (!r->mb_num_left)
 1378  return 1;
/* A pending skip run guarantees more MBs without reading more bits. */
 1379  if (r->mb_skip_run > 1)
 1380  return 0;
 1381  bits = get_bits_left(&r->gb);
 1382  if (bits <= 0 || (bits < 8 && !show_bits(&r->gb, bits)))
 1383  return 1;
 1384  return 0;
 1385 }
1386 
1387 
/* Free all per-context buffers allocated by rv34_decoder_alloc() (and the
 * lazily-allocated B-frame temp block).  Safe to call on a partially
 * allocated context: av_freep() handles NULL and clears the pointers.
 * NOTE(review): the signature line (original 1388) is missing from this
 * extraction — presumably "static av_cold void rv34_decoder_free(RV34DecContext *r)";
 * confirm against the upstream file. */
 1389 {
 1390  av_freep(&r->intra_types_hist);
 1391  r->intra_types = NULL;
 1392  av_freep(&r->tmp_b_block_base);
 1393  av_freep(&r->mb_type);
 1394  av_freep(&r->cbp_luma);
 1395  av_freep(&r->cbp_chroma);
 1396  av_freep(&r->deblock_coefs);
 1397 }
1398 
1399 
/* Allocate the per-context side buffers (cbp, deblock masks, intra type
 * history, mb types) sized from the current mb_stride/mb_height.  On any
 * allocation failure the context is flagged for reinit and ENOMEM returned.
 * NOTE(review): the signature line (original 1400,
 * "static int rv34_decoder_alloc(RV34DecContext *r)" per the index) and
 * original line 1418 (presumably a rv34_decoder_free(r) cleanup call before
 * returning) are missing from this extraction — confirm upstream. */
 1401 {
/* History holds two rows (current + previous) of per-4x4 intra types. */
 1402  r->intra_types_stride = r->s.mb_width * 4 + 4;
 1403 
 1404  r->cbp_chroma = av_mallocz(r->s.mb_stride * r->s.mb_height *
 1405  sizeof(*r->cbp_chroma));
 1406  r->cbp_luma = av_mallocz(r->s.mb_stride * r->s.mb_height *
 1407  sizeof(*r->cbp_luma));
 1408  r->deblock_coefs = av_mallocz(r->s.mb_stride * r->s.mb_height *
 1409  sizeof(*r->deblock_coefs));
 1410  r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
 1411  sizeof(*r->intra_types_hist));
 1412  r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
 1413  sizeof(*r->mb_type));
 1414 
 1415  if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
 1416  r->intra_types_hist && r->mb_type)) {
 1417  r->s.context_reinit = 1;
 1419  return AVERROR(ENOMEM);
 1420  }
 1421 
 1422  r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
 1423 
 1424  return 0;
 1425 }
1426 
1427 
/* Re-create the side buffers after a frame size change.
 * NOTE(review): the signature line (original 1428,
 * "static int rv34_decoder_realloc(RV34DecContext *r)" per the index) and
 * original line 1430 (presumably "rv34_decoder_free(r);" before the
 * re-allocation) are missing from this extraction — confirm upstream. */
 1429 {
 1431  return rv34_decoder_alloc(r);
 1432 }
1433 
1434 
/* Decode one slice: parse and validate its header against the current frame
 * state, resync the MB position, then decode macroblocks until
 * check_slice_end() fires, maintaining the intra-type history, running the
 * loop filter two rows behind, and reporting progress to frame threads.
 * Returns 1 when the last MB row of the frame has been reached, 0 on a
 * normal slice end, negative on error.
 * NOTE(review): original lines 1475 and 1492 are missing from this
 * extraction (presumably the ff_init_block_index()/block-index update calls
 * around the MB loop) — confirm against the upstream file. */
 1435 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
 1436 {
 1437  MpegEncContext *s = &r->s;
 1438  GetBitContext *const gb = &r->gb;
 1439  int mb_pos, slice_type;
 1440  int res;
 1441 
 1442  init_get_bits(gb, buf, buf_size*8);
 1443  res = r->parse_slice_header(r, gb, &r->si);
 1444  if(res < 0){
 1445  av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
 1446  return -1;
 1447  }
 1448 
/* All slices of a frame must agree with the picture-level parameters. */
 1449  slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
 1450  if (slice_type != s->pict_type) {
 1451  av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
 1452  return AVERROR_INVALIDDATA;
 1453  }
 1454  if (s->width != r->si.width || s->height != r->si.height) {
 1455  av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
 1456  return AVERROR_INVALIDDATA;
 1457  }
 1458 
 1459  r->si.end = end;
 1460  s->qscale = r->si.quant;
 1461  r->mb_num_left = r->si.end - r->si.start;
 1462  r->mb_skip_run = 0;
 1463 
/* Resync to the MB position announced by the slice header. */
 1464  mb_pos = s->mb_x + s->mb_y * s->mb_width;
 1465  if(r->si.start != mb_pos){
 1466  av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
 1467  s->mb_x = r->si.start % s->mb_width;
 1468  s->mb_y = r->si.start / s->mb_width;
 1469  }
 1470  memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
 1471  s->first_slice_line = 1;
 1472  s->resync_mb_x = s->mb_x;
 1473  s->resync_mb_y = s->mb_y;
 1474 
 1476  while(!check_slice_end(r, s)) {
 1477  s->dest[0] += 16;
 1478  s->dest[1] += 8;
 1479  s->dest[2] += 8;
 1480 
/* si.type == 0 means an intra (I) slice. */
 1481  if(r->si.type)
 1482  res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
 1483  else
 1484  res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
 1485  if(res < 0){
 1486  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
 1487  return -1;
 1488  }
 1489  if (++s->mb_x == s->mb_width) {
 1490  s->mb_x = 0;
 1491  s->mb_y++;
 1493 
/* Roll the intra-type history down one MB row. */
 1494  memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
 1495  memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
 1496 
/* The loop filter needs one completed row below, so run it two rows back. */
 1497  if(r->loop_filter && s->mb_y >= 2)
 1498  r->loop_filter(r, s->mb_y - 2);
 1499 
 1500  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
 1501  ff_thread_progress_report(&s->cur_pic.ptr->progress,
 1502  s->mb_y - 2);
 1503 
 1504  }
 1505  if(s->mb_x == s->resync_mb_x)
 1506  s->first_slice_line=0;
 1507  r->mb_num_left--;
 1508  }
 1509  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
 1510 
 1511  return s->mb_y == s->mb_height;
 1512 }
1513 
1514 /** @} */ // reconstruction group end
1515 
1516 /**
1517  * Initialize decoder.
1518  */
/* Common RV30/RV40 decoder init: set up the MPEG-video context for H.263-
 * style decoding, fix the pixel format (YUV420P, B-frames enabled), hook up
 * the H.264 prediction functions and build the static VLC tables once.
 * NOTE(review): the signature line (original 1519, presumably
 * "av_cold int ff_rv34_decode_init(AVCodecContext *avctx)") and original
 * line 1540 (presumably "ret = rv34_decoder_alloc(r);" whose result the
 * following check tests) are missing from this extraction — confirm upstream. */
 1520 {
 1521  static AVOnce init_static_once = AV_ONCE_INIT;
 1522  RV34DecContext *r = avctx->priv_data;
 1523  MpegEncContext *s = &r->s;
 1524  int ret;
 1525 
 1526  ret = ff_mpv_decode_init(s, avctx);
 1527  if (ret < 0)
 1528  return ret;
 1529  s->out_format = FMT_H263;
 1530 
 1531  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 1532  avctx->has_b_frames = 1;
 1533  s->low_delay = 0;
 1534 
 1535  if ((ret = ff_mpv_common_init(s)) < 0)
 1536  return ret;
 1537 
/* RV30/40 intra prediction reuses the H.264 predictors. */
 1538  ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
 1539 
 1541  if (ret < 0)
 1542  return ret;
 1543 
 1544  ff_thread_once(&init_static_once, rv34_init_tables);
 1545 
 1546  return 0;
 1547 }
1548 
/* Frame-threading context update: copy the mpegvideo state from the source
 * thread, re-allocate our side buffers if that triggered a reinit, then copy
 * the RV34-specific timestamp state and clear the slice info.
 * NOTE(review): original line 1549 (the signature, per the index
 * "int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)"),
 * line 1558 (presumably "ret = ff_mpeg_update_thread_context(dst, src);")
 * and line 1564 (presumably "ret = rv34_decoder_realloc(r);") are missing
 * from this extraction — confirm upstream. */
 1550 {
 1551  RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
 1552  MpegEncContext *const s1 = &r1->s;
 1553  int ret;
 1554 
 1555  if (dst == src || !s1->context_initialized)
 1556  return 0;
 1557 
 1559  if (ret < 0)
 1560  return ret;
 1561 
 1562  // Did ff_mpeg_update_thread_context reinit?
 1563  if (ret > 0) {
 1565  if (ret < 0)
 1566  return ret;
 1567  }
 1568 
 1569  r->cur_pts = r1->cur_pts;
 1570  r->last_pts = r1->last_pts;
 1571  r->next_pts = r1->next_pts;
 1572 
 1573  memset(&r->si, 0, sizeof(r->si));
 1574 
 1575  return 0;
 1576 }
1577 
1578 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1579 {
1580  if (n < slice_count) {
1581  return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1582  } else
1583  return buf_size;
1584 }
1585 
/* Finish the current frame: run error concealment, then output a picture —
 * the current one for B-frames (no reordering delay), otherwise the previous
 * reference (one-frame output delay).  Returns 1 if a picture was emitted,
 * 0 if not, negative on error.
 * NOTE(review): original line 1593 (presumably "ff_mpv_frame_end(s);") is
 * missing from this extraction — confirm against the upstream file. */
 1586 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
 1587 {
 1588  RV34DecContext *r = avctx->priv_data;
 1589  MpegEncContext *s = &r->s;
 1590  int got_picture = 0, ret;
 1591 
 1592  ff_er_frame_end(&s->er, NULL);
 1594  r->mb_num_left = 0;
 1595 
 1596  if (s->pict_type == AV_PICTURE_TYPE_B) {
 1597  if ((ret = av_frame_ref(pict, s->cur_pic.ptr->f)) < 0)
 1598  return ret;
 1599  ff_print_debug_info(s, s->cur_pic.ptr, pict);
 1600  ff_mpv_export_qp_table(s, pict, s->cur_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG1);
 1601  got_picture = 1;
 1602  } else if (s->last_pic.ptr) {
 1603  if ((ret = av_frame_ref(pict, s->last_pic.ptr->f)) < 0)
 1604  return ret;
 1605  ff_print_debug_info(s, s->last_pic.ptr, pict);
 1606  ff_mpv_export_qp_table(s, pict, s->last_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG1);
 1607  got_picture = 1;
 1608  }
 1609 
 1610  return got_picture;
 1611 }
1612 
1613 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1614 {
1615  // attempt to keep aspect during typical resolution switches
1616  if (!sar.num)
1617  sar = (AVRational){1, 1};
1618 
1619  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1620  return sar;
1621 }
1622 
/* Top-level packet decode: handle the flush case, parse the slice offset
 * table, validate the first slice header, start a new frame when needed
 * (handling dimension changes, B-frame weights and frame-threading rules),
 * then decode every slice and finish/output the frame.
 * NOTE(review): several original lines are missing from this extraction and
 * must be confirmed upstream: line 1623 (the signature head,
 * "int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,"),
 * line 1684 (cleanup after the dangling-frame ff_er_frame_end, presumably
 * ff_mpv_frame_end), line 1715 (a call right after ff_mpv_frame_start) and
 * line 1819 (cleanup after the unfinished-frame ff_er_frame_end). */
 1624  int *got_picture_ptr, AVPacket *avpkt)
 1625 {
 1626  const uint8_t *buf = avpkt->data;
 1627  int buf_size = avpkt->size;
 1628  RV34DecContext *r = avctx->priv_data;
 1629  MpegEncContext *s = &r->s;
 1630  SliceInfo si;
 1631  int i, ret;
 1632  int slice_count;
 1633  const uint8_t *slices_hdr = NULL;
 1634  int last = 0;
 1635  int faulty_b = 0;
 1636  int offset;
 1637 
 1638  /* no supplementary picture */
 1639  if (buf_size == 0) {
 1640  /* special case for last picture */
 1641  if (s->next_pic.ptr) {
 1642  if ((ret = av_frame_ref(pict, s->next_pic.ptr->f)) < 0)
 1643  return ret;
 1644  ff_mpv_unref_picture(&s->next_pic);
 1645 
 1646  *got_picture_ptr = 1;
 1647  }
 1648  return 0;
 1649  }
 1650 
/* Packet layout: 1 byte (slice_count - 1), then slice_count 8-byte offset
 * table entries, then the slice payload. */
 1651  slice_count = (*buf++) + 1;
 1652  slices_hdr = buf + 4;
 1653  buf += 8 * slice_count;
 1654  buf_size -= 1 + 8 * slice_count;
 1655 
 1656  offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
 1657  //parse first slice header to check whether this frame can be decoded
 1658  if(offset < 0 || offset > buf_size){
 1659  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
 1660  return AVERROR_INVALIDDATA;
 1661  }
 1662  init_get_bits(&r->gb, buf+offset, (buf_size-offset)*8);
 1663  if (r->parse_slice_header(r, &r->gb, &si) < 0 || si.start) {
 1664  av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
 1665  return AVERROR_INVALIDDATA;
 1666  }
 1667  if (!s->last_pic.ptr && si.type == AV_PICTURE_TYPE_B) {
 1668  av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
 1669  "reference data.\n");
 1670  faulty_b = 1;
 1671  }
 1672  if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
 1673  || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
 1674  || avctx->skip_frame >= AVDISCARD_ALL)
 1675  return avpkt->size;
 1676 
 1677  /* first slice */
 1678  if (si.start == 0) {
/* A new frame while MBs of the previous one are outstanding: conceal and
 * close the dangling frame first. */
 1679  if (r->mb_num_left > 0 && s->cur_pic.ptr) {
 1680  av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
 1681  r->mb_num_left);
 1682  if (!s->context_reinit)
 1683  ff_er_frame_end(&s->er, NULL);
 1685  }
 1686 
 1687  if (s->width != si.width || s->height != si.height || s->context_reinit) {
 1688  int err;
 1689 
 1690  av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
 1691  si.width, si.height);
 1692 
 1693  if (av_image_check_size(si.width, si.height, 0, s->avctx))
 1694  return AVERROR_INVALIDDATA;
 1695 
 1696  s->avctx->sample_aspect_ratio = update_sar(
 1697  s->width, s->height, s->avctx->sample_aspect_ratio,
 1698  si.width, si.height);
 1699  s->width = si.width;
 1700  s->height = si.height;
 1701 
 1702  err = ff_set_dimensions(s->avctx, s->width, s->height);
 1703  if (err < 0)
 1704  return err;
 1705  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
 1706  return err;
 1707  if ((err = rv34_decoder_realloc(r)) < 0)
 1708  return err;
 1709  }
 1710  if (faulty_b)
 1711  return AVERROR_INVALIDDATA;
 1712  s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
 1713  if (ff_mpv_frame_start(s, s->avctx) < 0)
 1714  return -1;
/* Scratch buffers for B-frame bidirectional MC, allocated lazily once the
 * line sizes are known. */
 1716  if (!r->tmp_b_block_base) {
 1717  int i;
 1718 
 1719  r->tmp_b_block_base = av_malloc(s->linesize * 48);
 1720  if (!r->tmp_b_block_base)
 1721  return AVERROR(ENOMEM);
 1722  for (i = 0; i < 2; i++)
 1723  r->tmp_b_block_y[i] = r->tmp_b_block_base
 1724  + i * 16 * s->linesize;
 1725  for (i = 0; i < 4; i++)
 1726  r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
 1727  + (i >> 1) * 8 * s->uvlinesize
 1728  + (i & 1) * 16;
 1729  }
/* Derive the B-frame interpolation weights from the PTS distances. */
 1730  r->cur_pts = si.pts;
 1731  if (s->pict_type != AV_PICTURE_TYPE_B) {
 1732  r->last_pts = r->next_pts;
 1733  r->next_pts = r->cur_pts;
 1734  } else {
 1735  int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
 1736  int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
 1737  int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
 1738 
 1739  if(!refdist){
 1740  r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
 1741  r->scaled_weight = 0;
 1742  }else{
 1743  if (FFMAX(dist0, dist1) > refdist)
 1744  av_log(avctx, AV_LOG_TRACE, "distance overflow\n");
 1745 
 1746  r->mv_weight1 = (dist0 << 14) / refdist;
 1747  r->mv_weight2 = (dist1 << 14) / refdist;
 1748  if((r->mv_weight1|r->mv_weight2) & 511){
 1749  r->weight1 = r->mv_weight1;
 1750  r->weight2 = r->mv_weight2;
 1751  r->scaled_weight = 0;
 1752  }else{
 1753  r->weight1 = r->mv_weight1 >> 9;
 1754  r->weight2 = r->mv_weight2 >> 9;
 1755  r->scaled_weight = 1;
 1756  }
 1757  }
 1758  }
 1759  s->mb_x = s->mb_y = 0;
 1760  ff_thread_finish_setup(s->avctx);
 1761  } else if (s->context_reinit) {
 1762  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames to "
 1763  "reinitialize (start MB is %d).\n", si.start);
 1764  return AVERROR_INVALIDDATA;
 1765  } else if (HAVE_THREADS &&
 1766  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
 1767  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
 1768  "multithreading mode (start MB is %d).\n", si.start);
 1769  return AVERROR_INVALIDDATA;
 1770  }
 1771 
/* Decode every slice; each slice's length comes from the next offset. */
 1772  for(i = 0; i < slice_count; i++){
 1773  int offset = get_slice_offset(avctx, slices_hdr, i , slice_count, buf_size);
 1774  int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
 1775  int size;
 1776 
 1777  if(offset < 0 || offset > offset1 || offset1 > buf_size){
 1778  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
 1779  break;
 1780  }
 1781  size = offset1 - offset;
 1782 
 1783  r->si.end = s->mb_width * s->mb_height;
 1784  r->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
 1785 
/* Peek at the next slice header: its start MB bounds this slice; if it is
 * unparsable, merge the next slice's bytes into this one. */
 1786  if(i+1 < slice_count){
 1787  int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
 1788  if (offset2 < offset1 || offset2 > buf_size) {
 1789  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
 1790  break;
 1791  }
 1792  init_get_bits(&r->gb, buf+offset1, (buf_size-offset1)*8);
 1793  if (r->parse_slice_header(r, &r->gb, &si) < 0) {
 1794  size = offset2 - offset;
 1795  }else
 1796  r->si.end = si.start;
 1797  }
 1798  av_assert0 (size >= 0 && size <= buf_size - offset);
 1799  last = rv34_decode_slice(r, r->si.end, buf + offset, size);
 1800  if(last)
 1801  break;
 1802  }
 1803 
 1804  if (s->cur_pic.ptr) {
 1805  if (last) {
 1806  if(r->loop_filter)
 1807  r->loop_filter(r, s->mb_height - 1);
 1808 
 1809  ret = finish_frame(avctx, pict);
 1810  if (ret < 0)
 1811  return ret;
 1812  *got_picture_ptr = ret;
 1813  } else if (HAVE_THREADS &&
 1814  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
 1815  av_log(avctx, AV_LOG_INFO, "marking unfinished frame as finished\n");
 1816  /* always mark the current frame as finished, frame-mt supports
 1817  * only complete frames */
 1818  ff_er_frame_end(&s->er, NULL);
 1820  r->mb_num_left = 0;
 1821  return AVERROR_INVALIDDATA;
 1822  }
 1823  }
 1824 
 1825  return avpkt->size;
 1826 }
1827 
/* Decoder teardown: release RV34-specific buffers, then the shared
 * mpegvideo decoder state.
 * NOTE(review): the signature line (original 1828, per the index
 * "av_cold int ff_rv34_decode_end(AVCodecContext *avctx)") and original
 * line 1832 (presumably "rv34_decoder_free(r);", which is why r is
 * declared) are missing from this extraction — confirm upstream. */
 1829 {
 1830  RV34DecContext *r = avctx->priv_data;
 1831 
 1833 
 1834  return ff_mpv_decode_close(avctx);
 1835 }
flags
const SwsFlags flags[]
Definition: swscale.c:72
RV34DecContext
decoder context
Definition: rv34.h:87
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:359
A
#define A(x)
Definition: vpx_arith.h:28
IS_8X8
#define IS_8X8(a)
Definition: mpegutils.h:83
rv34_mb_type_to_lavc
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
Definition: rv34.c:59
HOR_PRED8x8
#define HOR_PRED8x8
Definition: h264pred.h:69
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
rv34_qscale_tab
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
Definition: rv34data.h:84
ff_thread_progress_report
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of p...
Definition: threadprogress.c:53
rv34_output_intra
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1118
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:688
r
const char * r
Definition: vf_curves.c:127
ff_rv34_decode_end
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
Definition: rv34.c:1828
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
ff_mpv_decode_init
av_cold int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:86
threadprogress.h
rv34_pred_mv_rv3
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
Definition: rv34.c:625
mem_internal.h
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
ThreadProgress
ThreadProgress is an API to easily notify other threads about progress of any kind as long as it can ...
Definition: threadprogress.h:43
thread.h
rv34_table_inter_secondpat
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3737
ittrans16
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
Definition: rv34.c:990
num_mvs
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
Definition: rv34.c:877
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:42
chroma_coeffs
static const int chroma_coeffs[3]
Definition: rv34.c:671
ff_rv34_get_start_offset
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
Definition: rv34.c:351
mask
int mask
Definition: mediacodecdec_common.c:154
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:434
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
ff_rv34_decode_update_thread_context
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: rv34.c:1549
AVPacket::data
uint8_t * data
Definition: packet.h:595
DC_PRED
@ DC_PRED
Definition: vp9.h:48
table
static const uint16_t table[]
Definition: prosumer.c:203
rv34_decoder_realloc
static int rv34_decoder_realloc(RV34DecContext *r)
Definition: rv34.c:1428
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:41
check_slice_end
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
Definition: rv34.c:1372
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:840
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:472
chroma_mc
#define chroma_mc(a)
Definition: vc1dsp.c:786
mpegvideo.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
decode_subblock
static void decode_subblock(int16_t *dst, int flags, const int is_block2, GetBitContext *gb, const VLCElem *vlc, int q)
Decode 2x2 subblock of coefficients.
Definition: rv34.c:265
rv34_set_deblock_coef
static int rv34_set_deblock_coef(RV34DecContext *r)
Definition: rv34.c:1194
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:39
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
thread.h
MPVWorkPicture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:105
avail_indexes
static const uint8_t avail_indexes[4]
availability index for subblocks
Definition: rv34.c:479
MPVWorkPicture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:103
golomb.h
exp golomb vlc stuff
NUM_INTRA_TABLES
#define NUM_INTRA_TABLES
Definition: rv34vlc.h:32
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
adjust_pred16
static int adjust_pred16(int itype, int up, int left)
Definition: rv34.c:1024
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
RV34_MB_B_FORWARD
@ RV34_MB_B_FORWARD
B-frame macroblock, forward prediction.
Definition: rv34.h:50
rv34_decoder_alloc
static int rv34_decoder_alloc(RV34DecContext *r)
Definition: rv34.c:1400
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1674
VERT_PRED
@ VERT_PRED
Definition: vp9.h:46
rv34_pred_mv
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
Definition: rv34.c:488
rv34_gen_vlc
static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp, int mod_three_bits_offset, int *offset)
Definition: rv34.c:145
GetBitContext
Definition: get_bits.h:109
RV34VLC::first_pattern
const VLCElem * first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
Definition: rv34.h:69
DIAG_DOWN_RIGHT_PRED
@ DIAG_DOWN_RIGHT_PRED
Definition: vp9.h:50
rv34_decode_block
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
Definition: rv34.c:310
RV34_MB_B_DIRECT
@ RV34_MB_B_DIRECT
Bidirectionally predicted B-frame macroblock, no motion vectors.
Definition: rv34.h:53
val
static double val(void *priv, double ch)
Definition: aeval.c:77
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
rv34_count_ones
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
Definition: rv34data.h:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
rv34_table_intra_firstpat
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:940
rv34data.h
quant
static const uint8_t quant[64]
Definition: vmixdec.c:71
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
mpegvideodec.h
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:236
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
HOR_PRED
@ HOR_PRED
Definition: vp9.h:47
av_cold
#define av_cold
Definition: attributes.h:111
ff_rv34_decode_init
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
Definition: rv34.c:1519
rv34_pred_4x4_block
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
Definition: rv34.c:997
rv34_decode_intra_macroblock
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1332
ZERO8x2
static void ZERO8x2(void *dst, int stride)
Definition: rv34.c:52
RV34VLC
VLC tables used by the decoder.
Definition: rv34.h:66
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:705
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:910
rv34_mc_1mv
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
Definition: rv34.c:806
rv34_decode_inter_macroblock
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1224
intra_vlcs
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
Definition: rv34.c:75
s
#define s(width, name)
Definition: cbs_vp9.c:198
IS_16X8
#define IS_16X8(a)
Definition: mpegutils.h:81
VERT_LEFT_PRED_RV40_NODOWN
#define VERT_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:56
RV34VLC::cbp
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
Definition: rv34.h:68
CBPPAT_VLC_SIZE
#define CBPPAT_VLC_SIZE
Definition: rv34vlc.h:35
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:494
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:46
calc_add_mv
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
Definition: rv34.c:543
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
LEFT_DC_PRED
@ LEFT_DC_PRED
Definition: vp9.h:56
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
B
#define B
Definition: huffyuv.h:42
decode.h
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:75
CBP_VLC_SIZE
#define CBP_VLC_SIZE
Definition: rv34vlc.h:36
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
finish_frame
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
Definition: rv34.c:1586
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
rv34_mb_max_sizes
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
Definition: rv34data.h:106
decode_coeff
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, const VLCElem *vlc, int q)
Get one coefficient value from the bitstream and store it.
Definition: rv34.c:244
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:57
MB_TYPE_8x16
#define MB_TYPE_8x16
Definition: mpegutils.h:43
TOP_DC_PRED8x8
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
RV34VLC::second_pattern
const VLCElem * second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
Definition: rv34.h:70
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
rv34_inter_coeff
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:4024
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
ff_thread_progress_await
void ff_thread_progress_await(const ThreadProgress *pro_c, int n)
This function is a no-op in no-op mode; otherwise it waits until other threads have reached a certain...
Definition: threadprogress.c:64
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:419
RV34VLC::cbppattern
const VLCElem * cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
Definition: rv34.h:67
NULL
#define NULL
Definition: coverity.c:32
GET_PTS_DIFF
#define GET_PTS_DIFF(a, b)
Definition: rv34.c:538
rv34_decode_slice
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
Definition: rv34.c:1435
rv34_init_tables
static av_cold void rv34_init_tables(void)
Initialize all tables.
Definition: rv34.c:156
RV34_MB_SKIP
@ RV34_MB_SKIP
Skipped block.
Definition: rv34.h:52
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mpv_decode_close
av_cold int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:172
COEFF_VLC_SIZE
#define COEFF_VLC_SIZE
Definition: rv34vlc.h:39
rv34_table_intra_cbppat
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:42
RV34VLC::third_pattern
const VLCElem * third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
Definition: rv34.h:71
MB_TYPE_8x8
#define MB_TYPE_8x8
Definition: mpegutils.h:44
SliceInfo::type
int type
slice type (intra, inter)
Definition: rv34.h:77
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:36
decode_subblock3
static void decode_subblock3(int16_t *dst, int flags, GetBitContext *gb, const VLCElem *vlc, int q_dc, int q_ac1, int q_ac2)
Definition: rv34.c:289
V
#define V
Definition: avdct.c:32
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
mathops.h
VERT_PRED8x8
#define VERT_PRED8x8
Definition: h264pred.h:70
MB_TYPE_BIDIR_MV
#define MB_TYPE_BIDIR_MV
Definition: mpegutils.h:51
qpeldsp.h
rv34_table_intra_secondpat
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2074
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:645
MAX_VLC_SIZE
#define MAX_VLC_SIZE
Definition: rv34vlc.h:40
rv34.h
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AVOnce
#define AVOnce
Definition: thread.h:202
rv34_decode_mv
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation.
Definition: rv34.c:883
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
RV34_MB_P_8x8
@ RV34_MB_P_8x8
P-frame macroblock, 8x8 motion compensation partitions.
Definition: rv34.h:49
rv34_table_intra_thirdpat
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2177
rv34_gen_vlc_ext
static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, int mod_three_bits_offset, int *offset)
Generate VLC from codeword lengths.
Definition: rv34.c:94
VLC::table_allocated
int table_allocated
Definition: vlc.h:53
rv34_mc_2mv_skip
static void rv34_mc_2mv_skip(RV34DecContext *r)
Definition: rv34.c:856
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:231
decode_subblock1
static void decode_subblock1(int16_t *dst, int flags, GetBitContext *gb, const VLCElem *vlc, int q)
Decode a single coefficient.
Definition: rv34.c:282
rv34_cbp_code
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
Definition: rv34data.h:42
is_mv_diff_gt_3
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
Definition: rv34.c:1182
AVPacket::size
int size
Definition: packet.h:596
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
RV34_MB_B_BACKWARD
@ RV34_MB_B_BACKWARD
B-frame macroblock, backward prediction.
Definition: rv34.h:51
ff_rv34_decode_frame
int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_picture_ptr, AVPacket *avpkt)
Definition: rv34.c:1623
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
rectangle.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
MpegEncContext::context_initialized
int context_initialized
Definition: mpegvideo.h:95
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
update_sar
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
Definition: rv34.c:1613
FIRSTBLK_VLC_SIZE
#define FIRSTBLK_VLC_SIZE
Definition: rv34vlc.h:37
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
RV34_MB_P_8x16
@ RV34_MB_P_8x16
P-frame macroblock, 8x16 motion compensation partitions.
Definition: rv34.h:55
size
int size
Definition: twinvq_data.h:10344
VERT_RIGHT_PRED
@ VERT_RIGHT_PRED
Definition: vp9.h:51
VLCElem
Definition: vlc.h:32
rv34_decode_cbp
static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
Decode coded block pattern.
Definition: rv34.c:211
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
DC_128_PRED8x8
#define DC_128_PRED8x8
Definition: h264pred.h:76
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:61
rv34_inter_cbppat
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:2305
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:353
SliceInfo::pts
int pts
frame timestamp
Definition: rv34.h:83
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
OTHERBLK_VLC_SIZE
#define OTHERBLK_VLC_SIZE
Definition: rv34vlc.h:38
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:70
ff_vlc_init_sparse
int ff_vlc_init_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Build VLC decoding tables suitable for use with get_vlc2().
Definition: vlc.c:250
PLANE_PRED8x8
#define PLANE_PRED8x8
Definition: h264pred.h:71
Y
#define Y
Definition: boxblur.h:37
rv34_output_i16x16
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1055
RV34_MB_TYPE_INTRA16x16
@ RV34_MB_TYPE_INTRA16x16
Intra macroblock with DCs in a separate 4x4 block.
Definition: rv34.h:47
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
rv34_pred_mv_b
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
Definition: rv34.c:573
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1584
rv34_table_inter_thirdpat
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3880
DIAG_DOWN_LEFT_PRED_RV40_NODOWN
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:54
SliceInfo::height
int height
coded height
Definition: rv34.h:82
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:412
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
AV_CODEC_ID_RV40
@ AV_CODEC_ID_RV40
Definition: codec_id.h:121
part_sizes_h
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
Definition: rv34.c:476
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:373
rv34_table_inter_firstpat
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:2936
internal.h
HOR_UP_PRED_RV40_NODOWN
#define HOR_UP_PRED_RV40_NODOWN
Definition: h264pred.h:55
rv34_mc_2mv
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
Definition: rv34.c:837
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
rv34_table_intra_cbp
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
Definition: rv34vlc.h:886
MB_TYPE_BACKWARD_MV
#define MB_TYPE_BACKWARD_MV
Definition: mpegutils.h:50
RV34_MB_TYPE_INTRA
@ RV34_MB_TYPE_INTRA
Intra macroblock.
Definition: rv34.h:46
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
SUINT
#define SUINT
Definition: dct32_template.c:30
RV34_MB_TYPES
@ RV34_MB_TYPES
Definition: rv34.h:58
table_data
static VLCElem table_data[117592]
Definition: rv34.c:84
rv34_quant_to_vlc_set
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding The first table is used for in...
Definition: rv34data.h:95
SliceInfo
essential slice information
Definition: rv34.h:76
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
get_slice_offset
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
Definition: rv34.c:1578
mod
static int mod(int a, int b)
Modulo operation with only positive remainders.
Definition: vf_v360.c:755
LEFT_DC_PRED8x8
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
avcodec.h
VLC::bits
int bits
Definition: vlc.h:51
mid_pred
#define mid_pred
Definition: mathops.h:115
ret
ret
Definition: filter_design.txt:187
INVALID_VLC
#define INVALID_VLC
Definition: golomb.h:37
RV34VLC::coefficient
const VLCElem * coefficient
VLCs used for decoding big coefficients.
Definition: rv34.h:72
rv4_weight
static void rv4_weight(RV34DecContext *r)
Definition: rv34.c:815
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
rv34_inter_cbp
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
Definition: rv34vlc.h:2890
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
update_thread_context for mpegvideo-based decoders.
Definition: mpegvideo_dec.c:125
AVCodecContext
main external API structure.
Definition: avcodec.h:439
VLC_INIT_STATIC_OVERLONG
#define VLC_INIT_STATIC_OVERLONG
Definition: vlc.h:191
SliceInfo::start
int start
Definition: rv34.h:80
rv34_decode_inter_mb_header
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:410
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
HOR_UP_PRED
@ HOR_UP_PRED
Definition: vp9.h:54
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
rv34_intra_coeff
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:2281
error_resilience.h
part_sizes_w
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
Definition: rv34.c:473
VLC
Definition: vlc.h:50
ittrans
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
Definition: rv34.c:984
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:828
rv34_chroma_quant
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
Definition: rv34data.h:74
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:404
VLC::table
VLCElem * table
Definition: vlc.h:52
rv34_decode_intra_mb_header
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:375
HOR_DOWN_PRED
@ HOR_DOWN_PRED
Definition: vp9.h:52
rv34_mb_bits_sizes
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
Definition: rv34data.h:111
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
IS_8X16
#define IS_8X16(a)
Definition: mpegutils.h:82
rv34_process_block
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
Definition: rv34.c:1040
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
RV34_MB_P_MIX16x16
@ RV34_MB_P_MIX16x16
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
Definition: rv34.h:57
rv34vlc.h
VLC::table_size
int table_size
Definition: vlc.h:53
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
mem.h
rv34_mc
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
Definition: rv34.c:688
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:37
MPVWorkPicture
Definition: mpegpicture.h:95
MB_TYPE_SEPARATE_DC
#define MB_TYPE_SEPARATE_DC
Definition: rv34.h:39
RV34_MB_P_16x8
@ RV34_MB_P_16x8
P-frame macroblock, 16x8 motion compensation partitions.
Definition: rv34.h:54
TOP_DC_PRED
@ TOP_DC_PRED
Definition: vp9.h:57
AVPacket
This structure stores compressed data.
Definition: packet.h:572
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
inter_vlcs
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
Definition: rv34.c:75
mpeg_er.h
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
SliceInfo::width
int width
coded width
Definition: rv34.h:81
imgutils.h
MB_TYPE_DIRECT2
#define MB_TYPE_DIRECT2
Definition: mpegutils.h:46
RV34_MB_P_16x16
@ RV34_MB_P_16x16
P-frame macroblock, one motion frame.
Definition: rv34.h:48
choose_vlc_set
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
Definition: rv34.c:363
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
RV34_MB_B_BIDIR
@ RV34_MB_B_BIDIR
Bidirectionally predicted B-frame macroblock, two motion vectors.
Definition: rv34.h:56
modulo_three_table
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
Definition: rv34data.h:53
stride
#define stride
Definition: h264pred_template.c:536
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:228
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
MB_TYPE_FORWARD_MV
#define MB_TYPE_FORWARD_MV
Definition: mpegutils.h:49
src
#define src
Definition: vp8dsp.c:248
rv34_decoder_free
static void rv34_decoder_free(RV34DecContext *r)
Definition: rv34.c:1388
ff_mpv_common_frame_size_change
av_cold int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:181
shifts
static const uint8_t shifts[2][12]
Definition: camellia.c:178
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:64
NUM_INTER_TABLES
#define NUM_INTER_TABLES
Definition: rv34vlc.h:33
rv34_pred_b_vector
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
Definition: rv34.c:553