FFmpeg: h264_slice.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #include "libavutil/avassert.h"
29 #include "libavutil/display.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/stereo3d.h"
32 #include "internal.h"
33 #include "cabac.h"
34 #include "cabac_functions.h"
35 #include "error_resilience.h"
36 #include "avcodec.h"
37 #include "h264.h"
38 #include "h264dec.h"
39 #include "h264data.h"
40 #include "h264chroma.h"
41 #include "h264_mvpred.h"
42 #include "h264_ps.h"
43 #include "golomb.h"
44 #include "mathops.h"
45 #include "mpegutils.h"
46 #include "mpegvideo.h"
47 #include "rectangle.h"
48 #include "thread.h"
49 
50 static const uint8_t field_scan[16+1] = {
51  0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
52  0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
53  2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
54  3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
55 };
56 
57 static const uint8_t field_scan8x8[64+1] = {
58  0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
59  1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
60  2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
61  0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
62  2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
63  2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
64  2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
65  3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
66  3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
67  4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
68  4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
69  5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
70  5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
71  7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
72  6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
73  7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
74 };
75 
76 static const uint8_t field_scan8x8_cavlc[64+1] = {
77  0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
78  2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
79  3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
80  5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
81  0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
82  1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
83  3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
84  5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
85  0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
86  1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
87  3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
88  5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
89  1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
90  1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
91  3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
92  6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
93 };
94 
95 // zigzag_scan8x8_cavlc[(i/4) + 16*(i%4)] = zigzag_scan8x8[i]
96 static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
97  0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
98  4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
99  3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
100  2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
101  1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
102  3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
103  2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
104  3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
105  0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
106  2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
107  1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
108  4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
109  0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
110  1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
111  0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
112  5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
113 };
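/* A worked reading of the relation above (a sketch; assumes ff_zigzag_direct
 * is the plain 8x8 zigzag used by init_scan_tables() below): in CAVLC an 8x8
 * block is coded as four interleaved 4x4 sub-blocks, each taking every fourth
 * coefficient of the 8x8 zigzag, so entry i = 16*k + j of this table equals
 * ff_zigzag_direct[4*j + k]. For example i = 17 (k = 1, j = 1) gives
 * ff_zigzag_direct[5] = 2 + 0*8, matching the value printed above. A
 * self-check could be:
 *
 *     for (int k = 0; k < 4; k++)
 *         for (int j = 0; j < 16; j++)
 *             av_assert0(zigzag_scan8x8_cavlc[16 * k + j] ==
 *                        ff_zigzag_direct[4 * j + k]);
 */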
114 
115 static void release_unused_pictures(H264Context *h, int remove_current)
116 {
117  int i;
118 
119  /* release non-reference frames */
120  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
121  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
122  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
123  ff_h264_unref_picture(h, &h->DPB[i]);
124  }
125  }
126 }
127 
128 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
129 {
130  const H264Context *h = sl->h264;
131  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
132 
133  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
134  // edge emu needs blocksize + filter length - 1
135  // (= 21x21 for H.264)
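 // (16 block samples + 6-tap MC filter - 1 = 21 in each dimension)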
136  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
137 
138  av_fast_mallocz(&sl->top_borders[0], &sl->top_borders_allocated[0],
139  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
140  av_fast_mallocz(&sl->top_borders[1], &sl->top_borders_allocated[1],
141  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
142 
143  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
144  !sl->top_borders[0] || !sl->top_borders[1]) {
145  av_freep(&sl->bipred_scratchpad);
146  av_freep(&sl->edge_emu_buffer);
147  av_freep(&sl->top_borders[0]);
148  av_freep(&sl->top_borders[1]);
149 
150  sl->bipred_scratchpad_allocated = 0;
151  sl->edge_emu_buffer_allocated = 0;
152  sl->top_borders_allocated[0] = 0;
153  sl->top_borders_allocated[1] = 0;
154  return AVERROR(ENOMEM);
155  }
156 
157  return 0;
158 }
159 
160 static int init_table_pools(H264Context *h)
161 {
162  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
163  const int mb_array_size = h->mb_stride * h->mb_height;
164  const int b4_stride = h->mb_width * 4 + 1;
165  const int b4_array_size = b4_stride * h->mb_height * 4;
166 
167  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
168  av_buffer_allocz);
169  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
170  sizeof(uint32_t), av_buffer_allocz);
171  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
172  sizeof(int16_t), av_buffer_allocz);
173  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
174 
175  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
176  !h->ref_index_pool) {
177  av_buffer_pool_uninit(&h->qscale_table_pool);
178  av_buffer_pool_uninit(&h->mb_type_pool);
179  av_buffer_pool_uninit(&h->motion_val_pool);
180  av_buffer_pool_uninit(&h->ref_index_pool);
181  return AVERROR(ENOMEM);
182  }
183 
184  return 0;
185 }
186 
187 static int alloc_picture(H264Context *h, H264Picture *pic)
188 {
189  int i, ret = 0;
190 
191  av_assert0(!pic->f->data[0]);
192 
193  pic->tf.f = pic->f;
194  ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
195  AV_GET_BUFFER_FLAG_REF : 0);
196  if (ret < 0)
197  goto fail;
198 
199  if (h->avctx->hwaccel) {
200  const AVHWAccel *hwaccel = h->avctx->hwaccel;
201  av_assert0(!pic->hwaccel_picture_private);
202  if (hwaccel->frame_priv_data_size) {
203  pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
204  if (!pic->hwaccel_priv_buf)
205  return AVERROR(ENOMEM);
206  pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
207  }
208  }
209  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
210  int h_chroma_shift, v_chroma_shift;
211  av_pix_fmt_get_chroma_sub_sample(pic->f->format,
212  &h_chroma_shift, &v_chroma_shift);
213 
214  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
215  memset(pic->f->data[1] + pic->f->linesize[1]*i,
216  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
217  memset(pic->f->data[2] + pic->f->linesize[2]*i,
218  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
219  }
220  }
221 
222  if (!h->qscale_table_pool) {
223  ret = init_table_pools(h);
224  if (ret < 0)
225  goto fail;
226  }
227 
228  pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
229  pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
230  if (!pic->qscale_table_buf || !pic->mb_type_buf)
231  goto fail;
232 
233  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
234  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
235 
236  for (i = 0; i < 2; i++) {
237  pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
238  pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
239  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
240  goto fail;
241 
242  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
243  pic->ref_index[i] = pic->ref_index_buf[i]->data;
244  }
245 
246  pic->pps_buf = av_buffer_ref(h->ps.pps_ref);
247  if (!pic->pps_buf)
248  goto fail;
249  pic->pps = (const PPS*)pic->pps_buf->data;
250 
251  pic->mb_width = h->mb_width;
252  pic->mb_height = h->mb_height;
253  pic->mb_stride = h->mb_stride;
254 
255  return 0;
256 fail:
257  ff_h264_unref_picture(h, pic);
258  return (ret < 0) ? ret : AVERROR(ENOMEM);
259 }
260 
261 static int find_unused_picture(H264Context *h)
262 {
263  int i;
264 
265  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
266  if (!h->DPB[i].f->buf[0])
267  return i;
268  }
269  return AVERROR_INVALIDDATA;
270 }
271 
272 
273 #define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))
274 
275 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
276  (((pic) && (pic) >= (old_ctx)->DPB && \
277  (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ? \
278  &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
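/* In words: if pic points into old_ctx's DPB, return the picture at the same
 * index in new_ctx's DPB; anything else, including NULL, maps to NULL. A
 * minimal illustration (hypothetical contexts a and b):
 *
 *     H264Picture *p = &a->DPB[3];
 *     av_assert1(REBASE_PICTURE(p, b, a) == &b->DPB[3]);
 *     av_assert1(REBASE_PICTURE(NULL, b, a) == NULL);
 */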
279 
280 static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
281  H264Context *new_base,
282  H264Context *old_base)
283 {
284  int i;
285 
286  for (i = 0; i < count; i++) {
287  av_assert1(!from[i] ||
288  IN_RANGE(from[i], old_base, 1) ||
289  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
290  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
291  }
292 }
293 
294 static int h264_slice_header_init(H264Context *h);
295 
296 int ff_h264_update_thread_context(AVCodecContext *dst,
297  const AVCodecContext *src)
298 {
299  H264Context *h = dst->priv_data, *h1 = src->priv_data;
300  int inited = h->context_initialized, err = 0;
301  int need_reinit = 0;
302  int i, ret;
303 
304  if (dst == src)
305  return 0;
306 
307  if (inited && !h1->ps.sps)
308  return AVERROR_INVALIDDATA;
309 
310  if (inited &&
311  (h->width != h1->width ||
312  h->height != h1->height ||
313  h->mb_width != h1->mb_width ||
314  h->mb_height != h1->mb_height ||
315  !h->ps.sps ||
316  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
317  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
318  h->ps.sps->colorspace != h1->ps.sps->colorspace)) {
319  need_reinit = 1;
320  }
321 
322  /* copy block_offset since frame_start may not be called */
323  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
324 
325  // SPS/PPS
326  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
327  av_buffer_unref(&h->ps.sps_list[i]);
328  if (h1->ps.sps_list[i]) {
329  h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
330  if (!h->ps.sps_list[i])
331  return AVERROR(ENOMEM);
332  }
333  }
334  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
335  av_buffer_unref(&h->ps.pps_list[i]);
336  if (h1->ps.pps_list[i]) {
337  h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
338  if (!h->ps.pps_list[i])
339  return AVERROR(ENOMEM);
340  }
341  }
342 
343  av_buffer_unref(&h->ps.pps_ref);
344  h->ps.pps = NULL;
345  h->ps.sps = NULL;
346  if (h1->ps.pps_ref) {
347  h->ps.pps_ref = av_buffer_ref(h1->ps.pps_ref);
348  if (!h->ps.pps_ref)
349  return AVERROR(ENOMEM);
350  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
351  h->ps.sps = h->ps.pps->sps;
352  }
353 
354  if (need_reinit || !inited) {
355  h->width = h1->width;
356  h->height = h1->height;
357  h->mb_height = h1->mb_height;
358  h->mb_width = h1->mb_width;
359  h->mb_num = h1->mb_num;
360  h->mb_stride = h1->mb_stride;
361  h->b_stride = h1->b_stride;
362  h->x264_build = h1->x264_build;
363 
364  if (h->context_initialized || h1->context_initialized) {
365  if ((err = h264_slice_header_init(h)) < 0) {
366  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
367  return err;
368  }
369  }
370 
371  /* copy block_offset since frame_start may not be called */
372  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
373  }
374 
375  h->avctx->coded_height = h1->avctx->coded_height;
376  h->avctx->coded_width = h1->avctx->coded_width;
377  h->avctx->width = h1->avctx->width;
378  h->avctx->height = h1->avctx->height;
379  h->width_from_caller = h1->width_from_caller;
380  h->height_from_caller = h1->height_from_caller;
381  h->coded_picture_number = h1->coded_picture_number;
382  h->first_field = h1->first_field;
383  h->picture_structure = h1->picture_structure;
384  h->mb_aff_frame = h1->mb_aff_frame;
385  h->droppable = h1->droppable;
386 
387  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
388  ff_h264_unref_picture(h, &h->DPB[i]);
389  if (h1->DPB[i].f->buf[0] &&
390  (ret = ff_h264_ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
391  return ret;
392  }
393 
394  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
395  ff_h264_unref_picture(h, &h->cur_pic);
396  if (h1->cur_pic.f->buf[0]) {
397  ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic);
398  if (ret < 0)
399  return ret;
400  }
401 
402  h->enable_er = h1->enable_er;
403  h->workaround_bugs = h1->workaround_bugs;
404  h->droppable = h1->droppable;
405 
406  // extradata/NAL handling
407  h->is_avc = h1->is_avc;
408  h->nal_length_size = h1->nal_length_size;
409 
410  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
411 
412  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
413  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
414  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
415  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
416 
417  h->next_output_pic = h1->next_output_pic;
418  h->next_outputed_poc = h1->next_outputed_poc;
419 
420  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
421  h->nb_mmco = h1->nb_mmco;
422  h->mmco_reset = h1->mmco_reset;
423  h->explicit_ref_marking = h1->explicit_ref_marking;
424  h->long_ref_count = h1->long_ref_count;
425  h->short_ref_count = h1->short_ref_count;
426 
427  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
428  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
429  copy_picture_range(h->delayed_pic, h1->delayed_pic,
430  MAX_DELAYED_PIC_COUNT + 2, h, h1);
431 
432  h->frame_recovered = h1->frame_recovered;
433 
434  av_buffer_unref(&h->sei.a53_caption.buf_ref);
435  if (h1->sei.a53_caption.buf_ref) {
436  h->sei.a53_caption.buf_ref = av_buffer_ref(h1->sei.a53_caption.buf_ref);
437  if (!h->sei.a53_caption.buf_ref)
438  return AVERROR(ENOMEM);
439  }
440 
441  if (!h->cur_pic_ptr)
442  return 0;
443 
444  if (!h->droppable) {
445  err = ff_h264_execute_ref_pic_marking(h);
446  h->poc.prev_poc_msb = h->poc.poc_msb;
447  h->poc.prev_poc_lsb = h->poc.poc_lsb;
448  }
449  h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
450  h->poc.prev_frame_num = h->poc.frame_num;
451 
452  h->recovery_frame = h1->recovery_frame;
453 
454  return err;
455 }
456 
457 static int h264_frame_start(H264Context *h)
458 {
459  H264Picture *pic;
460  int i, ret;
461  const int pixel_shift = h->pixel_shift;
462 
463  if (!ff_thread_can_start_frame(h->avctx)) {
464  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
465  return -1;
466  }
467 
468  release_unused_pictures(h, 1);
469  h->cur_pic_ptr = NULL;
470 
471  i = find_unused_picture(h);
472  if (i < 0) {
473  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
474  return i;
475  }
476  pic = &h->DPB[i];
477 
478  pic->reference = h->droppable ? 0 : h->picture_structure;
479  pic->f->coded_picture_number = h->coded_picture_number++;
480  pic->field_picture = h->picture_structure != PICT_FRAME;
481  pic->frame_num = h->poc.frame_num;
482  /*
483  * Zero key_frame here; per-slice IDR markings for the frame or its
484  * fields are ORed in later.
485  * See decode_nal_units().
486  */
487  pic->f->key_frame = 0;
488  pic->mmco_reset = 0;
489  pic->recovered = 0;
490  pic->invalid_gap = 0;
491  pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
492 
493  pic->f->pict_type = h->slice_ctx[0].slice_type;
494 
495  pic->f->crop_left = h->crop_left;
496  pic->f->crop_right = h->crop_right;
497  pic->f->crop_top = h->crop_top;
498  pic->f->crop_bottom = h->crop_bottom;
499 
500  if ((ret = alloc_picture(h, pic)) < 0)
501  return ret;
502 
503  h->cur_pic_ptr = pic;
504  ff_h264_unref_picture(h, &h->cur_pic);
505  if (CONFIG_ERROR_RESILIENCE) {
506  ff_h264_set_erpic(&h->slice_ctx[0].er.cur_pic, NULL);
507  }
508 
509  if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
510  return ret;
511 
512  for (i = 0; i < h->nb_slice_ctx; i++) {
513  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
514  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
515  }
516 
517  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
518  ff_er_frame_start(&h->slice_ctx[0].er);
519  ff_h264_set_erpic(&h->slice_ctx[0].er.last_pic, NULL);
520  ff_h264_set_erpic(&h->slice_ctx[0].er.next_pic, NULL);
521  }
522 
523  for (i = 0; i < 16; i++) {
524  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
525  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
526  }
527  for (i = 0; i < 16; i++) {
528  h->block_offset[16 + i] =
529  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
530  h->block_offset[48 + 16 + i] =
531  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
532  }
533 
534  /* We mark the current picture as non-reference after allocating it, so
535  * that if we break out due to an error it can be released automatically
536  * in the next ff_mpv_frame_start().
537  */
538  h->cur_pic_ptr->reference = 0;
539 
540  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
541 
542  h->next_output_pic = NULL;
543 
544  h->postpone_filter = 0;
545 
546  h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
547 
548  if (h->sei.unregistered.x264_build >= 0)
549  h->x264_build = h->sei.unregistered.x264_build;
550 
551  assert(h->cur_pic_ptr->long_ref == 0);
552 
553  return 0;
554 }
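/* A sketch of what the block_offset tables above encode (values assume 8-bit
 * luma, pixel_shift = 0): block i = 3 has scan8[3] - scan8[0] = 9, i.e.
 * x = 9 & 7 = 1 and y = 9 >> 3 = 1 in 4x4-block units, so its offset is
 * 4*1 + 4 * linesize * 1: four samples right and four rows down. The entries
 * at 48 + i repeat the same positions with a doubled row step (8 * linesize),
 * matching field/MBAFF macroblocks that address every second line. */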
555 
556 static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl,
557  uint8_t *src_y,
558  uint8_t *src_cb, uint8_t *src_cr,
559  int linesize, int uvlinesize,
560  int simple)
561 {
562  uint8_t *top_border;
563  int top_idx = 1;
564  const int pixel_shift = h->pixel_shift;
565  int chroma444 = CHROMA444(h);
566  int chroma422 = CHROMA422(h);
567 
568  src_y -= linesize;
569  src_cb -= uvlinesize;
570  src_cr -= uvlinesize;
571 
572  if (!simple && FRAME_MBAFF(h)) {
573  if (sl->mb_y & 1) {
574  if (!MB_MBAFF(sl)) {
575  top_border = sl->top_borders[0][sl->mb_x];
576  AV_COPY128(top_border, src_y + 15 * linesize);
577  if (pixel_shift)
578  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
579  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
580  if (chroma444) {
581  if (pixel_shift) {
582  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
583  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
584  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
585  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
586  } else {
587  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
588  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
589  }
590  } else if (chroma422) {
591  if (pixel_shift) {
592  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
593  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
594  } else {
595  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
596  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
597  }
598  } else {
599  if (pixel_shift) {
600  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
601  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
602  } else {
603  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
604  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
605  }
606  }
607  }
608  }
609  } else if (MB_MBAFF(sl)) {
610  top_idx = 0;
611  } else
612  return;
613  }
614 
615  top_border = sl->top_borders[top_idx][sl->mb_x];
616  /* There are two lines saved, the line above the top macroblock
617  * of a pair, and the line above the bottom macroblock. */
618  AV_COPY128(top_border, src_y + 16 * linesize);
619  if (pixel_shift)
620  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
621 
622  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
623  if (chroma444) {
624  if (pixel_shift) {
625  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
626  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
627  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
628  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
629  } else {
630  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
631  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
632  }
633  } else if (chroma422) {
634  if (pixel_shift) {
635  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
636  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
637  } else {
638  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
639  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
640  }
641  } else {
642  if (pixel_shift) {
643  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
644  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
645  } else {
646  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
647  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
648  }
649  }
650  }
651 }
652 
653 /**
654  * Initialize implicit_weight table.
655  * @param field 0/1 initialize the weight for interlaced MBAFF
656  * -1 initializes the rest
657  */
658 static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
659 {
660  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
661 
662  for (i = 0; i < 2; i++) {
663  sl->pwt.luma_weight_flag[i] = 0;
664  sl->pwt.chroma_weight_flag[i] = 0;
665  }
666 
667  if (field < 0) {
668  if (h->picture_structure == PICT_FRAME) {
669  cur_poc = h->cur_pic_ptr->poc;
670  } else {
671  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
672  }
673  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
674  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
675  sl->pwt.use_weight = 0;
676  sl->pwt.use_weight_chroma = 0;
677  return;
678  }
679  ref_start = 0;
680  ref_count0 = sl->ref_count[0];
681  ref_count1 = sl->ref_count[1];
682  } else {
683  cur_poc = h->cur_pic_ptr->field_poc[field];
684  ref_start = 16;
685  ref_count0 = 16 + 2 * sl->ref_count[0];
686  ref_count1 = 16 + 2 * sl->ref_count[1];
687  }
688 
689  sl->pwt.use_weight = 2;
690  sl->pwt.use_weight_chroma = 2;
691  sl->pwt.luma_log2_weight_denom = 5;
692  sl->pwt.chroma_log2_weight_denom = 5;
693 
694  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
695  int64_t poc0 = sl->ref_list[0][ref0].poc;
696  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
697  int w = 32;
698  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
699  int poc1 = sl->ref_list[1][ref1].poc;
700  int td = av_clip_int8(poc1 - poc0);
701  if (td) {
702  int tb = av_clip_int8(cur_poc - poc0);
703  int tx = (16384 + (FFABS(td) >> 1)) / td;
704  int dist_scale_factor = (tb * tx + 32) >> 8;
705  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
706  w = 64 - dist_scale_factor;
707  }
708  }
709  if (field < 0) {
710  sl->pwt.implicit_weight[ref0][ref1][0] =
711  sl->pwt.implicit_weight[ref0][ref1][1] = w;
712  } else {
713  sl->pwt.implicit_weight[ref0][ref1][field] = w;
714  }
715  }
716  }
717 }
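/* Worked example of the implicit-weight arithmetic above (a sketch with
 * editor-chosen POCs): poc0 = 0, poc1 = 4, cur_poc = 2 puts the current
 * picture halfway between its two references, so
 *
 *     td = 4, tb = 2,
 *     tx = (16384 + 2) / 4 = 4096,
 *     dist_scale_factor = (2 * 4096 + 32) >> 8 = 32,
 *     w = 64 - 32 = 32,
 *
 * i.e. the 64-unit weight is split evenly between the two references; a
 * reference further from cur_poc gets a proportionally smaller share. */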
718 
719 /**
720  * initialize scan tables
721  */
722 static void init_scan_tables(H264Context *h)
723 {
724  int i;
725  for (i = 0; i < 16; i++) {
726 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
727  h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
728  h->field_scan[i] = TRANSPOSE(field_scan[i]);
729 #undef TRANSPOSE
730  }
731  for (i = 0; i < 64; i++) {
732 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
733  h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]);
734  h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
735  h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]);
736  h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
737 #undef TRANSPOSE
738  }
739  if (h->ps.sps->transform_bypass) { // FIXME same ugly
740  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
741  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
742  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
743  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
744  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
745  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
746  } else {
747  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
748  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
749  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
750  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
751  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
752  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
753  }
754 }
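/* The TRANSPOSE macros above swap the row and column of a raster index,
 * converting the transposed storage layout of the tables in this file into
 * the order the decoder actually indexes. In the 4x4 case, for instance,
 * index 6 (row 1, column 2) maps to (6 >> 2) | ((6 << 2) & 0xF) = 1 | 8 = 9
 * (row 2, column 1). */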
755 
756 static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
757 {
758 #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
759  (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
760  CONFIG_H264_NVDEC_HWACCEL + \
761  CONFIG_H264_VAAPI_HWACCEL + \
762  CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
763  CONFIG_H264_VDPAU_HWACCEL)
764  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
765  const enum AVPixelFormat *choices = pix_fmts;
766  int i;
767 
768  switch (h->ps.sps->bit_depth_luma) {
769  case 9:
770  if (CHROMA444(h)) {
771  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
772  *fmt++ = AV_PIX_FMT_GBRP9;
773  } else
774  *fmt++ = AV_PIX_FMT_YUV444P9;
775  } else if (CHROMA422(h))
776  *fmt++ = AV_PIX_FMT_YUV422P9;
777  else
778  *fmt++ = AV_PIX_FMT_YUV420P9;
779  break;
780  case 10:
781  if (CHROMA444(h)) {
782  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
783  *fmt++ = AV_PIX_FMT_GBRP10;
784  } else
785  *fmt++ = AV_PIX_FMT_YUV444P10;
786  } else if (CHROMA422(h))
787  *fmt++ = AV_PIX_FMT_YUV422P10;
788  else
789  *fmt++ = AV_PIX_FMT_YUV420P10;
790  break;
791  case 12:
792  if (CHROMA444(h)) {
793  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
794  *fmt++ = AV_PIX_FMT_GBRP12;
795  } else
796  *fmt++ = AV_PIX_FMT_YUV444P12;
797  } else if (CHROMA422(h))
798  *fmt++ = AV_PIX_FMT_YUV422P12;
799  else
800  *fmt++ = AV_PIX_FMT_YUV420P12;
801  break;
802  case 14:
803  if (CHROMA444(h)) {
804  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
805  *fmt++ = AV_PIX_FMT_GBRP14;
806  } else
807  *fmt++ = AV_PIX_FMT_YUV444P14;
808  } else if (CHROMA422(h))
809  *fmt++ = AV_PIX_FMT_YUV422P14;
810  else
811  *fmt++ = AV_PIX_FMT_YUV420P14;
812  break;
813  case 8:
814 #if CONFIG_H264_VDPAU_HWACCEL
815  *fmt++ = AV_PIX_FMT_VDPAU;
816 #endif
817 #if CONFIG_H264_NVDEC_HWACCEL
818  *fmt++ = AV_PIX_FMT_CUDA;
819 #endif
820  if (CHROMA444(h)) {
821  if (h->avctx->colorspace == AVCOL_SPC_RGB)
822  *fmt++ = AV_PIX_FMT_GBRP;
823  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
824  *fmt++ = AV_PIX_FMT_YUVJ444P;
825  else
826  *fmt++ = AV_PIX_FMT_YUV444P;
827  } else if (CHROMA422(h)) {
828  if (h->avctx->color_range == AVCOL_RANGE_JPEG)
829  *fmt++ = AV_PIX_FMT_YUVJ422P;
830  else
831  *fmt++ = AV_PIX_FMT_YUV422P;
832  } else {
833 #if CONFIG_H264_DXVA2_HWACCEL
834  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
835 #endif
836 #if CONFIG_H264_D3D11VA_HWACCEL
837  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
838  *fmt++ = AV_PIX_FMT_D3D11;
839 #endif
840 #if CONFIG_H264_VAAPI_HWACCEL
841  *fmt++ = AV_PIX_FMT_VAAPI;
842 #endif
843 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
844  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
845 #endif
846  if (h->avctx->codec->pix_fmts)
847  choices = h->avctx->codec->pix_fmts;
848  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
849  *fmt++ = AV_PIX_FMT_YUVJ420P;
850  else
851  *fmt++ = AV_PIX_FMT_YUV420P;
852  }
853  break;
854  default:
855  av_log(h->avctx, AV_LOG_ERROR,
856  "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
857  return AVERROR_INVALIDDATA;
858  }
859 
860  *fmt = AV_PIX_FMT_NONE;
861 
862  for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
863  if (choices[i] == h->avctx->pix_fmt && !force_callback)
864  return choices[i];
865  return ff_thread_get_format(h->avctx, choices);
866 }
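/* A note on the list built above (a reading, not upstream commentary):
 * hwaccel pixel formats are appended first and the software format last, so
 * when ff_thread_get_format() hands the AV_PIX_FMT_NONE-terminated list to
 * the caller's get_format() callback, a caller that ignores hwaccels
 * naturally falls through to the software entry. */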
867 
868 /* export coded and cropped frame dimensions to AVCodecContext */
869 static void init_dimensions(H264Context *h)
870 {
871  const SPS *sps = (const SPS*)h->ps.sps;
872  int cr = sps->crop_right;
873  int cl = sps->crop_left;
874  int ct = sps->crop_top;
875  int cb = sps->crop_bottom;
876  int width = h->width - (cr + cl);
877  int height = h->height - (ct + cb);
878  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
879  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
880 
881  /* handle container cropping */
882  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
883  !sps->crop_top && !sps->crop_left &&
884  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
885  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
886  h->width_from_caller <= width &&
887  h->height_from_caller <= height) {
888  width = h->width_from_caller;
889  height = h->height_from_caller;
890  cl = 0;
891  ct = 0;
892  cr = h->width - width;
893  cb = h->height - height;
894  } else {
895  h->width_from_caller = 0;
896  h->height_from_caller = 0;
897  }
898 
899  h->avctx->coded_width = h->width;
900  h->avctx->coded_height = h->height;
901  h->avctx->width = width;
902  h->avctx->height = height;
903  h->crop_right = cr;
904  h->crop_left = cl;
905  h->crop_top = ct;
906  h->crop_bottom = cb;
907 }
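/* Worked example of the container-cropping branch above (editor-chosen
 * numbers): a 1920x1080 stream is coded as 120x68 macroblocks (1920x1088)
 * with sps->crop_bottom = 8. A caller-supplied 1920x1080 satisfies
 * FFALIGN(1080, 16) == FFALIGN(1088 - 8, 16) and is not larger than the
 * SPS-cropped size, so it is accepted and the crop becomes
 * cl = ct = cr = 0, cb = 1088 - 1080 = 8. */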
908 
909 static int h264_slice_header_init(H264Context *h)
910 {
911  const SPS *sps = h->ps.sps;
912  int i, ret;
913 
914  if (!sps) {
915  ret = AVERROR_INVALIDDATA;
916  goto fail;
917  }
918 
919  ff_set_sar(h->avctx, sps->sar);
920  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
921  &h->chroma_x_shift, &h->chroma_y_shift);
922 
923  if (sps->timing_info_present_flag) {
924  int64_t den = sps->time_scale;
925  if (h->x264_build < 44U)
926  den *= 2;
927  av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
928  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
929  }
930 
931  ff_h264_free_tables(h);
932 
933  h->first_field = 0;
934  h->prev_interlaced_frame = 1;
935 
936  init_scan_tables(h);
937  ret = ff_h264_alloc_tables(h);
938  if (ret < 0) {
939  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
940  goto fail;
941  }
942 
943  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
944  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
945  ) {
946  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
947  sps->bit_depth_luma);
948  ret = AVERROR_INVALIDDATA;
949  goto fail;
950  }
951 
952  h->cur_bit_depth_luma =
953  h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
954  h->cur_chroma_format_idc = sps->chroma_format_idc;
955  h->pixel_shift = sps->bit_depth_luma > 8;
956  h->chroma_format_idc = sps->chroma_format_idc;
957  h->bit_depth_luma = sps->bit_depth_luma;
958 
959  ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
960  sps->chroma_format_idc);
961  ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
962  ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
963  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
964  sps->chroma_format_idc);
965  ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
966 
967  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
968  ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]);
969  if (ret < 0) {
970  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
971  goto fail;
972  }
973  } else {
974  for (i = 0; i < h->nb_slice_ctx; i++) {
975  H264SliceContext *sl = &h->slice_ctx[i];
976 
977  sl->h264 = h;
978  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
979  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
980  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
981 
982  if ((ret = ff_h264_slice_context_init(h, sl)) < 0) {
983  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
984  goto fail;
985  }
986  }
987  }
988 
989  h->context_initialized = 1;
990 
991  return 0;
992 fail:
993  ff_h264_free_tables(h);
994  h->context_initialized = 0;
995  return ret;
996 }
997 
998 static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
999 {
1000  switch (a) {
1001  case AV_PIX_FMT_YUVJ420P: return AV_PIX_FMT_YUV420P;
1002  case AV_PIX_FMT_YUVJ422P: return AV_PIX_FMT_YUV422P;
1003  case AV_PIX_FMT_YUVJ444P: return AV_PIX_FMT_YUV444P;
1004  default:
1005  return a;
1006  }
1007 }
1008 
1009 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
1010 {
1011  const SPS *sps;
1012  int needs_reinit = 0, must_reinit, ret;
1013 
1014  if (first_slice) {
1015  av_buffer_unref(&h->ps.pps_ref);
1016  h->ps.pps = NULL;
1017  h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
1018  if (!h->ps.pps_ref)
1019  return AVERROR(ENOMEM);
1020  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
1021  }
1022 
1023  if (h->ps.sps != h->ps.pps->sps) {
1024  h->ps.sps = (const SPS*)h->ps.pps->sps;
1025 
1026  if (h->mb_width != h->ps.sps->mb_width ||
1027  h->mb_height != h->ps.sps->mb_height ||
1028  h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma ||
1029  h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
1030  )
1031  needs_reinit = 1;
1032 
1033  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
1034  h->chroma_format_idc != h->ps.sps->chroma_format_idc)
1035  needs_reinit = 1;
1036  }
1037  sps = h->ps.sps;
1038 
1039  must_reinit = (h->context_initialized &&
1040  ( 16*sps->mb_width != h->avctx->coded_width
1041  || 16*sps->mb_height != h->avctx->coded_height
1042  || h->cur_bit_depth_luma != sps->bit_depth_luma
1043  || h->cur_chroma_format_idc != sps->chroma_format_idc
1044  || h->mb_width != sps->mb_width
1045  || h->mb_height != sps->mb_height
1046  ));
1047  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
1048  || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
1049  must_reinit = 1;
1050 
1051  if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio))
1052  must_reinit = 1;
1053 
1054  if (!h->setup_finished) {
1055  h->avctx->profile = ff_h264_get_profile(sps);
1056  h->avctx->level = sps->level_idc;
1057  h->avctx->refs = sps->ref_frame_count;
1058 
1059  h->mb_width = sps->mb_width;
1060  h->mb_height = sps->mb_height;
1061  h->mb_num = h->mb_width * h->mb_height;
1062  h->mb_stride = h->mb_width + 1;
1063 
1064  h->b_stride = h->mb_width * 4;
1065 
1066  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
1067 
1068  h->width = 16 * h->mb_width;
1069  h->height = 16 * h->mb_height;
1070 
1071  init_dimensions(h);
1072 
1073  if (sps->video_signal_type_present_flag) {
1074  h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG
1075  : AVCOL_RANGE_MPEG;
1076  if (sps->colour_description_present_flag) {
1077  if (h->avctx->colorspace != sps->colorspace)
1078  needs_reinit = 1;
1079  h->avctx->color_primaries = sps->color_primaries;
1080  h->avctx->color_trc = sps->color_trc;
1081  h->avctx->colorspace = sps->colorspace;
1082  }
1083  }
1084 
1085  if (h->sei.alternative_transfer.present &&
1086  av_color_transfer_name(h->sei.alternative_transfer.preferred_transfer_characteristics) &&
1087  h->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
1088  h->avctx->color_trc = h->sei.alternative_transfer.preferred_transfer_characteristics;
1089  }
1090  }
1091  h->avctx->chroma_sample_location = sps->chroma_location;
1092 
1093  if (!h->context_initialized || must_reinit || needs_reinit) {
1094  int flush_changes = h->context_initialized;
1095  h->context_initialized = 0;
1096  if (sl != h->slice_ctx) {
1097  av_log(h->avctx, AV_LOG_ERROR,
1098  "changing width %d -> %d / height %d -> %d on "
1099  "slice %d\n",
1100  h->width, h->avctx->coded_width,
1101  h->height, h->avctx->coded_height,
1102  h->current_slice + 1);
1103  return AVERROR_INVALIDDATA;
1104  }
1105 
1106  av_assert1(first_slice);
1107 
1108  if (flush_changes)
1109  ff_h264_flush_change(h);
1110 
1111  if ((ret = get_pixel_format(h, 1)) < 0)
1112  return ret;
1113  h->avctx->pix_fmt = ret;
1114 
1115  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
1116  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
1117 
1118  if ((ret = h264_slice_header_init(h)) < 0) {
1119  av_log(h->avctx, AV_LOG_ERROR,
1120  "h264_slice_header_init() failed\n");
1121  return ret;
1122  }
1123  }
1124 
1125  return 0;
1126 }
1127 
1128 static int h264_export_frame_props(H264Context *h)
1129 {
1130  const SPS *sps = h->ps.sps;
1131  H264Picture *cur = h->cur_pic_ptr;
1132 
1133  cur->f->interlaced_frame = 0;
1134  cur->f->repeat_pict = 0;
1135 
1136  /* Signal interlacing information externally. */
1137  /* Prioritize picture timing SEI information over the
1138  * decoding process, if such SEI is present. */
1139  if (h->sei.picture_timing.present) {
1140  int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
1141  h->avctx);
1142  if (ret < 0) {
1143  av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
1144  if (h->avctx->err_recognition & AV_EF_EXPLODE)
1145  return ret;
1146  h->sei.picture_timing.present = 0;
1147  }
1148  }
1149 
1150  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1151  H264SEIPictureTiming *pt = &h->sei.picture_timing;
1152  switch (pt->pic_struct) {
1153  case H264_SEI_PIC_STRUCT_FRAME:
1154  break;
1155  case H264_SEI_PIC_STRUCT_TOP_FIELD:
1156  case H264_SEI_PIC_STRUCT_BOTTOM_FIELD:
1157  cur->f->interlaced_frame = 1;
1158  break;
1159  case H264_SEI_PIC_STRUCT_TOP_BOTTOM:
1160  case H264_SEI_PIC_STRUCT_BOTTOM_TOP:
1161  if (FIELD_OR_MBAFF_PICTURE(h))
1162  cur->f->interlaced_frame = 1;
1163  else
1164  // try to flag soft telecine progressive
1165  cur->f->interlaced_frame = h->prev_interlaced_frame;
1166  break;
1167  case H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
1168  case H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
1169  /* Signal the possibility of telecined film externally
1170  * (pic_struct 5,6). From these hints, let the applications
1171  * decide if they apply deinterlacing. */
1172  cur->f->repeat_pict = 1;
1173  break;
1174  case H264_SEI_PIC_STRUCT_FRAME_DOUBLING:
1175  cur->f->repeat_pict = 2;
1176  break;
1177  case H264_SEI_PIC_STRUCT_FRAME_TRIPLING:
1178  cur->f->repeat_pict = 4;
1179  break;
1180  }
1181 
1182  if ((pt->ct_type & 3) &&
1183  pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
1184  cur->f->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
1185  } else {
1186  /* Derive interlacing flag from used decoding process. */
1187  cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
1188  }
1189  h->prev_interlaced_frame = cur->f->interlaced_frame;
1190 
1191  if (cur->field_poc[0] != cur->field_poc[1]) {
1192  /* Derive top_field_first from field pocs. */
1193  cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
1194  } else {
1195  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1196  /* Use picture timing SEI information. Even if it belongs to
1197  * a past frame, it is better than nothing. */
1198  if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
1199  h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
1200  cur->f->top_field_first = 1;
1201  else
1202  cur->f->top_field_first = 0;
1203  } else if (cur->f->interlaced_frame) {
1204  /* Default to top field first when pic_struct_present_flag
1205  * is not set but interlaced frame detected */
1206  cur->f->top_field_first = 1;
1207  } else {
1208  /* Most likely progressive */
1209  cur->f->top_field_first = 0;
1210  }
1211  }
1212 
1213  if (h->sei.frame_packing.present &&
1214  h->sei.frame_packing.arrangement_type <= 6 &&
1215  h->sei.frame_packing.content_interpretation_type > 0 &&
1216  h->sei.frame_packing.content_interpretation_type < 3) {
1217  H264SEIFramePacking *fp = &h->sei.frame_packing;
1218  AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
1219  if (stereo) {
1220  switch (fp->arrangement_type) {
1221  case H264_SEI_FPA_TYPE_CHECKERBOARD:
1222  stereo->type = AV_STEREO3D_CHECKERBOARD;
1223  break;
1224  case H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN:
1225  stereo->type = AV_STEREO3D_COLUMNS;
1226  break;
1227  case H264_SEI_FPA_TYPE_INTERLEAVE_ROW:
1228  stereo->type = AV_STEREO3D_LINES;
1229  break;
1230  case H264_SEI_FPA_TYPE_SIDE_BY_SIDE:
1231  if (fp->quincunx_sampling_flag)
1232  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
1233  else
1234  stereo->type = AV_STEREO3D_SIDEBYSIDE;
1235  break;
1236  case H264_SEI_FPA_TYPE_TOP_BOTTOM:
1237  stereo->type = AV_STEREO3D_TOPBOTTOM;
1238  break;
1239  case H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL:
1240  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
1241  break;
1242  case H264_SEI_FPA_TYPE_2D:
1243  stereo->type = AV_STEREO3D_2D;
1244  break;
1245  }
1246 
1247  if (fp->content_interpretation_type == 2)
1248  stereo->flags = AV_STEREO3D_FLAG_INVERT;
1249 
1250  if (fp->arrangement_type == H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL) {
1251  if (fp->current_frame_is_frame0_flag)
1252  stereo->view = AV_STEREO3D_VIEW_LEFT;
1253  else
1254  stereo->view = AV_STEREO3D_VIEW_RIGHT;
1255  }
1256  }
1257  }
1258 
1259  if (h->sei.display_orientation.present &&
1260  (h->sei.display_orientation.anticlockwise_rotation ||
1261  h->sei.display_orientation.hflip ||
1262  h->sei.display_orientation.vflip)) {
1263  H264SEIDisplayOrientation *o = &h->sei.display_orientation;
1264  double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
1265  AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
1266  AV_FRAME_DATA_DISPLAYMATRIX,
1267  sizeof(int32_t) * 9);
1268  if (rotation) {
1269  av_display_rotation_set((int32_t *)rotation->data, angle);
1270  av_display_matrix_flip((int32_t *)rotation->data,
1271  o->hflip, o->vflip);
1272  }
1273  }
1274 
1275  if (h->sei.afd.present) {
1276  AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_AFD,
1277  sizeof(uint8_t));
1278 
1279  if (sd) {
1280  *sd->data = h->sei.afd.active_format_description;
1281  h->sei.afd.present = 0;
1282  }
1283  }
1284 
1285  if (h->sei.a53_caption.buf_ref) {
1286  H264SEIA53Caption *a53 = &h->sei.a53_caption;
1287 
1288  AVFrameSideData *sd = av_frame_new_side_data_from_buf(cur->f, AV_FRAME_DATA_A53_CC, a53->buf_ref);
1289  if (!sd)
1290  av_buffer_unref(&a53->buf_ref);
1291  a53->buf_ref = NULL;
1292 
1293  h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1294  }
1295 
1296  if (h->sei.picture_timing.timecode_cnt > 0) {
1297  uint32_t tc = 0;
1298  uint32_t *tc_sd;
1299 
1300  AVFrameSideData *tcside = av_frame_new_side_data(cur->f,
1301  AV_FRAME_DATA_S12M_TIMECODE,
1302  sizeof(uint32_t)*4);
1303  if (!tcside)
1304  return AVERROR(ENOMEM);
1305 
1306  tc_sd = (uint32_t*)tcside->data;
1307  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
1308 
1309  for (int i = 0; i < tc_sd[0]; i++) {
1310  uint32_t frames;
1311 
1312  /* For SMPTE 12-M timecodes, frame count is a special case if > 30 FPS.
1313  See SMPTE ST 12-1:2014 Sec 12.1 for more info. */
1314  if (av_cmp_q(h->avctx->framerate, (AVRational) {30, 1}) == 1) {
1315  frames = h->sei.picture_timing.timecode[i].frame / 2;
1316  if (h->sei.picture_timing.timecode[i].frame % 2 == 1) {
1317  if (av_cmp_q(h->avctx->framerate, (AVRational) {50, 1}) == 0)
1318  tc |= (1 << 7);
1319  else
1320  tc |= (1 << 23);
1321  }
1322  } else {
1323  frames = h->sei.picture_timing.timecode[i].frame;
1324  }
1325 
1326  tc |= h->sei.picture_timing.timecode[i].dropframe << 30;
1327  tc |= (frames / 10) << 28;
1328  tc |= (frames % 10) << 24;
1329  tc |= (h->sei.picture_timing.timecode[i].seconds / 10) << 20;
1330  tc |= (h->sei.picture_timing.timecode[i].seconds % 10) << 16;
1331  tc |= (h->sei.picture_timing.timecode[i].minutes / 10) << 12;
1332  tc |= (h->sei.picture_timing.timecode[i].minutes % 10) << 8;
1333  tc |= (h->sei.picture_timing.timecode[i].hours / 10) << 4;
1334  tc |= (h->sei.picture_timing.timecode[i].hours % 10);
1335 
1336  tc_sd[i + 1] = tc;
1337  }
1338  h->sei.picture_timing.timecode_cnt = 0;
1339  }
1340 
1341  return 0;
1342 }
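/* Worked example of the S12M timecode packing above (editor-chosen values):
 * 01:02:03 frame 04 at 25 fps, no drop frame, packs one BCD digit per nibble
 * from hours in the low bits up to frames:
 *
 *     tc = (0 << 30)              // dropframe
 *        | (0 << 28) | (4 << 24)  // frames: tens, units
 *        | (0 << 20) | (3 << 16)  // seconds: tens, units
 *        | (0 << 12) | (2 <<  8)  // minutes: tens, units
 *        | (0 <<  4) | 1          // hours:   tens, units
 *        = 0x04030201;
 *
 * Above 30 fps the stored frame count is halved, and an odd original frame
 * number is flagged in bit 7 (50 fps) or bit 23 (other rates), per
 * SMPTE ST 12-1. */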
1343 
1344 static int h264_select_output_frame(H264Context *h)
1345 {
1346  const SPS *sps = h->ps.sps;
1347  H264Picture *out = h->cur_pic_ptr;
1348  H264Picture *cur = h->cur_pic_ptr;
1349  int i, pics, out_of_order, out_idx;
1350 
1351  cur->mmco_reset = h->mmco_reset;
1352  h->mmco_reset = 0;
1353 
1354  if (sps->bitstream_restriction_flag ||
1355  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
1356  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
1357  }
1358 
1359  for (i = 0; 1; i++) {
1360  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
1361  if(i)
1362  h->last_pocs[i-1] = cur->poc;
1363  break;
1364  } else if(i) {
1365  h->last_pocs[i-1]= h->last_pocs[i];
1366  }
1367  }
1368  out_of_order = MAX_DELAYED_PIC_COUNT - i;
1369  if( cur->f->pict_type == AV_PICTURE_TYPE_B
1370  || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - (int64_t)h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
1371  out_of_order = FFMAX(out_of_order, 1);
1372  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
1373  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
1374  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
1375  h->last_pocs[i] = INT_MIN;
1376  h->last_pocs[0] = cur->poc;
1377  cur->mmco_reset = 1;
1378  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
1379  int loglevel = h->avctx->frame_number > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
1380  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
1381  h->avctx->has_b_frames = out_of_order;
1382  }
1383 
1384  pics = 0;
1385  while (h->delayed_pic[pics])
1386  pics++;
1387 
1388  av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
1389 
1390  h->delayed_pic[pics++] = cur;
1391  if (cur->reference == 0)
1392  cur->reference = DELAYED_PIC_REF;
1393 
1394  out = h->delayed_pic[0];
1395  out_idx = 0;
1396  for (i = 1; h->delayed_pic[i] &&
1397  !h->delayed_pic[i]->f->key_frame &&
1398  !h->delayed_pic[i]->mmco_reset;
1399  i++)
1400  if (h->delayed_pic[i]->poc < out->poc) {
1401  out = h->delayed_pic[i];
1402  out_idx = i;
1403  }
1404  if (h->avctx->has_b_frames == 0 &&
1405  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
1406  h->next_outputed_poc = INT_MIN;
1407  out_of_order = out->poc < h->next_outputed_poc;
1408 
1409  if (out_of_order || pics > h->avctx->has_b_frames) {
1410  out->reference &= ~DELAYED_PIC_REF;
1411  for (i = out_idx; h->delayed_pic[i]; i++)
1412  h->delayed_pic[i] = h->delayed_pic[i + 1];
1413  }
1414  if (!out_of_order && pics > h->avctx->has_b_frames) {
1415  h->next_output_pic = out;
1416  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
1417  h->next_outputed_poc = INT_MIN;
1418  } else
1419  h->next_outputed_poc = out->poc;
1420 
1421  if (out->recovered) {
1422  // We have reached a recovery point and all frames after it in
1423  // display order are "recovered".
1424  h->frame_recovered |= FRAME_RECOVERED_SEI;
1425  }
1426  out->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
1427 
1428  if (!out->recovered) {
1429  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
1430  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
1431  h->next_output_pic = NULL;
1432  } else {
1433  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
1434  }
1435  }
1436  } else {
1437  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
1438  }
1439 
1440  return 0;
1441 }
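/* Sketch of the reordering logic above: with a single B-frame the POCs
 * arrive in decode order e.g. 0, 4, 2. The displaced entry makes
 * out_of_order = 1, so has_b_frames grows to 1 and each picture waits in
 * delayed_pic[] until the following one shows that no smaller POC is still
 * pending; the lowest-POC entry is then chosen as next_output_pic. */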
1442 
1443 /* This function is called right after decoding the slice header for a first
1444  * slice in a field (or a frame). It decides whether we are decoding a new frame
1445  * or a second field in a pair and does the necessary setup.
1446  */
1447 static int h264_field_start(H264Context *h, const H264SliceContext *sl,
1448  const H2645NAL *nal, int first_slice)
1449 {
1450  int i;
1451  const SPS *sps;
1452 
1453  int last_pic_structure, last_pic_droppable, ret;
1454 
1455  ret = h264_init_ps(h, sl, first_slice);
1456  if (ret < 0)
1457  return ret;
1458 
1459  sps = h->ps.sps;
1460 
1461  if (sps && sps->bitstream_restriction_flag &&
1462  h->avctx->has_b_frames < sps->num_reorder_frames) {
1463  h->avctx->has_b_frames = sps->num_reorder_frames;
1464  }
1465 
1466  last_pic_droppable = h->droppable;
1467  last_pic_structure = h->picture_structure;
1468  h->droppable = (nal->ref_idc == 0);
1469  h->picture_structure = sl->picture_structure;
1470 
1471  h->poc.frame_num = sl->frame_num;
1472  h->poc.poc_lsb = sl->poc_lsb;
1473  h->poc.delta_poc_bottom = sl->delta_poc_bottom;
1474  h->poc.delta_poc[0] = sl->delta_poc[0];
1475  h->poc.delta_poc[1] = sl->delta_poc[1];
1476 
1477  /* Shorten frame num gaps so we don't have to allocate reference
1478  * frames just to throw them away */
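/* (Example: with prev_frame_num = 1, frame_num = 10 and ref_frame_count = 3,
 * prev_frame_num is bumped to (10 - 3) - 1 = 6, so the gap loop below only
 * synthesizes frames 7, 8 and 9, the ones that could still be referenced.) */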
1479  if (h->poc.frame_num != h->poc.prev_frame_num) {
1480  int unwrap_prev_frame_num = h->poc.prev_frame_num;
1481  int max_frame_num = 1 << sps->log2_max_frame_num;
1482 
1483  if (unwrap_prev_frame_num > h->poc.frame_num)
1484  unwrap_prev_frame_num -= max_frame_num;
1485 
1486  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
1487  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
1488  if (unwrap_prev_frame_num < 0)
1489  unwrap_prev_frame_num += max_frame_num;
1490 
1491  h->poc.prev_frame_num = unwrap_prev_frame_num;
1492  }
1493  }
1494 
1495  /* See if we have a decoded first field looking for a pair...
1496  * Here, we're using that to see if we should mark previously
1497  * decoded frames as "finished".
1498  * We have to do that before the "dummy" in-between frame allocation,
1499  * since that can modify h->cur_pic_ptr. */
1500  if (h->first_field) {
1501  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
1502  av_assert0(h->cur_pic_ptr);
1503  av_assert0(h->cur_pic_ptr->f->buf[0]);
1504  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1505 
1506  /* Mark old field/frame as completed */
1507  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
1508  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
1509  }
1510 
1511  /* figure out if we have a complementary field pair */
1512  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1513  /* Previous field is unmatched. Don't display it, but let it
1514  * remain for reference if marked as such. */
1515  if (last_pic_structure != PICT_FRAME) {
1516  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1517  last_pic_structure == PICT_TOP_FIELD);
1518  }
1519  } else {
1520  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1521  /* This and previous field were reference, but had
1522  * different frame_nums. Consider this field first in
1523  * pair. Throw away previous field except for reference
1524  * purposes. */
1525  if (last_pic_structure != PICT_FRAME) {
1526  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1527  last_pic_structure == PICT_TOP_FIELD);
1528  }
1529  } else {
1530  /* Second field in complementary pair */
1531  if (!((last_pic_structure == PICT_TOP_FIELD &&
1532  h->picture_structure == PICT_BOTTOM_FIELD) ||
1533  (last_pic_structure == PICT_BOTTOM_FIELD &&
1534  h->picture_structure == PICT_TOP_FIELD))) {
1535  av_log(h->avctx, AV_LOG_ERROR,
1536  "Invalid field mode combination %d/%d\n",
1537  last_pic_structure, h->picture_structure);
1538  h->picture_structure = last_pic_structure;
1539  h->droppable = last_pic_droppable;
1540  return AVERROR_INVALIDDATA;
1541  } else if (last_pic_droppable != h->droppable) {
1542  avpriv_request_sample(h->avctx,
1543  "Found reference and non-reference fields in the same frame, which");
1544  h->picture_structure = last_pic_structure;
1545  h->droppable = last_pic_droppable;
1546  return AVERROR_PATCHWELCOME;
1547  }
1548  }
1549  }
1550  }
1551 
1552  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
1553  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
1554  H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
1555  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
1556  h->poc.frame_num, h->poc.prev_frame_num);
1557  if (!sps->gaps_in_frame_num_allowed_flag)
1558  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
1559  h->last_pocs[i] = INT_MIN;
1560  ret = h264_frame_start(h);
1561  if (ret < 0) {
1562  h->first_field = 0;
1563  return ret;
1564  }
1565 
1566  h->poc.prev_frame_num++;
1567  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
1568  h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
1569  h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
1570  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
1571  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
1572 
1573  h->explicit_ref_marking = 0;
1574  ret = ff_h264_execute_ref_pic_marking(h);
1575  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1576  return ret;
1577  /* Error concealment: If a ref is missing, copy the previous ref
1578  * in its place.
1579  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
1580  * many assumptions about there being no actual duplicates.
1581  * FIXME: This does not copy padding for out-of-frame motion
1582  * vectors. Given we are concealing a lost frame, this probably
1583  * is not noticeable by comparison, but it should be fixed. */
1584  if (h->short_ref_count) {
1585  int c[4] = {
1586  1<<(h->ps.sps->bit_depth_luma-1),
1587  1<<(h->ps.sps->bit_depth_chroma-1),
1588  1<<(h->ps.sps->bit_depth_chroma-1),
1589  -1
1590  };
1591 
1592  if (prev &&
1593  h->short_ref[0]->f->width == prev->f->width &&
1594  h->short_ref[0]->f->height == prev->f->height &&
1595  h->short_ref[0]->f->format == prev->f->format) {
1596  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
1597  if (prev->field_picture)
1598  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
1599  av_image_copy(h->short_ref[0]->f->data,
1600  h->short_ref[0]->f->linesize,
1601  (const uint8_t **)prev->f->data,
1602  prev->f->linesize,
1603  prev->f->format,
1604  prev->f->width,
1605  prev->f->height);
1606  h->short_ref[0]->poc = prev->poc + 2U;
1607  } else if (!h->frame_recovered && !h->avctx->hwaccel)
1608  ff_color_frame(h->short_ref[0]->f, c);
1609  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
1610  }
1611  }
1612 
1613  /* See if we have a decoded first field looking for a pair...
1614  * We're using that to see whether to continue decoding in that
1615  * frame, or to allocate a new one. */
1616  if (h->first_field) {
1617  av_assert0(h->cur_pic_ptr);
1618  av_assert0(h->cur_pic_ptr->f->buf[0]);
1619  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1620 
1621  /* figure out if we have a complementary field pair */
1622  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1623  /* Previous field is unmatched. Don't display it, but let it
1624  * remain for reference if marked as such. */
1625  h->missing_fields ++;
1626  h->cur_pic_ptr = NULL;
1627  h->first_field = FIELD_PICTURE(h);
1628  } else {
1629  h->missing_fields = 0;
1630  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1631  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1632  h->picture_structure==PICT_BOTTOM_FIELD);
1633  /* This and the previous field had different frame_nums.
1634  * Consider this field first in pair. Throw away previous
1635  * one except for reference purposes. */
1636  h->first_field = 1;
1637  h->cur_pic_ptr = NULL;
1638  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
1639  /* This frame was already output, we cannot draw into it
1640  * anymore.
1641  */
1642  h->first_field = 1;
1643  h->cur_pic_ptr = NULL;
1644  } else {
1645  /* Second field in complementary pair */
1646  h->first_field = 0;
1647  }
1648  }
1649  } else {
1650  /* Frame or first field in a potentially complementary pair */
1651  h->first_field = FIELD_PICTURE(h);
1652  }
1653 
1654  if (!FIELD_PICTURE(h) || h->first_field) {
1655  if (h264_frame_start(h) < 0) {
1656  h->first_field = 0;
1657  return AVERROR_INVALIDDATA;
1658  }
1659  } else {
1660  int field = h->picture_structure == PICT_BOTTOM_FIELD;
1661  release_unused_pictures(h, 0);
1662  h->cur_pic_ptr->tf.owner[field] = h->avctx;
1663  }
1664  /* Some macroblocks can be accessed before they're available in case
1665  * of lost slices, MBAFF or threading. */
1666  if (FIELD_PICTURE(h)) {
1667  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
1668  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
1669  } else {
1670  memset(h->slice_table, -1,
1671  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1672  }
1673 
1674  ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
1675  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
1676  if (ret < 0)
1677  return ret;
1678 
1679  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
1680  h->nb_mmco = sl->nb_mmco;
1681  h->explicit_ref_marking = sl->explicit_ref_marking;
1682 
1683  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
1684 
1685  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
1686  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
1687 
1688  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1689  h->valid_recovery_point = 1;
1690 
1691  if ( h->recovery_frame < 0
1692  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1693  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
1694 
1695  if (!h->valid_recovery_point)
1696  h->recovery_frame = h->poc.frame_num;
1697  }
1698  }
1699 
1700  h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE);
1701 
1702  if (nal->type == H264_NAL_IDR_SLICE ||
1703  (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
1704  h->recovery_frame = -1;
1705  h->cur_pic_ptr->recovered = 1;
1706  }
1707  // If we have an IDR, all frames after it in decoded order are
1708  // "recovered".
1709  if (nal->type == H264_NAL_IDR_SLICE)
1710  h->frame_recovered |= FRAME_RECOVERED_IDR;
1711 #if 1
1712  h->cur_pic_ptr->recovered |= h->frame_recovered;
1713 #else
1714  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
1715 #endif
1716 
1717  /* Set the frame properties/side data. Only done for the second field in
1718  * field coded frames, since some SEI information is present for each field
1719  * and is merged by the SEI parsing code. */
1720  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
1721  ret = h264_export_frame_props(h);
1722  if (ret < 0)
1723  return ret;
1724 
1725  ret = h264_select_output_frame(h);
1726  if (ret < 0)
1727  return ret;
1728  }
1729 
1730  return 0;
1731 }
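
/* Illustrative sketch, not part of h264_slice.c: the field-pairing rules
 * of h264_field_start() above, isolated into a single decision function.
 * All names below are hypothetical stand-ins, not FFmpeg API. */
#include <stdbool.h>

enum pair_action { START_NEW_FRAME, SECOND_FIELD, DISCARD_UNMATCHED };

static enum pair_action classify_field(bool have_first_field, bool field_coded,
                                       bool same_structure_as_prev,
                                       bool frame_num_matches, bool already_output)
{
    if (!have_first_field)
        return START_NEW_FRAME;     /* a frame, or the first field of a pair */
    if (!field_coded || same_structure_as_prev)
        return DISCARD_UNMATCHED;   /* previous field never found its mate */
    if (!frame_num_matches || already_output)
        return START_NEW_FRAME;     /* treat this field as a new first field */
    return SECOND_FIELD;            /* complementary pair completed */
}
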
1732 
1733 static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,
1734  const H2645NAL *nal)
1735 {
1736  const SPS *sps;
1737  const PPS *pps;
1738  int ret;
1739  unsigned int slice_type, tmp, i;
1740  int field_pic_flag, bottom_field_flag;
1741  int first_slice = sl == h->slice_ctx && !h->current_slice;
1742  int picture_structure;
1743 
1744  if (first_slice)
1745  av_assert0(!h->setup_finished);
1746 
1747  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1748 
1749  slice_type = get_ue_golomb_31(&sl->gb);
1750  if (slice_type > 9) {
1751  av_log(h->avctx, AV_LOG_ERROR,
1752  "slice type %d too large at %d\n",
1753  slice_type, sl->first_mb_addr);
1754  return AVERROR_INVALIDDATA;
1755  }
1756  if (slice_type > 4) {
1757  slice_type -= 5;
1758  sl->slice_type_fixed = 1;
1759  } else
1760  sl->slice_type_fixed = 0;
1761 
1762  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1763  sl->slice_type = slice_type;
1764  sl->slice_type_nos = slice_type & 3;
1765 
1766  if (nal->type == H264_NAL_IDR_SLICE &&
1767  sl->slice_type_nos != AV_PICTURE_TYPE_I) {
1768  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1769  return AVERROR_INVALIDDATA;
1770  }
1771 
1772  sl->pps_id = get_ue_golomb(&sl->gb);
1773  if (sl->pps_id >= MAX_PPS_COUNT) {
1774  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1775  return AVERROR_INVALIDDATA;
1776  }
1777  if (!h->ps.pps_list[sl->pps_id]) {
1778  av_log(h->avctx, AV_LOG_ERROR,
1779  "non-existing PPS %u referenced\n",
1780  sl->pps_id);
1781  return AVERROR_INVALIDDATA;
1782  }
1783  pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
1784  sps = pps->sps;
1785 
1786  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
1787  if (!first_slice) {
1788  if (h->poc.frame_num != sl->frame_num) {
1789  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1790  h->poc.frame_num, sl->frame_num);
1791  return AVERROR_INVALIDDATA;
1792  }
1793  }
1794 
1795  sl->mb_mbaff = 0;
1796 
1797  if (sps->frame_mbs_only_flag) {
1798  picture_structure = PICT_FRAME;
1799  } else {
1800  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1801  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1802  return -1;
1803  }
1804  field_pic_flag = get_bits1(&sl->gb);
1805  if (field_pic_flag) {
1806  bottom_field_flag = get_bits1(&sl->gb);
1807  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1808  } else {
1809  picture_structure = PICT_FRAME;
1810  }
1811  }
1812  sl->picture_structure = picture_structure;
1813  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1814 
1815  if (picture_structure == PICT_FRAME) {
1816  sl->curr_pic_num = sl->frame_num;
1817  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1818  } else {
1819  sl->curr_pic_num = 2 * sl->frame_num + 1;
1820  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1821  }
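
/* Worked example of the picture-number mapping above (per the H.264
 * spec), assuming log2_max_frame_num = 4, i.e. MaxFrameNum = 16:
 *   frame picture, frame_num = 5 -> curr_pic_num =  5, max_pic_num = 16
 *   field picture, frame_num = 5 -> curr_pic_num = 11, max_pic_num = 32
 * Fields get twice the numbering space because each coded frame number
 * covers two individually addressable fields. */
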
1822 
1823  if (nal->type == H264_NAL_IDR_SLICE)
1824  get_ue_golomb_long(&sl->gb); /* idr_pic_id */
1825 
1826  sl->poc_lsb = 0;
1827  sl->delta_poc_bottom = 0;
1828  if (sps->poc_type == 0) {
1829  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1830 
1831  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1832  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1833  }
1834 
1835  sl->delta_poc[0] = sl->delta_poc[1] = 0;
1836  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1837  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1838 
1839  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1840  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1841  }
1842 
1843  sl->redundant_pic_count = 0;
1844  if (pps->redundant_pic_cnt_present)
1845  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1846 
1847  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1848  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1849 
1851  &sl->gb, pps, sl->slice_type_nos,
1852  picture_structure, h->avctx);
1853  if (ret < 0)
1854  return ret;
1855 
1856  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
1857  ret = ff_h264_decode_ref_pic_list_reordering(sl, h->avctx);
1858  if (ret < 0) {
1859  sl->ref_count[1] = sl->ref_count[0] = 0;
1860  return ret;
1861  }
1862  }
1863 
1864  sl->pwt.use_weight = 0;
1865  for (i = 0; i < 2; i++) {
1866  sl->pwt.luma_weight_flag[i] = 0;
1867  sl->pwt.chroma_weight_flag[i] = 0;
1868  }
1869  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1870  (pps->weighted_bipred_idc == 1 &&
1871  sl->slice_type_nos == AV_PICTURE_TYPE_B)) {
1872  ret = ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count,
1873  sl->slice_type_nos, &sl->pwt,
1874  picture_structure, h->avctx);
1875  if (ret < 0)
1876  return ret;
1877  }
1878 
1879  sl->explicit_ref_marking = 0;
1880  if (nal->ref_idc) {
1881  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1882  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1883  return AVERROR_INVALIDDATA;
1884  }
1885 
1886  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1887  tmp = get_ue_golomb_31(&sl->gb);
1888  if (tmp > 2) {
1889  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
1890  return AVERROR_INVALIDDATA;
1891  }
1892  sl->cabac_init_idc = tmp;
1893  }
1894 
1895  sl->last_qscale_diff = 0;
1896  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
1897  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
1898  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
1899  return AVERROR_INVALIDDATA;
1900  }
1901  sl->qscale = tmp;
1902  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
1903  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
1904  // FIXME qscale / qp ... stuff
1905  if (sl->slice_type == AV_PICTURE_TYPE_SP)
1906  get_bits1(&sl->gb); /* sp_for_switch_flag */
1907  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
1908  sl->slice_type == AV_PICTURE_TYPE_SI)
1909  get_se_golomb(&sl->gb); /* slice_qs_delta */
1910 
1911  sl->deblocking_filter = 1;
1912  sl->slice_alpha_c0_offset = 0;
1913  sl->slice_beta_offset = 0;
1914  if (pps->deblocking_filter_parameters_present) {
1915  tmp = get_ue_golomb_31(&sl->gb);
1916  if (tmp > 2) {
1917  av_log(h->avctx, AV_LOG_ERROR,
1918  "deblocking_filter_idc %u out of range\n", tmp);
1919  return AVERROR_INVALIDDATA;
1920  }
1921  sl->deblocking_filter = tmp;
1922  if (sl->deblocking_filter < 2)
1923  sl->deblocking_filter ^= 1; // 1<->0
1924 
1925  if (sl->deblocking_filter) {
1926  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
1927  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
1928  if (slice_alpha_c0_offset_div2 > 6 ||
1929  slice_alpha_c0_offset_div2 < -6 ||
1930  slice_beta_offset_div2 > 6 ||
1931  slice_beta_offset_div2 < -6) {
1932  av_log(h->avctx, AV_LOG_ERROR,
1933  "deblocking filter parameters %d %d out of range\n",
1934  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
1935  return AVERROR_INVALIDDATA;
1936  }
1937  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
1938  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
1939  }
1940  }
1941 
1942  return 0;
1943 }
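
/* Illustrative sketch, not part of h264_slice.c: the slice header parsed
 * above is mostly ue(v)/se(v) Exp-Golomb fields. A naive ue(v) reader,
 * equivalent in spirit to get_ue_golomb() (FFmpeg's version is
 * table-accelerated); the BitReader type is a hypothetical helper. */
#include <stddef.h>
#include <stdint.h>

typedef struct BitReader { const uint8_t *buf; size_t pos; } BitReader;

static int br_bit(BitReader *br)
{
    int bit = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
    br->pos++;
    return bit;
}

static uint32_t ue_v(BitReader *br)
{
    int zeros = 0;
    while (!br_bit(br))                 /* leading zeros select the prefix */
        zeros++;
    uint32_t suffix = 0;
    for (int i = 0; i < zeros; i++)     /* same number of suffix bits */
        suffix = (suffix << 1) | br_bit(br);
    return (1u << zeros) - 1 + suffix;  /* codeNum = 2^n - 1 + suffix */
}
/* se(v) then maps codeNum k to (-1)^(k+1) * ceil(k/2), which is what
 * get_se_golomb() returns for the delta fields above. */
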
1944 
1945 /* do all the per-slice initialization needed before we can start decoding the
1946  * actual MBs */
1947 static int h264_slice_init(H264Context *h, H264SliceContext *sl,
1948  const H2645NAL *nal)
1949 {
1950  int i, j, ret = 0;
1951 
1952  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
1953  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
1954  return AVERROR_INVALIDDATA;
1955  }
1956 
1957  av_assert1(h->mb_num == h->mb_width * h->mb_height);
1958  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
1959  sl->first_mb_addr >= h->mb_num) {
1960  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
1961  return AVERROR_INVALIDDATA;
1962  }
1963  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
1964  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
1965  FIELD_OR_MBAFF_PICTURE(h);
1966  if (h->picture_structure == PICT_BOTTOM_FIELD)
1967  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
1968  av_assert1(sl->mb_y < h->mb_height);
1969 
1970  ret = ff_h264_build_ref_list(h, sl);
1971  if (ret < 0)
1972  return ret;
1973 
1974  if (h->ps.pps->weighted_bipred_idc == 2 &&
1975  sl->slice_type_nos == AV_PICTURE_TYPE_B) {
1976  implicit_weight_table(h, sl, -1);
1977  if (FRAME_MBAFF(h)) {
1978  implicit_weight_table(h, sl, 0);
1979  implicit_weight_table(h, sl, 1);
1980  }
1981  }
1982 
1983  if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !sl->direct_spatial_mv_pred)
1984  ff_h264_direct_dist_scale_factor(h, sl);
1985  if (!h->setup_finished)
1986  ff_h264_direct_ref_list_init(h, sl);
1987 
1988  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
1989  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
1990  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
1991  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
1992  sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
1993  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
1994  sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
1995  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
1996  nal->ref_idc == 0))
1997  sl->deblocking_filter = 0;
1998 
1999  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
2000  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
2001  /* Cheat slightly for speed:
2002  * Do not bother to deblock across slices. */
2003  sl->deblocking_filter = 2;
2004  } else {
2005  h->postpone_filter = 1;
2006  }
2007  }
2008  sl->qp_thresh = 15 -
2009  FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
2010  FFMAX3(0,
2011  h->ps.pps->chroma_qp_index_offset[0],
2012  h->ps.pps->chroma_qp_index_offset[1]) +
2013  6 * (h->ps.sps->bit_depth_luma - 8);
2014 
2015  sl->slice_num = ++h->current_slice;
2016 
2017  if (sl->slice_num)
2018  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
2019  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
2020  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
2021  && sl->slice_num >= MAX_SLICES) {
2022  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
2023  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
2024  }
2025 
2026  for (j = 0; j < 2; j++) {
2027  int id_list[16];
2028  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
2029  for (i = 0; i < 16; i++) {
2030  id_list[i] = 60;
2031  if (j < sl->list_count && i < sl->ref_count[j] &&
2032  sl->ref_list[j][i].parent->f->buf[0]) {
2033  int k;
2034  AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
2035  for (k = 0; k < h->short_ref_count; k++)
2036  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
2037  id_list[i] = k;
2038  break;
2039  }
2040  for (k = 0; k < h->long_ref_count; k++)
2041  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
2042  id_list[i] = h->short_ref_count + k;
2043  break;
2044  }
2045  }
2046  }
2047 
2048  ref2frm[0] =
2049  ref2frm[1] = -1;
2050  for (i = 0; i < 16; i++)
2051  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2052  ref2frm[18 + 0] =
2053  ref2frm[18 + 1] = -1;
2054  for (i = 16; i < 48; i++)
2055  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2056  (sl->ref_list[j][i].reference & 3);
2057  }
2058 
2059  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2060  av_log(h->avctx, AV_LOG_DEBUG,
2061  "slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2062  sl->slice_num,
2063  (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
2064  sl->mb_y * h->mb_width + sl->mb_x,
2065  av_get_picture_type_char(sl->slice_type),
2066  sl->slice_type_fixed ? " fix" : "",
2067  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2068  h->poc.frame_num,
2069  h->cur_pic_ptr->field_poc[0],
2070  h->cur_pic_ptr->field_poc[1],
2071  sl->ref_count[0], sl->ref_count[1],
2072  sl->qscale,
2073  sl->deblocking_filter,
2074  sl->slice_alpha_c0_offset, sl->slice_beta_offset,
2075  sl->pwt.use_weight,
2076  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2077  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2078  }
2079 
2080  return 0;
2081 }
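
/* Worked example for the qp_thresh computed in h264_slice_init() above:
 * with 8-bit luma and all alpha/beta/chroma offsets zero, qp_thresh = 15.
 * The spec's alpha/beta deblocking tables are zero below index 16, so a
 * macroblock whose QP, also when averaged with each neighbour, stays at
 * or below this threshold cannot be changed by the loop filter;
 * fill_filter_caches() below uses exactly this shortcut. */
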
2082 
2083 int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
2084 {
2085  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2086  int first_slice = sl == h->slice_ctx && !h->current_slice;
2087  int ret;
2088 
2089  sl->gb = nal->gb;
2090 
2091  ret = h264_slice_header_parse(h, sl, nal);
2092  if (ret < 0)
2093  return ret;
2094 
2095  // discard redundant pictures
2096  if (sl->redundant_pic_count > 0) {
2097  sl->ref_count[0] = sl->ref_count[1] = 0;
2098  return 0;
2099  }
2100 
2101  if (sl->first_mb_addr == 0 || !h->current_slice) {
2102  if (h->setup_finished) {
2103  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2104  return AVERROR_INVALIDDATA;
2105  }
2106  }
2107 
2108  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2109  if (h->current_slice) {
2110  // this slice starts a new field
2111  // first decode any pending queued slices
2112  if (h->nb_slice_ctx_queued) {
2113  H264SliceContext tmp_ctx;
2114 
2115  ret = ff_h264_execute_decode_slices(h);
2116  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2117  return ret;
2118 
2119  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2120  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2121  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2122  sl = h->slice_ctx;
2123  }
2124 
2125  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2126  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2127  if (ret < 0)
2128  return ret;
2129  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
2130  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2131  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2132  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2133  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2134  h->cur_pic_ptr = NULL;
2135  if (ret < 0)
2136  return ret;
2137  } else
2138  return AVERROR_INVALIDDATA;
2139  }
2140 
2141  if (!h->first_field) {
2142  if (h->cur_pic_ptr && !h->droppable) {
2143  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2144  h->picture_structure == PICT_BOTTOM_FIELD);
2145  }
2146  h->cur_pic_ptr = NULL;
2147  }
2148  }
2149 
2150  if (!h->current_slice)
2151  av_assert0(sl == h->slice_ctx);
2152 
2153  if (h->current_slice == 0 && !h->first_field) {
2154  if (
2155  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2156  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2157  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2158  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2159  h->avctx->skip_frame >= AVDISCARD_ALL) {
2160  return 0;
2161  }
2162  }
2163 
2164  if (!first_slice) {
2165  const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
2166 
2167  if (h->ps.pps->sps_id != pps->sps_id ||
2168  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2169  (h->setup_finished && h->ps.pps != pps)*/) {
2170  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2171  return AVERROR_INVALIDDATA;
2172  }
2173  if (h->ps.sps != pps->sps) {
2174  av_log(h->avctx, AV_LOG_ERROR,
2175  "SPS changed in the middle of the frame\n");
2176  return AVERROR_INVALIDDATA;
2177  }
2178  }
2179 
2180  if (h->current_slice == 0) {
2181  ret = h264_field_start(h, sl, nal, first_slice);
2182  if (ret < 0)
2183  return ret;
2184  } else {
2185  if (h->picture_structure != sl->picture_structure ||
2186  h->droppable != (nal->ref_idc == 0)) {
2187  av_log(h->avctx, AV_LOG_ERROR,
2188  "Changing field mode (%d -> %d) between slices is not allowed\n",
2189  h->picture_structure, sl->picture_structure);
2190  return AVERROR_INVALIDDATA;
2191  } else if (!h->cur_pic_ptr) {
2192  av_log(h->avctx, AV_LOG_ERROR,
2193  "unset cur_pic_ptr on slice %d\n",
2194  h->current_slice + 1);
2195  return AVERROR_INVALIDDATA;
2196  }
2197  }
2198 
2199  ret = h264_slice_init(h, sl, nal);
2200  if (ret < 0)
2201  return ret;
2202 
2203  h->nb_slice_ctx_queued++;
2204 
2205  return 0;
2206 }
2207 
2208 int ff_h264_get_slice_type(const H264SliceContext *sl)
2209 {
2210  switch (sl->slice_type) {
2211  case AV_PICTURE_TYPE_P:
2212  return 0;
2213  case AV_PICTURE_TYPE_B:
2214  return 1;
2215  case AV_PICTURE_TYPE_I:
2216  return 2;
2217  case AV_PICTURE_TYPE_SP:
2218  return 3;
2219  case AV_PICTURE_TYPE_SI:
2220  return 4;
2221  default:
2222  return AVERROR_INVALIDDATA;
2223  }
2224 }
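
/* The values returned above are the bitstream slice_type codes
 * (P=0, B=1, I=2, SP=3, SI=4), i.e. the inverse of the
 * ff_h264_golomb_to_pict_type[] mapping used during parsing; hardware
 * acceleration backends typically forward this code verbatim. */
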
2225 
2226 static av_always_inline void fill_filter_caches_inter(const H264Context *h,
2227  H264SliceContext *sl,
2228  int mb_type, int top_xy,
2229  int left_xy[LEFT_MBS],
2230  int top_type,
2231  int left_type[LEFT_MBS],
2232  int mb_xy, int list)
2233 {
2234  int b_stride = h->b_stride;
2235  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2236  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
2237  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
2238  if (USES_LIST(top_type, list)) {
2239  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2240  const int b8_xy = 4 * top_xy + 2;
2241  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2242  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2243  ref_cache[0 - 1 * 8] =
2244  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2245  ref_cache[2 - 1 * 8] =
2246  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2247  } else {
2248  AV_ZERO128(mv_dst - 1 * 8);
2249  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2250  }
2251 
2252  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2253  if (USES_LIST(left_type[LTOP], list)) {
2254  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2255  const int b8_xy = 4 * left_xy[LTOP] + 1;
2256  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2257  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2258  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2259  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2260  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2261  ref_cache[-1 + 0] =
2262  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2263  ref_cache[-1 + 16] =
2264  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2265  } else {
2266  AV_ZERO32(mv_dst - 1 + 0);
2267  AV_ZERO32(mv_dst - 1 + 8);
2268  AV_ZERO32(mv_dst - 1 + 16);
2269  AV_ZERO32(mv_dst - 1 + 24);
2270  ref_cache[-1 + 0] =
2271  ref_cache[-1 + 8] =
2272  ref_cache[-1 + 16] =
2273  ref_cache[-1 + 24] = LIST_NOT_USED;
2274  }
2275  }
2276  }
2277 
2278  if (!USES_LIST(mb_type, list)) {
2279  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2280  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2281  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2282  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2283  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2284  return;
2285  }
2286 
2287  {
2288  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2289  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2290  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2291  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2292  AV_WN32A(&ref_cache[0 * 8], ref01);
2293  AV_WN32A(&ref_cache[1 * 8], ref01);
2294  AV_WN32A(&ref_cache[2 * 8], ref23);
2295  AV_WN32A(&ref_cache[3 * 8], ref23);
2296  }
2297 
2298  {
2299  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2300  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2301  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2302  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2303  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2304  }
2305 }
2306 
2307 /**
2308  * @return non-zero if the loop filter can be skipped
2309  */
2310 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2311 {
2312  const int mb_xy = sl->mb_xy;
2313  int top_xy, left_xy[LEFT_MBS];
2314  int top_type, left_type[LEFT_MBS];
2315  uint8_t *nnz;
2316  uint8_t *nnz_cache;
2317 
2318  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2319 
2320  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
2321  if (FRAME_MBAFF(h)) {
2322  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2323  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2324  if (sl->mb_y & 1) {
2325  if (left_mb_field_flag != curr_mb_field_flag)
2326  left_xy[LTOP] -= h->mb_stride;
2327  } else {
2328  if (curr_mb_field_flag)
2329  top_xy += h->mb_stride &
2330  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2331  if (left_mb_field_flag != curr_mb_field_flag)
2332  left_xy[LBOT] += h->mb_stride;
2333  }
2334  }
2335 
2336  sl->top_mb_xy = top_xy;
2337  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2338  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2339  {
2340  /* For sufficiently low qp, filtering wouldn't do anything.
2341  * This is a conservative estimate: could also check beta_offset
2342  * and more accurate chroma_qp. */
2343  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2344  int qp = h->cur_pic.qscale_table[mb_xy];
2345  if (qp <= qp_thresh &&
2346  (left_xy[LTOP] < 0 ||
2347  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2348  (top_xy < 0 ||
2349  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2350  if (!FRAME_MBAFF(h))
2351  return 1;
2352  if ((left_xy[LTOP] < 0 ||
2353  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2354  (top_xy < h->mb_stride ||
2355  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2356  return 1;
2357  }
2358  }
2359 
2360  top_type = h->cur_pic.mb_type[top_xy];
2361  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2362  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
2363  if (sl->deblocking_filter == 2) {
2364  if (h->slice_table[top_xy] != sl->slice_num)
2365  top_type = 0;
2366  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2367  left_type[LTOP] = left_type[LBOT] = 0;
2368  } else {
2369  if (h->slice_table[top_xy] == 0xFFFF)
2370  top_type = 0;
2371  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2372  left_type[LTOP] = left_type[LBOT] = 0;
2373  }
2374  sl->top_type = top_type;
2375  sl->left_type[LTOP] = left_type[LTOP];
2376  sl->left_type[LBOT] = left_type[LBOT];
2377 
2378  if (IS_INTRA(mb_type))
2379  return 0;
2380 
2381  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2382  top_type, left_type, mb_xy, 0);
2383  if (sl->list_count == 2)
2384  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2385  top_type, left_type, mb_xy, 1);
2386 
2387  nnz = h->non_zero_count[mb_xy];
2388  nnz_cache = sl->non_zero_count_cache;
2389  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2390  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2391  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2392  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2393  sl->cbp = h->cbp_table[mb_xy];
2394 
2395  if (top_type) {
2396  nnz = h->non_zero_count[top_xy];
2397  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2398  }
2399 
2400  if (left_type[LTOP]) {
2401  nnz = h->non_zero_count[left_xy[LTOP]];
2402  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2403  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2404  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2405  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2406  }
2407 
2408  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2409  * from what the loop filter needs */
2410  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
2411  if (IS_8x8DCT(top_type)) {
2412  nnz_cache[4 + 8 * 0] =
2413  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2414  nnz_cache[6 + 8 * 0] =
2415  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2416  }
2417  if (IS_8x8DCT(left_type[LTOP])) {
2418  nnz_cache[3 + 8 * 1] =
2419  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2420  }
2421  if (IS_8x8DCT(left_type[LBOT])) {
2422  nnz_cache[3 + 8 * 3] =
2423  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2424  }
2425 
2426  if (IS_8x8DCT(mb_type)) {
2427  nnz_cache[scan8[0]] =
2428  nnz_cache[scan8[1]] =
2429  nnz_cache[scan8[2]] =
2430  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2431 
2432  nnz_cache[scan8[0 + 4]] =
2433  nnz_cache[scan8[1 + 4]] =
2434  nnz_cache[scan8[2 + 4]] =
2435  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2436 
2437  nnz_cache[scan8[0 + 8]] =
2438  nnz_cache[scan8[1 + 8]] =
2439  nnz_cache[scan8[2 + 8]] =
2440  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2441 
2442  nnz_cache[scan8[0 + 12]] =
2443  nnz_cache[scan8[1 + 12]] =
2444  nnz_cache[scan8[2 + 12]] =
2445  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2446  }
2447  }
2448 
2449  return 0;
2450 }
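
/* Illustrative sketch, not part of h264_slice.c: the early-skip test at
 * the top of fill_filter_caches() in isolation. Helper and parameter
 * names are hypothetical. */
static int can_skip_deblock(int qp, int qp_left, int qp_top,
                            int have_left, int have_top, int qp_thresh)
{
    if (qp > qp_thresh)
        return 0;
    if (have_left && ((qp + qp_left + 1) >> 1) > qp_thresh)
        return 0;   /* averaged edge QP still reaches the filter tables */
    if (have_top && ((qp + qp_top + 1) >> 1) > qp_thresh)
        return 0;
    return 1;       /* filtering would be a no-op for this macroblock */
}
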
2451 
2452 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2453 {
2454  uint8_t *dest_y, *dest_cb, *dest_cr;
2455  int linesize, uvlinesize, mb_x, mb_y;
2456  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2457  const int old_slice_type = sl->slice_type;
2458  const int pixel_shift = h->pixel_shift;
2459  const int block_h = 16 >> h->chroma_y_shift;
2460 
2461  if (h->postpone_filter)
2462  return;
2463 
2464  if (sl->deblocking_filter) {
2465  for (mb_x = start_x; mb_x < end_x; mb_x++)
2466  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2467  int mb_xy, mb_type;
2468  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2469  mb_type = h->cur_pic.mb_type[mb_xy];
2470 
2471  if (FRAME_MBAFF(h))
2472  sl->mb_mbaff =
2473  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2474 
2475  sl->mb_x = mb_x;
2476  sl->mb_y = mb_y;
2477  dest_y = h->cur_pic.f->data[0] +
2478  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2479  dest_cb = h->cur_pic.f->data[1] +
2480  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2481  mb_y * sl->uvlinesize * block_h;
2482  dest_cr = h->cur_pic.f->data[2] +
2483  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2484  mb_y * sl->uvlinesize * block_h;
2485  // FIXME simplify above
2486 
2487  if (MB_FIELD(sl)) {
2488  linesize = sl->mb_linesize = sl->linesize * 2;
2489  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2490  if (mb_y & 1) { // FIXME move out of this function?
2491  dest_y -= sl->linesize * 15;
2492  dest_cb -= sl->uvlinesize * (block_h - 1);
2493  dest_cr -= sl->uvlinesize * (block_h - 1);
2494  }
2495  } else {
2496  linesize = sl->mb_linesize = sl->linesize;
2497  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2498  }
2499  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2500  uvlinesize, 0);
2501  if (fill_filter_caches(h, sl, mb_type))
2502  continue;
2503  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2504  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2505 
2506  if (FRAME_MBAFF(h)) {
2507  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2508  linesize, uvlinesize);
2509  } else {
2510  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2511  dest_cr, linesize, uvlinesize);
2512  }
2513  }
2514  }
2515  sl->slice_type = old_slice_type;
2516  sl->mb_x = end_x;
2517  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2518  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2519  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2520 }
2521 
2522 static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
2523 {
2524  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2525  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2526  h->cur_pic.mb_type[mb_xy - 1] :
2527  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2528  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2529  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2530 }
2531 
2532 /**
2533  * Draw edges and report progress for the last MB row.
2534  */
2535 static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
2536 {
2537  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2538  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2539  int height = 16 << FRAME_MBAFF(h);
2540  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2541 
2542  if (sl->deblocking_filter) {
2543  if ((top + height) >= pic_height)
2544  height += deblock_border;
2545  top -= deblock_border;
2546  }
2547 
2548  if (top >= pic_height || (top + height) < 0)
2549  return;
2550 
2551  height = FFMIN(height, pic_height - top);
2552  if (top < 0) {
2553  height = top + height;
2554  top = 0;
2555  }
2556 
2557  ff_h264_draw_horiz_band(h, sl, top, height);
2558 
2559  if (h->droppable || sl->h264->slice_ctx[0].er.error_occurred)
2560  return;
2561 
2562  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2563  h->picture_structure == PICT_BOTTOM_FIELD);
2564 }
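
/* The progress reported above pairs with ff_thread_await_progress() on
 * the consumer side: a frame-threading decoder that reads this picture
 * as a reference blocks until the rows it needs are marked done, e.g.
 *
 *     ff_thread_await_progress(&ref->tf, last_row_needed, field);
 *
 * which is why 'top' is pulled back by deblock_border above: a row may
 * only be reported once deblocking can no longer modify it. */
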
2565 
2566 static void er_add_slice(H264SliceContext *sl,
2567  int startx, int starty,
2568  int endx, int endy, int status)
2569 {
2570  if (!sl->h264->enable_er)
2571  return;
2572 
2573  if (CONFIG_ERROR_RESILIENCE) {
2574  ERContext *er = &sl->h264->slice_ctx[0].er;
2575 
2576  ff_er_add_slice(er, startx, starty, endx, endy, status);
2577  }
2578 }
2579 
2580 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2581 {
2582  H264SliceContext *sl = arg;
2583  const H264Context *h = sl->h264;
2584  int lf_x_start = sl->mb_x;
2585  int orig_deblock = sl->deblocking_filter;
2586  int ret;
2587 
2588  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2589  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2590 
2591  ret = alloc_scratch_buffers(sl, sl->linesize);
2592  if (ret < 0)
2593  return ret;
2594 
2595  sl->mb_skip_run = -1;
2596 
2597  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2598 
2599  if (h->postpone_filter)
2600  sl->deblocking_filter = 0;
2601 
2602  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2603  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2604 
2605  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && h->slice_ctx[0].er.error_status_table) {
2606  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2607  if (start_i) {
2608  int prev_status = h->slice_ctx[0].er.error_status_table[h->slice_ctx[0].er.mb_index2xy[start_i - 1]];
2609  prev_status &= ~ VP_START;
2610  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2611  h->slice_ctx[0].er.error_occurred = 1;
2612  }
2613  }
2614 
2615  if (h->ps.pps->cabac) {
2616  /* realign */
2617  align_get_bits(&sl->gb);
2618 
2619  /* init cabac */
2620  ret = ff_init_cabac_decoder(&sl->cabac,
2621  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2622  (get_bits_left(&sl->gb) + 7) / 8);
2623  if (ret < 0)
2624  return ret;
2625 
2626  ff_h264_init_cabac_states(h, sl);
2627 
2628  for (;;) {
2629  int ret, eos;
2630  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2631  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2632  sl->next_slice_idx);
2633  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2634  sl->mb_y, ER_MB_ERROR);
2635  return AVERROR_INVALIDDATA;
2636  }
2637 
2638  ret = ff_h264_decode_mb_cabac(h, sl);
2639 
2640  if (ret >= 0)
2641  ff_h264_hl_decode_mb(h, sl);
2642 
2643  // FIXME optimal? or let mb_decode decode 16x32 ?
2644  if (ret >= 0 && FRAME_MBAFF(h)) {
2645  sl->mb_y++;
2646 
2647  ret = ff_h264_decode_mb_cabac(h, sl);
2648 
2649  if (ret >= 0)
2650  ff_h264_hl_decode_mb(h, sl);
2651  sl->mb_y--;
2652  }
2653  eos = get_cabac_terminate(&sl->cabac);
2654 
2655  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2656  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2657  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2658  sl->mb_y, ER_MB_END);
2659  if (sl->mb_x >= lf_x_start)
2660  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2661  goto finish;
2662  }
2663  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2664  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2665  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2666  av_log(h->avctx, AV_LOG_ERROR,
2667  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2668  sl->mb_x, sl->mb_y,
2669  sl->cabac.bytestream_end - sl->cabac.bytestream);
2670  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2671  sl->mb_y, ER_MB_ERROR);
2672  return AVERROR_INVALIDDATA;
2673  }
2674 
2675  if (++sl->mb_x >= h->mb_width) {
2676  loop_filter(h, sl, lf_x_start, sl->mb_x);
2677  sl->mb_x = lf_x_start = 0;
2678  decode_finish_row(h, sl);
2679  ++sl->mb_y;
2680  if (FIELD_OR_MBAFF_PICTURE(h)) {
2681  ++sl->mb_y;
2682  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2683  predict_field_decoding_flag(h, sl);
2684  }
2685  }
2686 
2687  if (eos || sl->mb_y >= h->mb_height) {
2688  ff_tlog(h->avctx, "slice end %d %d\n",
2689  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2690  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2691  sl->mb_y, ER_MB_END);
2692  if (sl->mb_x > lf_x_start)
2693  loop_filter(h, sl, lf_x_start, sl->mb_x);
2694  goto finish;
2695  }
2696  }
2697  } else {
2698  for (;;) {
2699  int ret;
2700 
2701  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2702  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2703  sl->next_slice_idx);
2704  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2705  sl->mb_y, ER_MB_ERROR);
2706  return AVERROR_INVALIDDATA;
2707  }
2708 
2709  ret = ff_h264_decode_mb_cavlc(h, sl);
2710 
2711  if (ret >= 0)
2712  ff_h264_hl_decode_mb(h, sl);
2713 
2714  // FIXME optimal? or let mb_decode decode 16x32 ?
2715  if (ret >= 0 && FRAME_MBAFF(h)) {
2716  sl->mb_y++;
2717  ret = ff_h264_decode_mb_cavlc(h, sl);
2718 
2719  if (ret >= 0)
2720  ff_h264_hl_decode_mb(h, sl);
2721  sl->mb_y--;
2722  }
2723 
2724  if (ret < 0) {
2725  av_log(h->avctx, AV_LOG_ERROR,
2726  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2727  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2728  sl->mb_y, ER_MB_ERROR);
2729  return ret;
2730  }
2731 
2732  if (++sl->mb_x >= h->mb_width) {
2733  loop_filter(h, sl, lf_x_start, sl->mb_x);
2734  sl->mb_x = lf_x_start = 0;
2735  decode_finish_row(h, sl);
2736  ++sl->mb_y;
2737  if (FIELD_OR_MBAFF_PICTURE(h)) {
2738  ++sl->mb_y;
2739  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2740  predict_field_decoding_flag(h, sl);
2741  }
2742  if (sl->mb_y >= h->mb_height) {
2743  ff_tlog(h->avctx, "slice end %d %d\n",
2744  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2745 
2746  if ( get_bits_left(&sl->gb) == 0
2747  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2748  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2749  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2750 
2751  goto finish;
2752  } else {
2753  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2754  sl->mb_x, sl->mb_y, ER_MB_END);
2755 
2756  return AVERROR_INVALIDDATA;
2757  }
2758  }
2759  }
2760 
2761  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2762  ff_tlog(h->avctx, "slice end %d %d\n",
2763  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2764 
2765  if (get_bits_left(&sl->gb) == 0) {
2766  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2767  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2768  if (sl->mb_x > lf_x_start)
2769  loop_filter(h, sl, lf_x_start, sl->mb_x);
2770 
2771  goto finish;
2772  } else {
2773  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2774  sl->mb_y, ER_MB_ERROR);
2775 
2776  return AVERROR_INVALIDDATA;
2777  }
2778  }
2779  }
2780  }
2781 
2782 finish:
2783  sl->deblocking_filter = orig_deblock;
2784  return 0;
2785 }
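
/* Both entropy branches of decode_slice() share one skeleton; distilled
 * as an illustrative outline (not a drop-in replacement):
 *
 *     for (;;) {
 *         decode_one_mb();                  // CABAC or CAVLC path
 *         if (FRAME_MBAFF) decode_one_mb(); // bottom MB of the vertical pair
 *         if (++mb_x == mb_width) {         // row complete:
 *             loop_filter(row);             //   deblock the finished row
 *             decode_finish_row();          //   draw edges, report progress
 *             mb_x = 0;
 *             mb_y += 1 + FIELD_OR_MBAFF;   //   paired rows advance by two
 *         }
 *         if (slice_end()) break;           // CABAC terminate bit, or bit
 *     }                                     // budget exhausted for CAVLC
 */
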
2786 
2787 /**
2788  * Call decode_slice() for each context.
2789  *
2790  * @param h h264 master context
2791  */
2792 int ff_h264_execute_decode_slices(H264Context *h)
2793 {
2794  AVCodecContext *const avctx = h->avctx;
2795  H264SliceContext *sl;
2796  int context_count = h->nb_slice_ctx_queued;
2797  int ret = 0;
2798  int i, j;
2799 
2800  h->slice_ctx[0].next_slice_idx = INT_MAX;
2801 
2802  if (h->avctx->hwaccel || context_count < 1)
2803  return 0;
2804 
2805  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2806 
2807  if (context_count == 1) {
2808 
2809  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2810  h->postpone_filter = 0;
2811 
2812  ret = decode_slice(avctx, &h->slice_ctx[0]);
2813  h->mb_y = h->slice_ctx[0].mb_y;
2814  if (ret < 0)
2815  goto finish;
2816  } else {
2817  av_assert0(context_count > 0);
2818  for (i = 0; i < context_count; i++) {
2819  int next_slice_idx = h->mb_width * h->mb_height;
2820  int slice_idx;
2821 
2822  sl = &h->slice_ctx[i];
2823  if (CONFIG_ERROR_RESILIENCE) {
2824  sl->er.error_count = 0;
2825  }
2826 
2827  /* make sure none of those slices overlap */
2828  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2829  for (j = 0; j < context_count; j++) {
2830  H264SliceContext *sl2 = &h->slice_ctx[j];
2831  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2832 
2833  if (i == j || slice_idx2 < slice_idx)
2834  continue;
2835  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2836  }
2837  sl->next_slice_idx = next_slice_idx;
2838  }
2839 
2840  avctx->execute(avctx, decode_slice, h->slice_ctx,
2841  NULL, context_count, sizeof(h->slice_ctx[0]));
2842 
2843  /* pull back updated state from the slice contexts to the master context */
2844  sl = &h->slice_ctx[context_count - 1];
2845  h->mb_y = sl->mb_y;
2846  if (CONFIG_ERROR_RESILIENCE) {
2847  for (i = 1; i < context_count; i++)
2848  h->slice_ctx[0].er.error_count += h->slice_ctx[i].er.error_count;
2849  }
2850 
2851  if (h->postpone_filter) {
2852  h->postpone_filter = 0;
2853 
2854  for (i = 0; i < context_count; i++) {
2855  int y_end, x_end;
2856 
2857  sl = &h->slice_ctx[i];
2858  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2859  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2860 
2861  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2862  sl->mb_y = j;
2863  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2864  j == y_end - 1 ? x_end : h->mb_width);
2865  }
2866  }
2867  }
2868  }
2869 
2870 finish:
2871  h->nb_slice_ctx_queued = 0;
2872  return ret;
2873 }
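
/* Illustrative sketch, not part of h264_slice.c: the avctx->execute()
 * dispatch pattern used above, where each queued H264SliceContext is one
 * job on the slice-thread pool. 'my_job' and 'run_jobs' are hypothetical
 * names, not FFmpeg API. */
#include "libavcodec/avcodec.h"

static int my_job(AVCodecContext *avctx, void *arg)
{
    /* arg points at one element of the job array passed below */
    return 0;
}

static void run_jobs(AVCodecContext *avctx, void *jobs, int count, int job_size)
{
    /* per-job return codes not collected (NULL), as in the call above */
    avctx->execute(avctx, my_job, jobs, NULL, count, job_size);
}
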
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:39
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2566
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:418
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:909
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:658
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:237
ff_h264_unref_picture
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:44
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:306
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:138
H264SEIDisplayOrientation::hflip
int hflip
Definition: h264_sei.h:161
AV_STEREO3D_VIEW_LEFT
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
Definition: stereo3d.h:156
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1009
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:337
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:329
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:98
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1574
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
cabac.h
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:148
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1128
H264Picture::f
AVFrame * f
Definition: h264dec.h:129
out
FILE * out
Definition: movenc.c:54
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:215
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:96
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:906
H264Context::slice_ctx
H264SliceContext * slice_ctx
Definition: h264dec.h:356
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
H264Picture::ref_index
int8_t * ref_index[2]
Definition: h264dec.h:145
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:727
HWACCEL_MAX
#define HWACCEL_MAX
ff_h264_slice_context_init
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264dec.c:238
AVFrame::coded_picture_number
int coded_picture_number
picture number in bitstream order
Definition: frame.h:414
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:71
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:319
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:136
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:50
H264Picture::pps
const PPS * pps
Definition: h264dec.h:166
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:68
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:70
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2522
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
AVFrame::width
int width
Definition: frame.h:358
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:55
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
internal.h
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:296
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:128
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:452
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:483
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:523
H264SliceContext::mmco
MMCO mmco[MAX_MMCO_COUNT]
Definition: h264dec.h:328
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2535
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:273
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:397
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:797
H264_SEI_FPA_TYPE_CHECKERBOARD
@ H264_SEI_FPA_TYPE_CHECKERBOARD
Definition: h264_sei.h:62
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:133
H264SliceContext::h264
struct H264Context * h264
Definition: h264dec.h:184
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:217
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:229
ERContext
Definition: error_resilience.h:53
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:32
av_buffer_allocz
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:510
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:830
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:86
mpegvideo.h
H264Picture::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: h264dec.h:144
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:27
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:528
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:244
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:423
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:491
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:162
AV_STEREO3D_VIEW_RIGHT
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
Definition: stereo3d.h:161
H264Picture::pps_buf
AVBufferRef * pps_buf
Definition: h264dec.h:165
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ThreadFrame::f
AVFrame * f
Definition: thread.h:35
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1612
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:236
h264_mvpred.h
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:149
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:242
H264SliceContext
Definition: h264dec.h:183
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2226
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:72
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:718
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:239
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:305
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:283
AVHWAccel
Definition: avcodec.h:2410
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:415
finish
static void finish(void)
Definition: movenc.c:345
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:687
U
#define U(x)
Definition: vp56_arith.h:37
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:150
fail
#define fail()
Definition: checkasm.h:123
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264_slice.c:280
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
H264SEIA53Caption
Definition: h264_sei.h:123
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:413
frames
if it could not because there are no more frames
Definition: filter_design.txt:266
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1344
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:395
H264SliceContext::er
ERContext er
Definition: h264dec.h:186
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:99
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:48
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:378
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:169
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:273
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:464
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
ff_h264_sei_process_picture_timing
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:57
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:457
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:199
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:33
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:246
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:115
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:31
H264_SEI_FPA_TYPE_SIDE_BY_SIDE
@ H264_SEI_FPA_TYPE_SIDE_BY_SIDE
Definition: h264_sei.h:65
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:257
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:188
H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
@ H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
Definition: h264_sei.h:67
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:998
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:400
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1262
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
avassert.h
AV_STEREO3D_FRAMESEQUENCE
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
Definition: stereo3d.h:92
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
ff_color_frame
void ff_color_frame(AVFrame *frame, const int color[4])
Definition: utils.c:422
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:568
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:337
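A minimal pool lifecycle sketch, assuming fixed-size buffers and the default allocator (NULL alloc callback):

AVBufferPool *pool = av_buffer_pool_init(1024, NULL);
AVBufferRef  *buf  = av_buffer_pool_get(pool);  /* new or recycled buffer */
/* ... use buf->data ... */
av_buffer_unref(&buf);           /* buffer goes back to the pool */
av_buffer_pool_uninit(&pool);    /* freed once all buffers are returned */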
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2083
width
#define width
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:351
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
stereo3d.h
H264_SEI_FPA_TYPE_TOP_BOTTOM
@ H264_SEI_FPA_TYPE_TOP_BOTTOM
Definition: h264_sei.h:66
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:137
H264SEIA53Caption::buf_ref
AVBufferRef * buf_ref
Definition: h264_sei.h:124
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:34
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
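Conceptually this is a ceiling right-shift, i.e. ceil(a / 2^b) for non-negative a, roughly ((a) + (1 << (b)) - 1) >> (b); the decoder uses it for chroma plane dimensions:

int chroma_w = AV_CEIL_RSHIFT(1919, 1); /* 960, not the truncating 959 */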
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:74
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:610
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:834
from
const char * from
Definition: jacosubdec.c:65
to
const char * to
Definition: webvttdec.c:34
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1733
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:36
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:394
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
H264Ref::parent
H264Picture * parent
Definition: h264dec.h:180
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:37
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:76
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:200
IS_INTRA
#define IS_INTRA(x, y)
field
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field.
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:663
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:189
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:238
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:163
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: avcodec.h:233
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
Read a signed Exp-Golomb code.
Definition: golomb.h:239
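The signed mapping on top of the unsigned code number (H.264 spec clause 9.1.1) alternates signs; a sketch of the conversion, with se_from_ue() as an illustrative name:

/* codeNum 0,1,2,3,4,... maps to 0, 1, -1, 2, -2, ... */
static int se_from_ue(unsigned codeNum)
{
    return (codeNum & 1) ? (int)((codeNum + 1) / 2)
                         : -(int)(codeNum / 2);
}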
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
H264Context::enable_er
int enable_er
Definition: h264dec.h:551
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:103
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:336
int32_t
int32_t
Definition: audio_convert.c:194
arg
const char * arg
Definition: jacosubdec.c:66
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
FFABS
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:62
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:187
H264Picture::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: h264dec.h:135
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:263
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:609
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:605
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:290
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
SPS
Sequence parameter set.
Definition: h264_ps.h:44
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:38
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1911
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:278
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:194
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:348
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
src
#define src
Definition: vp8dsp.c:254
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:276
PPS
Picture parameter set.
Definition: h264_ps.h:111
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:507
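A grow-only scratch-buffer sketch in the style of the slice-context buffers; needed_size is an illustrative variable:

uint8_t *scratch      = NULL;
unsigned scratch_size = 0;
av_fast_mallocz(&scratch, &scratch_size, needed_size);
if (!scratch)
    return AVERROR(ENOMEM); /* on failure the pointer is freed and NULLed */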
mathops.h
list
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, a filter holds the list of supported formats; for video that means pixel format, for audio that means channel layout and sample format. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining list ...
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:83
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:695
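This is roughly how the decoder attaches A53 closed captions from the SEI buffer reference (sketch): on success the frame owns the reference, so it is only unreferenced on failure.

AVFrameSideData *sd =
    av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_A53_CC, buf_ref);
if (!sd)
    av_buffer_unref(&buf_ref);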
H264Picture::mb_height
int mb_height
Definition: h264dec.h:168
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:38
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:398
H264SliceContext::qscale
int qscale
Definition: h264dec.h:193
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:756
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2310
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:65
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
fp
#define fp
Definition: regdef.h:44
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:722
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:412
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:294
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:279
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:91
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:243
h264_ps.h
init_dimensions
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:869
c
In the C language some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not ...
Definition: undefined.txt:32
H264SliceContext::top_type
int top_type
Definition: h264dec.h:220
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:661
for
for (j = 16; j > 0; --j)
Definition: h264pred_template.c:469
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:239
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:53
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:67
H264SEIPictureTiming
Definition: h264_sei.h:82
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:324
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:250
AVFrame::crop_left
size_t crop_left
Definition: frame.h:662
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: avcodec.h:235
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:64
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:383
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
AV_STEREO3D_CHECKERBOARD
@ AV_STEREO3D_CHECKERBOARD
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:104
H264Picture::reference
int reference
Definition: h264dec.h:160
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:75
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
H264SEIFramePacking
Definition: h264_sei.h:147
rectangle.h
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:1591
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:234
VP_START
#define VP_START
current MB is the first after a resync marker
Definition: error_resilience.h:30
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:402
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:203
MAX_DELAYED_PIC_COUNT
#define MAX_DELAYED_PIC_COUNT
Definition: h264dec.h:56
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:130
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:139
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:702
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:51
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:161
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:215
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:195
H2645NAL
Definition: h2645_parse.h:32
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:404
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:291
AVFrameSideData::data
uint8_t * data
Definition: frame.h:208
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1797
H264SliceContext::cbp
int cbp
Definition: h264dec.h:261
AVFrame::format
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:373
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:222
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:121
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:236
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:40
height
#define height
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2580
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:330
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
a
The reader does not expect b to be semantically negative here, and if the code is changed, by maybe adding a cast, a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a
Definition: undefined.txt:41
H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
@ H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
Definition: h264_sei.h:63
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:232
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:82
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:313
H264SEIDisplayOrientation::anticlockwise_rotation
int anticlockwise_rotation
Definition: h264_sei.h:160
H264SliceContext::slice_type_nos
int slice_type_nos
S-free slice type (SI/SP are remapped to I/P)
Definition: h264dec.h:190
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:334
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:73
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:84
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:47
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:666
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:181
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:55
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:50
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2452
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:176
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:248
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:57
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:197
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:282
AVFrame::interlaced_frame
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:447
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:396
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:158
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:333
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:240
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:62
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:33
H264Context
H264Context.
Definition: h264dec.h:343
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: avcodec.h:234
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:376
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:525
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:54
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:332
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:414
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2792
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:233
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
uint8_t
uint8_t
Definition: audio_convert.c:194
cabac_functions.h
H264Picture::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: h264dec.h:141
tb
#define tb
Definition: regdef.h:68
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:219
H264_SEI_FPA_TYPE_INTERLEAVE_ROW
@ H264_SEI_FPA_TYPE_INTERLEAVE_ROW
Definition: h264_sei.h:64
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:181
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:534
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:396
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:274
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
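A usage sketch; the result is 1 because 1/25 > 1/30:

AVRational a = { 1, 25 }, b = { 1, 30 };
int cmp = av_cmp_q(a, b); /* 1: a > b; 0 would mean equal, -1 a < b */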
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_h264_ref_picture
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
Definition: h264_picture.c:66
ret
ret
Definition: filter_design.txt:187
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:277
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:529
AV_STEREO3D_COLUMNS
@ AV_STEREO3D_COLUMNS
Views are packed per column.
Definition: stereo3d.h:141
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1447
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:196
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:401
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:406
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:119
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:284
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:232
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:201
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AVFrame::height
int height
Definition: frame.h:358
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
Read an unsigned Exp-Golomb code, constrained to a max of 31.
Definition: golomb.h:120
MAX_SLICES
#define MAX_SLICES
Definition: dxva2_hevc.c:29
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:556
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:299
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1825
av_image_copy
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:387
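A sketch copying the pixel data between two already-allocated frames of identical format and size (in/out are illustrative names):

av_image_copy(out->data, out->linesize,
              (const uint8_t **)in->data, in->linesize,
              (enum AVPixelFormat)out->format, out->width, out->height);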
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:289
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:147
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2520
H264Picture::mb_width
int mb_width
Definition: h264dec.h:168
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:831
H264Picture
Definition: h264dec.h:128
find_unused_picture
static int find_unused_picture(H264Context *h)
Definition: h264_slice.c:261
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:650
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1947
H264SEIDisplayOrientation::vflip
int vflip
Definition: h264_sei.h:161
H264SEIDisplayOrientation
Definition: h264_sei.h:158
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:2193
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:159
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
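A refcounting sketch: both references share one AVBuffer, and the data is freed only when the last reference goes away.

AVBufferRef *ref1 = av_buffer_alloc(4096);
AVBufferRef *ref2 = av_buffer_ref(ref1); /* refcount is now 2 */
av_buffer_unref(&ref1);                  /* data still alive via ref2 */
av_buffer_unref(&ref2);                  /* buffer actually freed here */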
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:49
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:160
H264Picture::mb_type_buf
AVBufferRef * mb_type_buf
Definition: h264dec.h:138
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:275
LBOT
#define LBOT
Definition: h264dec.h:77
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder should not do as an error
Definition: avcodec.h:1671
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:300
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
tc
#define tc
Definition: regdef.h:69
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:79
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:105
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
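Roughly what the decoder does when a frame-packing SEI arrives (sketch, side-by-side case):

AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
if (stereo) {
    stereo->type  = AV_STEREO3D_SIDEBYSIDE;
    stereo->flags = AV_STEREO3D_FLAG_INVERT; /* right half carries the left view */
}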
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:206
ff_thread_get_format
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:972
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:49
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
hardware accelerator private data
Definition: h264dec.h:142
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
AVStereo3D::view
enum AVStereo3DView view
Determines which views are packed.
Definition: stereo3d.h:190
H264_SEI_FPA_TYPE_2D
@ H264_SEI_FPA_TYPE_2D
Definition: h264_sei.h:68
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:216
AVFrame::crop_top
size_t crop_top
Definition: frame.h:660
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:185
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:212
LTOP
#define LTOP
Definition: h264dec.h:76
h264.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:293
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:275
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:99
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2208
h
h
Definition: vp9dsp_template.c:2038
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:326
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:407
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:35
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:52
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:292
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:232
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:191
H264Ref::poc
int poc
Definition: h264dec.h:177
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:104
H264Picture::qscale_table_buf
AVBufferRef * qscale_table_buf
Definition: h264dec.h:132
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:48
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:335
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2918
H264Picture::long_ref
int long_ref
1 -> long term reference, 0 -> short term reference
Definition: h264dec.h:154
H264Ref::reference
int reference
Definition: h264dec.h:176
AVFrame::repeat_pict
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:442
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:136
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:405
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:52
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2465
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:247