 49     if (!pic->f || !pic->f->buf[0])
 58     for (i = 0; i < 2; i++) {
 63     memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
 90     for (i = 0; i < 2; i++) {
101     if (src->hwaccel_picture_private) {
110     for (i = 0; i < 2; i++)
138 #if CONFIG_ERROR_RESILIENCE
141     memset(dst, 0, sizeof(*dst));
149     for (i = 0; i < 2; i++) {
168     h->poc.prev_poc_msb = h->poc.poc_msb;
169     h->poc.prev_poc_lsb = h->poc.poc_lsb;
171     h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
172     h->poc.prev_frame_num = h->poc.frame_num;
179                "hardware accelerator failed to decode picture\n");
182     if (!in_setup && !h->droppable)
187     h->current_slice = 0;
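The excerpt above only keeps the cross-referenced lines. As a rough illustration of the release pattern the first few lines belong to (unreference every AVBufferRef a picture holds, then wipe the remaining per-frame fields with one memset), here is a minimal sketch; MyPicture is a made-up struct, not the real H264Picture:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include "libavutil/buffer.h"

    /* Hypothetical picture type, for illustration only. */
    typedef struct MyPicture {
        AVBufferRef *data_buf;
        AVBufferRef *motion_val_buf[2];
        /* everything from here on is plain data and can be wiped together */
        int poc;
        int reference;
    } MyPicture;

    static void unref_my_picture(MyPicture *pic)
    {
        const size_t off = offsetof(MyPicture, poc);

        if (!pic->data_buf)          /* nothing allocated, nothing to release */
            return;

        av_buffer_unref(&pic->data_buf);
        for (int i = 0; i < 2; i++)
            av_buffer_unref(&pic->motion_val_buf[i]);

        /* clear all per-frame state that follows the buffer references */
        memset((uint8_t *)pic + off, 0, sizeof(*pic) - off);
    }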
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
In the libavfilter documentation, the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. During format negotiation each filter declares, for each input and each output, the list of supported formats: for video that means pixel formats, for audio that means channel layouts and sample formats. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to that list are updated. That means that if a filter requires its input and output to have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; filters with complex requirements can use this to derive the formats supported on one link from the format negotiated on another. Frame references, ownership and permissions are covered separately.
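A sketch of the AVERROR(EAGAIN) mechanism described above; input_format_known() and publish_output_formats() are illustrative stand-ins, not libavfilter API:

    #include "libavutil/error.h"

    static int input_format_known(void *filter)      { return 0; /* stub */ }
    static int publish_output_formats(void *filter)  { return 0; /* stub */ }

    static int query_formats_sketch(void *filter)
    {
        /* The input format is not decided yet: ask the negotiation loop to
         * call us again once more links have been resolved. */
        if (!input_format_known(filter))
            return AVERROR(EAGAIN);

        /* Now the formats offered on the output can depend on the format
         * that was negotiated on the input. */
        return publish_output_formats(filter);
    }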
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
uint8_t * data
The data buffer.
int ref_count[2][2]
number of entries in ref_poc (FIXME need per slice)
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
#define PICT_BOTTOM_FIELD
AVBufferRef * ref_index_buf[2]
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
int frame_num
frame_num (raw frame_num from slice header)
int mmco_reset
MMCO_RESET sets this to 1.
int ref_poc[2][2][32]
POCs of the frames/fields used as reference (FIXME need per slice)
int16_t (*motion_val[2])[2]
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
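For context, the error message quoted at line 179 of the excerpt would be emitted with a call along these lines, assuming avctx is the decoder's AVCodecContext:

    av_log(avctx, AV_LOG_ERROR,
           "hardware accelerator failed to decode picture\n");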
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
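A hedged sketch of how the progress calls pair up in a frame-threaded decoder; cur_pic and ref_pic stand for H264Picture-like objects, and ff_thread_await_progress() is the matching wait call in the same API:

    /* Producer: the thread decoding cur_pic announces that all macroblock
     * rows up to mb_row are finished (field 0 for frames / top field). */
    ff_thread_report_progress(&cur_pic->tf, mb_row, 0);

    /* Consumer: a later thread using ref_pic as a reference blocks until
     * the rows it needs have been reported. */
    ff_thread_await_progress(&ref_pic->tf, needed_mb_row, 0);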
int mbaff
1 -> MBAFF frame, 0 -> not MBAFF
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int sei_recovery_frame_cnt
AVBufferRef * motion_val_buf[2]
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
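A hypothetical sketch of what such a callback does; submit_to_hardware() is illustrative, not a real FFmpeg function:

    /* end_frame() runs once the whole frame or field has been parsed: a
     * typical accelerator submits the collected slice data to the hardware
     * here and returns a negative error code on failure, which the decoder
     * reports as "hardware accelerator failed to decode picture". */
    static int submit_to_hardware(AVCodecContext *avctx) { return 0; /* stub */ }

    static int my_hwaccel_end_frame(AVCodecContext *avctx)
    {
        return submit_to_hardware(avctx);
    }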
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
int recovered
picture at IDR or recovery point + recovery count
#define FF_THREAD_FRAME
Decode more than one frame at once.
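From the public API side, frame threading is requested roughly like this (a sketch, assuming codec is the AVCodec being opened; whether frame threading is actually used depends on the codec, and active_thread_type below reflects the outcome):

    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    avctx->thread_count = 0;                /* 0 lets libavcodec pick a count */
    avctx->thread_type  = FF_THREAD_FRAME;  /* allow frame-level threading */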
int field_picture
whether or not picture was encoded in separate fields
AVBufferRef * hwaccel_priv_buf
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
struct AVCodecContext
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
int field_poc[2]
top/bottom POC
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
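A minimal usage sketch for the two buffer calls listed here (error checking omitted):

    AVBufferRef *a = av_buffer_alloc(1024);   /* one reference to new data */
    AVBufferRef *b = av_buffer_ref(a);        /* second reference, same data */
    av_buffer_unref(&a);                      /* data survives: b still refs it */
    av_buffer_unref(&b);                      /* last reference gone, buffer freed */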
AVBufferRef * mb_type_buf
void * hwaccel_picture_private
hardware accelerator private data
AVBufferRef * qscale_table_buf
int long_ref
1 -> long term reference, 0 -> short term reference
int16_t (*motion_val[2])[2]