FFmpeg
libdav1d.c
/*
 * Copyright (c) 2018 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (c) 2018 James Almer <jamrial gmail com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <dav1d/dav1d.h>

#include "libavutil/avassert.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "decode.h"
#include "internal.h"

typedef struct Libdav1dContext {
    AVClass *class;
    Dav1dContext *c;
    AVBufferPool *pool;
    int pool_size;

    Dav1dData data;
    int tile_threads;
    int frame_threads;
    int apply_grain;
    int operating_point;
    int all_layers;
} Libdav1dContext;

static const enum AVPixelFormat pix_fmt[][3] = {
    [DAV1D_PIXEL_LAYOUT_I400] = { AV_PIX_FMT_GRAY8,   AV_PIX_FMT_GRAY10,    AV_PIX_FMT_GRAY12 },
    [DAV1D_PIXEL_LAYOUT_I420] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12 },
    [DAV1D_PIXEL_LAYOUT_I422] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12 },
    [DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12 },
};

static const enum AVPixelFormat pix_fmt_rgb[3] = {
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
};
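
/* Note added for this listing (not part of the upstream file): both tables
 * above are indexed by Dav1dSequenceHeader.hbd, where 0, 1 and 2 select the
 * 8-, 10- and 12-bit variant of the chosen pixel layout. */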

static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
{
    AVCodecContext *c = opaque;

    av_vlog(c, AV_LOG_ERROR, fmt, vl);
}

static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
{
    Libdav1dContext *dav1d = cookie;
    enum AVPixelFormat format = pix_fmt[p->p.layout][p->seq_hdr->hbd];
    int ret, linesize[4], h = FFALIGN(p->p.h, 128);
    uint8_t *aligned_ptr, *data[4];
    AVBufferRef *buf;

    ret = av_image_fill_arrays(data, linesize, NULL, format, FFALIGN(p->p.w, 128),
                               h, DAV1D_PICTURE_ALIGNMENT);
    if (ret < 0)
        return ret;

    if (ret != dav1d->pool_size) {
        av_buffer_pool_uninit(&dav1d->pool);
        // Use twice the amount of required padding bytes for aligned_ptr below.
        dav1d->pool = av_buffer_pool_init(ret + DAV1D_PICTURE_ALIGNMENT * 2, NULL);
        if (!dav1d->pool) {
            dav1d->pool_size = 0;
            return AVERROR(ENOMEM);
        }
        dav1d->pool_size = ret;
    }
    buf = av_buffer_pool_get(dav1d->pool);
    if (!buf)
        return AVERROR(ENOMEM);

    // libdav1d requires DAV1D_PICTURE_ALIGNMENT aligned buffers, which av_malloc()
    // doesn't guarantee for example when AVX is disabled at configure time.
    // Use the extra DAV1D_PICTURE_ALIGNMENT padding bytes in the buffer to align it
    // if required.
    aligned_ptr = (uint8_t *)FFALIGN((uintptr_t)buf->data, DAV1D_PICTURE_ALIGNMENT);
    ret = av_image_fill_pointers(data, format, h, aligned_ptr, linesize);
    if (ret < 0) {
        av_buffer_unref(&buf);
        return ret;
    }

    p->data[0] = data[0];
    p->data[1] = data[1];
    p->data[2] = data[2];
    p->stride[0] = linesize[0];
    p->stride[1] = linesize[1];
    p->allocator_data = buf;

    return 0;
}

static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
{
    AVBufferRef *buf = p->allocator_data;

    av_buffer_unref(&buf);
}
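
/* Note added for this listing (not part of the upstream file): every picture
 * dav1d hands back is backed by the AVBufferRef stored in p->allocator_data,
 * which is what lets libdav1d_receive_frame() below export the decoded planes
 * zero-copy by simply taking another reference to that buffer. */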

static av_cold int libdav1d_init(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;
    Dav1dSettings s;
    int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
    int res;

    av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version());

    dav1d_default_settings(&s);
    s.logger.cookie = c;
    s.logger.callback = libdav1d_log_callback;
    s.allocator.cookie = dav1d;
    s.allocator.alloc_picture_callback = libdav1d_picture_allocator;
    s.allocator.release_picture_callback = libdav1d_picture_release;
    s.frame_size_limit = c->max_pixels;
    if (dav1d->apply_grain >= 0)
        s.apply_grain = dav1d->apply_grain;

    s.all_layers = dav1d->all_layers;
    if (dav1d->operating_point >= 0)
        s.operating_point = dav1d->operating_point;

    s.n_tile_threads = dav1d->tile_threads
                     ? dav1d->tile_threads
                     : FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
    s.n_frame_threads = dav1d->frame_threads
                      ? dav1d->frame_threads
                      : FFMIN(ceil(threads / s.n_tile_threads), DAV1D_MAX_FRAME_THREADS);
    av_log(c, AV_LOG_DEBUG, "Using %d frame threads, %d tile threads\n",
           s.n_frame_threads, s.n_tile_threads);

    res = dav1d_open(&dav1d->c, &s);
    if (res < 0)
        return AVERROR(ENOMEM);

    return 0;
}
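
/* Illustrative note (added, not in the upstream file): with the default
 * thread_count == 0 on a machine where av_cpu_count() reports 8, the code
 * above computes threads = 8 * 3 / 2 = 12, so the heuristic picks
 * n_tile_threads  = FFMIN(floor(sqrt(12)), DAV1D_MAX_TILE_THREADS)  = 3 and
 * n_frame_threads = FFMIN(ceil(12 / 3),    DAV1D_MAX_FRAME_THREADS) = 4. */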

static void libdav1d_flush(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;

    dav1d_data_unref(&dav1d->data);
    dav1d_flush(dav1d->c);
}

static void libdav1d_data_free(const uint8_t *data, void *opaque) {
    AVBufferRef *buf = opaque;

    av_buffer_unref(&buf);
}

static void libdav1d_user_data_free(const uint8_t *data, void *opaque) {
    av_assert0(data == opaque);
    av_free(opaque);
}

static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
{
    Libdav1dContext *dav1d = c->priv_data;
    Dav1dData *data = &dav1d->data;
    Dav1dPicture pic = { 0 }, *p = &pic;
    int res;

    if (!data->sz) {
        AVPacket pkt = { 0 };

        res = ff_decode_get_packet(c, &pkt);
        if (res < 0 && res != AVERROR_EOF)
            return res;

        if (pkt.size) {
            res = dav1d_data_wrap(data, pkt.data, pkt.size, libdav1d_data_free, pkt.buf);
            if (res < 0) {
                av_packet_unref(&pkt);
                return res;
            }

            data->m.timestamp = pkt.pts;
            data->m.offset = pkt.pos;
            data->m.duration = pkt.duration;

            pkt.buf = NULL;
            av_packet_unref(&pkt);

            if (c->reordered_opaque != AV_NOPTS_VALUE) {
                uint8_t *reordered_opaque = av_malloc(sizeof(c->reordered_opaque));
                if (!reordered_opaque) {
                    dav1d_data_unref(data);
                    return AVERROR(ENOMEM);
                }

                memcpy(reordered_opaque, &c->reordered_opaque, sizeof(c->reordered_opaque));
                res = dav1d_data_wrap_user_data(data, reordered_opaque,
                                                libdav1d_user_data_free, reordered_opaque);
                if (res < 0) {
                    av_free(reordered_opaque);
                    dav1d_data_unref(data);
                    return res;
                }
            }
        }
    }

    res = dav1d_send_data(dav1d->c, data);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        if (res != AVERROR(EAGAIN)) {
            dav1d_data_unref(data);
            return res;
        }
    }

    res = dav1d_get_picture(dav1d->c, p);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        else if (res == AVERROR(EAGAIN) && c->internal->draining)
            res = AVERROR_EOF;

        return res;
    }

    av_assert0(p->data[0] && p->allocator_data);

    // This requires the custom allocator above
    frame->buf[0] = av_buffer_ref(p->allocator_data);
    if (!frame->buf[0]) {
        dav1d_picture_unref(p);
        return AVERROR(ENOMEM);
    }

    frame->data[0] = p->data[0];
    frame->data[1] = p->data[1];
    frame->data[2] = p->data[2];
    frame->linesize[0] = p->stride[0];
    frame->linesize[1] = p->stride[1];
    frame->linesize[2] = p->stride[1];

    c->profile = p->seq_hdr->profile;
    c->level = ((p->seq_hdr->operating_points[0].major_level - 2) << 2)
               | p->seq_hdr->operating_points[0].minor_level;
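    /* Illustrative note (added, not in the upstream file): this reproduces the
     * AV1 seq_level_idx encoding, level X.Y -> (X - 2) * 4 + Y, so e.g. level
     * 4.0 (major 4, minor 0) yields c->level = ((4 - 2) << 2) | 0 = 8. */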
    frame->width = p->p.w;
    frame->height = p->p.h;
    if (c->width != p->p.w || c->height != p->p.h) {
        res = ff_set_dimensions(c, p->p.w, p->p.h);
        if (res < 0)
            goto fail;
    }

    av_reduce(&frame->sample_aspect_ratio.num,
              &frame->sample_aspect_ratio.den,
              frame->height * (int64_t)p->frame_hdr->render_width,
              frame->width  * (int64_t)p->frame_hdr->render_height,
              INT_MAX);
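    /* Illustrative note (added, not in the upstream file): the reduction above
     * derives the sample aspect ratio from the signaled render size, i.e.
     * SAR = (render_width / render_height) / (width / height)
     *     = (height * render_width) / (width * render_height). */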

    switch (p->seq_hdr->chr) {
    case DAV1D_CHR_VERTICAL:
        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case DAV1D_CHR_COLOCATED:
        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
        break;
    }
    frame->colorspace = c->colorspace = (enum AVColorSpace) p->seq_hdr->mtrx;
    frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p->seq_hdr->pri;
    frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p->seq_hdr->trc;
    frame->color_range = c->color_range = p->seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;

    if (p->p.layout == DAV1D_PIXEL_LAYOUT_I444 &&
        p->seq_hdr->mtrx == DAV1D_MC_IDENTITY &&
        p->seq_hdr->pri  == DAV1D_COLOR_PRI_BT709 &&
        p->seq_hdr->trc  == DAV1D_TRC_SRGB)
        frame->format = c->pix_fmt = pix_fmt_rgb[p->seq_hdr->hbd];
    else
        frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];

    if (p->m.user_data.data)
        memcpy(&frame->reordered_opaque, p->m.user_data.data, sizeof(frame->reordered_opaque));
    else
        frame->reordered_opaque = AV_NOPTS_VALUE;

    if (p->seq_hdr->num_units_in_tick && p->seq_hdr->time_scale) {
        av_reduce(&c->framerate.den, &c->framerate.num,
                  p->seq_hdr->num_units_in_tick, p->seq_hdr->time_scale, INT_MAX);
        if (p->seq_hdr->equal_picture_interval)
            c->ticks_per_frame = p->seq_hdr->num_ticks_per_picture;
    }

    // match timestamps and packet size
    frame->pts = frame->best_effort_timestamp = p->m.timestamp;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = p->m.timestamp;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pkt_dts = p->m.timestamp;
    frame->pkt_pos = p->m.offset;
    frame->pkt_size = p->m.size;
    frame->pkt_duration = p->m.duration;
    frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;

    switch (p->frame_hdr->frame_type) {
    case DAV1D_FRAME_TYPE_KEY:
    case DAV1D_FRAME_TYPE_INTRA:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case DAV1D_FRAME_TYPE_INTER:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case DAV1D_FRAME_TYPE_SWITCH:
        frame->pict_type = AV_PICTURE_TYPE_SP;
        break;
    default:
        res = AVERROR_INVALIDDATA;
        goto fail;
    }

    if (p->mastering_display) {
        AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
        if (!mastering) {
            res = AVERROR(ENOMEM);
            goto fail;
        }

        for (int i = 0; i < 3; i++) {
            mastering->display_primaries[i][0] = av_make_q(p->mastering_display->primaries[i][0], 1 << 16);
            mastering->display_primaries[i][1] = av_make_q(p->mastering_display->primaries[i][1], 1 << 16);
        }
        mastering->white_point[0] = av_make_q(p->mastering_display->white_point[0], 1 << 16);
        mastering->white_point[1] = av_make_q(p->mastering_display->white_point[1], 1 << 16);

        mastering->max_luminance = av_make_q(p->mastering_display->max_luminance, 1 << 8);
        mastering->min_luminance = av_make_q(p->mastering_display->min_luminance, 1 << 14);

        mastering->has_primaries = 1;
        mastering->has_luminance = 1;
    }
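    /* Illustrative note (added, not in the upstream file): the denominators
     * mirror the fixed-point formats of the AV1 metadata OBU, which signals
     * chromaticity coordinates as 0.16, the maximum luminance as 24.8 and the
     * minimum luminance as 18.14 fixed-point values. */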
    if (p->content_light) {
        AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
        if (!light) {
            res = AVERROR(ENOMEM);
            goto fail;
        }
        light->MaxCLL = p->content_light->max_content_light_level;
        light->MaxFALL = p->content_light->max_frame_average_light_level;
    }

    res = 0;
fail:
    dav1d_picture_unref(p);
    if (res < 0)
        av_frame_unref(frame);
    return res;
}

static av_cold int libdav1d_close(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;

    av_buffer_pool_uninit(&dav1d->pool);
    dav1d_data_unref(&dav1d->data);
    dav1d_close(&dav1d->c);

    return 0;
}

#define OFFSET(x) offsetof(Libdav1dContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption libdav1d_options[] = {
    { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD },
    { "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD },
    { "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
    { "oppoint", "Select an operating point of the scalable bitstream", OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31, VD },
    { "alllayers", "Output all spatial layers", OFFSET(all_layers), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { NULL }
};

static const AVClass libdav1d_class = {
    .class_name = "libdav1d decoder",
    .item_name  = av_default_item_name,
    .option     = libdav1d_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
406 
408  .name = "libdav1d",
409  .long_name = NULL_IF_CONFIG_SMALL("dav1d AV1 decoder by VideoLAN"),
410  .type = AVMEDIA_TYPE_VIDEO,
411  .id = AV_CODEC_ID_AV1,
412  .priv_data_size = sizeof(Libdav1dContext),
413  .init = libdav1d_init,
414  .close = libdav1d_close,
419  .priv_class = &libdav1d_class,
420  .wrapper_name = "libdav1d",
421 };
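
The sketch below is not part of libdav1d.c; it is added here only to illustrate how an
application might drive this wrapper through the public libavcodec send/receive API,
selecting the decoder by name and setting one of the private options declared above.
The packet source read_next_av1_packet() is a hypothetical placeholder, and draining
at end of stream is omitted for brevity.

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

// Hypothetical helper: fills pkt with the next AV1 temporal unit, returns < 0 when done.
int read_next_av1_packet(AVPacket *pkt);

static int decode_av1_with_libdav1d(void)
{
    const AVCodec *codec = avcodec_find_decoder_by_name("libdav1d");
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    AVPacket *pkt        = av_packet_alloc();
    AVFrame *frame       = av_frame_alloc();
    int ret;

    if (!codec || !ctx || !pkt || !frame)
        return AVERROR(ENOMEM);

    // Private options from libdav1d_options[] are reachable through the codec
    // context when AV_OPT_SEARCH_CHILDREN is passed.
    av_opt_set_int(ctx, "filmgrain", 0, AV_OPT_SEARCH_CHILDREN);

    ret = avcodec_open2(ctx, codec, NULL);
    if (ret < 0)
        return ret;

    while (read_next_av1_packet(pkt) >= 0) {
        ret = avcodec_send_packet(ctx, pkt);
        av_packet_unref(pkt);
        if (ret < 0)
            break;
        while ((ret = avcodec_receive_frame(ctx, frame)) >= 0) {
            // ... consume frame->data / frame->pts here ...
            av_frame_unref(frame);
        }
        if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            break;
    }

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}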