FFmpeg
vdpau_vp9.c
/*
 * VP9 HW decode acceleration through VDPAU
 *
 * Copyright (c) 2019 Manoj Gupta Bonda
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <vdpau/vdpau.h>
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "hwaccel_internal.h"
#include "vp9dec.h"
#include "vdpau.h"
#include "vdpau_internal.h"

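/* Build the VdpPictureInfoVP9 for the current frame from the parsed frame
 * header and start a new VDPAU picture. */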
static int vdpau_vp9_start_frame(AVCodecContext *avctx,
                                 const uint8_t *buffer, uint32_t size)
{
    VP9Context *s = avctx->priv_data;
    VP9SharedContext *h = &(s->s);
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
    int i;

    VdpPictureInfoVP9 *info = &pic_ctx->info.vp9;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    if (!pixdesc) {
        return AV_PIX_FMT_NONE;
    }

    info->width = avctx->width;
    info->height = avctx->height;
    /* fill VdpPictureInfoVP9 struct */
    info->lastReference = VDP_INVALID_HANDLE;
    info->goldenReference = VDP_INVALID_HANDLE;
    info->altReference = VDP_INVALID_HANDLE;

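    /* Resolve the last/golden/altref reference slots selected by the frame
     * header to VDPAU surface handles when a decoded surface is available. */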
    if (h->refs[h->h.refidx[0]].f && h->refs[h->h.refidx[0]].f->private_ref) {
        info->lastReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[0]].f);
    }
    if (h->refs[h->h.refidx[1]].f && h->refs[h->h.refidx[1]].f->private_ref) {
        info->goldenReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[1]].f);
    }
    if (h->refs[h->h.refidx[2]].f && h->refs[h->h.refidx[2]].f->private_ref) {
        info->altReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[2]].f);
    }

    info->profile = h->h.profile;
    info->frameContextIdx = h->h.framectxid;
    info->keyFrame = h->h.keyframe;
    info->showFrame = !h->h.invisible;
    info->errorResilient = h->h.errorres;
    info->frameParallelDecoding = h->h.parallelmode;

    info->subSamplingX = pixdesc->log2_chroma_w;
    info->subSamplingY = pixdesc->log2_chroma_h;

    info->intraOnly = h->h.intraonly;
    info->allowHighPrecisionMv = h->h.keyframe ? 0 : h->h.highprecisionmvs;
    info->refreshEntropyProbs = h->h.refreshctx;

    info->bitDepthMinus8Luma = pixdesc->comp[0].depth - 8;
    info->bitDepthMinus8Chroma = pixdesc->comp[1].depth - 8;

    info->loopFilterLevel = h->h.filter.level;
    info->loopFilterSharpness = h->h.filter.sharpness;
    info->modeRefLfEnabled = h->h.lf_delta.enabled;

    info->log2TileColumns = h->h.tiling.log2_tile_cols;
    info->log2TileRows = h->h.tiling.log2_tile_rows;

    info->segmentEnabled = h->h.segmentation.enabled;
    info->segmentMapUpdate = h->h.segmentation.update_map;
    info->segmentMapTemporalUpdate = h->h.segmentation.temporal;
    info->segmentFeatureMode = h->h.segmentation.absolute_vals;

    info->qpYAc = h->h.yac_qi;
    info->qpYDc = h->h.ydc_qdelta;
    info->qpChDc = h->h.uvdc_qdelta;
    info->qpChAc = h->h.uvac_qdelta;

    info->resetFrameContext = h->h.resetctx;
    info->mcompFilterType = h->h.filtermode ^ (h->h.filtermode <= 1);
    info->uncompressedHeaderSize = h->h.uncompressed_header_size;
    info->compressedHeaderSize = h->h.compressed_header_size;
    info->refFrameSignBias[0] = 0;

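    /* Copy loop-filter deltas, segmentation probabilities, active reference
     * indices and per-segment feature flags/values into the fixed-size
     * arrays of VdpPictureInfoVP9. */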
    for (i = 0; i < FF_ARRAY_ELEMS(info->mbModeLfDelta); i++)
        info->mbModeLfDelta[i] = h->h.lf_delta.mode[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbRefLfDelta); i++)
        info->mbRefLfDelta[i] = h->h.lf_delta.ref[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbSegmentTreeProbs); i++)
        info->mbSegmentTreeProbs[i] = h->h.segmentation.prob[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->activeRefIdx); i++) {
        info->activeRefIdx[i] = h->h.refidx[i];
        info->segmentPredProbs[i] = h->h.segmentation.pred_prob[i];
        info->refFrameSignBias[i + 1] = h->h.signbias[i];
    }

    for (i = 0; i < FF_ARRAY_ELEMS(info->segmentFeatureEnable); i++) {
        info->segmentFeatureEnable[i][0] = h->h.segmentation.feat[i].q_enabled;
        info->segmentFeatureEnable[i][1] = h->h.segmentation.feat[i].lf_enabled;
        info->segmentFeatureEnable[i][2] = h->h.segmentation.feat[i].ref_enabled;
        info->segmentFeatureEnable[i][3] = h->h.segmentation.feat[i].skip_enabled;

        info->segmentFeatureData[i][0] = h->h.segmentation.feat[i].q_val;
        info->segmentFeatureData[i][1] = h->h.segmentation.feat[i].lf_val;
        info->segmentFeatureData[i][2] = h->h.segmentation.feat[i].ref_val;
        info->segmentFeatureData[i][3] = 0;
    }

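    /* Map the FFmpeg colorspace to the VP9 color_space code used by VDPAU:
     * 0 unknown, 1 BT.601, 2 BT.709, 3 SMPTE-170, 4 SMPTE-240, 5 BT.2020,
     * 6 reserved, 7 sRGB. */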
    switch (avctx->colorspace) {
    default:
    case AVCOL_SPC_UNSPECIFIED:
        info->colorSpace = 0;
        break;
    case AVCOL_SPC_BT470BG:
        info->colorSpace = 1;
        break;
    case AVCOL_SPC_BT709:
        info->colorSpace = 2;
        break;
    case AVCOL_SPC_SMPTE170M:
        info->colorSpace = 3;
        break;
    case AVCOL_SPC_SMPTE240M:
        info->colorSpace = 4;
        break;
    case AVCOL_SPC_BT2020_NCL:
        info->colorSpace = 5;
        break;
    case AVCOL_SPC_RESERVED:
        info->colorSpace = 6;
        break;
    case AVCOL_SPC_RGB:
        info->colorSpace = 7;
        break;
    }

    return ff_vdpau_common_start_frame(pic_ctx, buffer, size);
}

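/* Start-code prefix that vdpau_vp9_decode_slice() prepends to each frame's
 * data; presumably the VDPAU implementation expects every VP9 frame to be
 * preceded by this marker. */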
static const uint8_t start_code_prefix[3] = { 0x00, 0x00, 0x01 };

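/* Queue the start-code prefix followed by the raw frame data as bitstream
 * buffers for the current picture; the actual decode call is issued later in
 * vdpau_vp9_end_frame(). */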
static int vdpau_vp9_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer, uint32_t size)
{
    VP9SharedContext *h = avctx->priv_data;
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;

    int val;

    val = ff_vdpau_add_buffer(pic_ctx, start_code_prefix, sizeof(start_code_prefix));
    if (val)
        return val;

    val = ff_vdpau_add_buffer(pic_ctx, buffer, size);
    if (val)
        return val;

    return 0;
}

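/* Hand the queued bitstream buffers to the VDPAU decoder so the current
 * picture is decoded into its video surface. */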
static int vdpau_vp9_end_frame(AVCodecContext *avctx)
{
    VP9SharedContext *h = avctx->priv_data;
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;

    int val;

    val = ff_vdpau_common_end_frame(avctx, pic.tf.f, pic_ctx);
    if (val < 0)
        return val;

    return 0;
}

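/* Pick the VDPAU decoder profile that matches the stream's VP9 profile and
 * initialize the common VDPAU decoder state. */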
static int vdpau_vp9_init(AVCodecContext *avctx)
{
    VdpDecoderProfile profile;
    uint32_t level = avctx->level;

    switch (avctx->profile) {
    case AV_PROFILE_VP9_0:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_0;
        break;
    case AV_PROFILE_VP9_1:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_1;
        break;
    case AV_PROFILE_VP9_2:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_2;
        break;
    case AV_PROFILE_VP9_3:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_3;
        break;
    default:
        return AVERROR(ENOTSUP);
    }

    return ff_vdpau_common_init(avctx, profile, level);
}

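/* VDPAU hardware accelerator descriptor for the VP9 decoder. */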
const FFHWAccel ff_vp9_vdpau_hwaccel = {
    .p.name         = "vp9_vdpau",
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_VP9,
    .p.pix_fmt      = AV_PIX_FMT_VDPAU,
    .start_frame    = vdpau_vp9_start_frame,
    .end_frame      = vdpau_vp9_end_frame,
    .decode_slice   = vdpau_vp9_decode_slice,
    .frame_priv_data_size = sizeof(struct vdpau_picture_context),
    .init           = vdpau_vp9_init,
    .uninit         = ff_vdpau_common_uninit,
    .frame_params   = ff_vdpau_common_frame_params,
    .priv_data_size = sizeof(VDPAUContext),
    .caps_internal  = HWACCEL_CAP_ASYNC_SAFE,
};