#include "config_components.h"

#include <multimedia/player_framework/native_avcapability.h>
#include <multimedia/player_framework/native_avcodec_videodecoder.h>
/* oh_decode_release(): free callback that destroys the wrapped decoder */
OH_AVCodec *dec = (OH_AVCodec *)data;
OH_AVErrCode err = OH_VideoDecoder_Destroy(dec);
/* oh_decode_create(): pick the codec by capability, then create it by name */
const char *name = s->name;
/* ... */
OH_AVCapability *cap = OH_AVCodec_GetCapabilityByCategory(mime, false, HARDWARE);
/* ... */
        "Failed to get hardware codec %s, trying software backend\n", mime);
cap = OH_AVCodec_GetCapabilityByCategory(mime, false, SOFTWARE);
/* ... */
name = OH_AVCapability_GetName(cap);
/* ... */
s->dec = OH_VideoDecoder_CreateByName(name);
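/*
 * Illustrative sketch (not part of the original file): the lookup above reads
 * as "prefer a hardware decoder for this MIME type, optionally fall back to a
 * software one, then create the codec by its reported name". The helper name
 * below is hypothetical.
 */
static OH_AVCodec *sketch_create_decoder(const char *mime, bool allow_sw)
{
    OH_AVCapability *cap = OH_AVCodec_GetCapabilityByCategory(mime, false, HARDWARE);

    if (!cap && allow_sw)
        cap = OH_AVCodec_GetCapabilityByCategory(mime, false, SOFTWARE);
    if (!cap)
        return NULL;
    return OH_VideoDecoder_CreateByName(OH_AVCapability_GetName(cap));
}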
s->output_to_window = true;
/* ... */
       "Invalid width/height (%dx%d), width and height are mandatory for ohcodec\n",
OH_AVFormat *format = OH_AVFormat_Create();
/* ... */
OH_AVFormat_SetIntValue(format, OH_MD_KEY_WIDTH, avctx->width);
OH_AVFormat_SetIntValue(format, OH_MD_KEY_HEIGHT, avctx->height);
if (!s->output_to_window)
    OH_AVFormat_SetIntValue(format, OH_MD_KEY_PIXEL_FORMAT,
                            AV_PIXEL_FORMAT_NV12);
else
    OH_AVFormat_SetIntValue(format, OH_MD_KEY_PIXEL_FORMAT,
                            AV_PIXEL_FORMAT_SURFACE_FORMAT);
OH_AVErrCode err = OH_VideoDecoder_Configure(s->dec, format);
OH_AVFormat_Destroy(format);
if (err != AV_ERR_OK) {
if (s->output_to_window) {
    err = OH_VideoDecoder_SetSurface(s->dec, window);
    if (err != AV_ERR_OK) {
if (!OH_AVFormat_GetIntValue(format, OH_MD_KEY_VIDEO_PIC_WIDTH, &s->width) ||
    !OH_AVFormat_GetIntValue(format, OH_MD_KEY_VIDEO_PIC_HEIGHT, &s->height) ||
    !OH_AVFormat_GetIntValue(format, OH_MD_KEY_VIDEO_STRIDE, &s->stride) ||
    !OH_AVFormat_GetIntValue(format, OH_MD_KEY_VIDEO_SLICE_HEIGHT,
/* ... */
if (s->stride <= 0 || s->slice_height <= 0) {
    /* ... */
           "Buffer stride (%d) or slice height (%d) is invalid\n",
           s->stride, s->slice_height);
if (OH_AVFormat_GetIntValue(format, OH_MD_KEY_PIXEL_FORMAT, &n)) {
/* ... */
if (s->output_to_window)
/* ... */
if (OH_AVFormat_GetIntValue(format, OH_MD_KEY_MATRIX_COEFFICIENTS,
/* ... */
if (OH_AVFormat_GetIntValue(format, OH_MD_KEY_COLOR_PRIMARIES,
/* ... */
if (OH_AVFormat_GetIntValue(format, OH_MD_KEY_TRANSFER_CHARACTERISTICS,
/* ... */
if (OH_AVFormat_GetIntValue(format, OH_MD_KEY_RANGE_FLAG,
/* ... */
if (OH_AVFormat_GetDoubleValue(format, OH_MD_KEY_VIDEO_SAR, &d)) {
/* ... */
s->got_stream_info = true;
/* ... */
       OH_AVFormat_DumpInfo(format));
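/*
 * Illustrative sketch (not part of the original file): once the decoder has
 * reported the stream geometry, the usual FFmpeg pattern is to validate it and
 * publish it on the AVCodecContext; the sample aspect ratio arrives as a
 * double and is converted to a rational first. The helper name is hypothetical.
 */
static int sketch_apply_stream_info(AVCodecContext *avctx, OHCodecDecContext *s,
                                    double sar)
{
    int ret = ff_set_dimensions(avctx, s->width, s->height);
    if (ret < 0)
        return ret;
    /* av_d2q() turns the reported double into an AVRational with a bounded
     * denominator before ff_set_sar() validates it. */
    return ff_set_sar(avctx, av_d2q(sar, 4096));
}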
static void oh_decode_on_need_input(OH_AVCodec *codec, uint32_t index,
                                    OH_AVBuffer *buffer, void *userdata)
/* ... */
static void oh_decode_on_output(OH_AVCodec *codec, uint32_t index,
                                OH_AVBuffer *buffer, void *userdata)
OH_AVCodecCallback cb = { /* ... */ };

err = OH_VideoDecoder_RegisterCallback(s->dec, cb, avctx);
if (err != AV_ERR_OK) {
/* ... */
err = OH_VideoDecoder_Prepare(s->dec);
if (err != AV_ERR_OK) {
/* ... */
err = OH_VideoDecoder_Start(s->dec);
if (err != AV_ERR_OK) {
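/*
 * Illustrative sketch (not part of the original file): the elided initializer
 * above presumably wires the four asynchronous callbacks into the
 * OH_AVCodecCallback before Prepare/Start. The field names follow the
 * OpenHarmony buffer-mode callback struct and are an assumption here, as is
 * the helper name.
 */
static int sketch_register_and_start(OHCodecDecContext *s, AVCodecContext *avctx)
{
    OH_AVCodecCallback cb = {
        .onError           = oh_decode_on_err,
        .onStreamChanged   = oh_decode_on_stream_changed,
        .onNeedInputBuffer = oh_decode_on_need_input,
        .onNewOutputBuffer = oh_decode_on_output,
    };

    if (OH_VideoDecoder_RegisterCallback(s->dec, cb, avctx) != AV_ERR_OK ||
        OH_VideoDecoder_Prepare(s->dec) != AV_ERR_OK ||
        OH_VideoDecoder_Start(s->dec) != AV_ERR_OK)
        return AVERROR_EXTERNAL;
    return 0;
}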
if (!s->input_queue || !s->output_queue)
/* ... */
OH_AVErrCode err = OH_VideoDecoder_Stop(s->dec);
if (err == AV_ERR_OK)
/* oh_buffer_release(): render or return the output slot when the frame reference dies */
OH_AVCodec *dec = (OH_AVCodec *)buffer->dec_ref->data;
OH_AVCodecBufferAttr attr;
OH_AVErrCode err = OH_AVBuffer_GetBufferAttr(buffer->buffer, &attr);

if (err == AV_ERR_OK && !(attr.flags & AVCODEC_BUFFER_FLAGS_DISCARD))
    OH_VideoDecoder_RenderOutputBuffer(dec, buffer->index);
else
    OH_VideoDecoder_FreeOutputBuffer(dec, buffer->index);
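/*
 * Illustrative sketch (not part of the original file): a surface-mode output
 * slot is typically handed to the AVFrame as an AVBufferRef whose free
 * callback is oh_buffer_release() above, so dropping the last frame reference
 * either renders the buffer to the window or returns it to the decoder. The
 * exact OHBufferQueueItem layout is an assumption, as is the helper name.
 */
static AVBufferRef *sketch_wrap_output(OHBufferQueueItem *item)
{
    /* No pixel data is copied; the buffer only carries ownership of the
     * decoder output slot until the reference count drops to zero. */
    return av_buffer_create((uint8_t *)item, sizeof(*item),
                            oh_buffer_release, item, AV_BUFFER_FLAG_READONLY);
}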
static int oh_decode_wrap_hw_buffer(AVCodecContext *avctx, AVFrame *frame,
                                    OHBufferQueueItem *output,
                                    const OH_AVCodecBufferAttr *attr)
/* ... */
frame->height = s->height;
/* ... */
if (!frame->buf[0]) {
static int oh_decode_wrap_sw_buffer(AVCodecContext *avctx, AVFrame *frame,
                                    OHBufferQueueItem *output,
                                    const OH_AVCodecBufferAttr *attr)
/* ... */
frame->height = s->height;
/* ... */
uint8_t *p = OH_AVBuffer_GetAddr(output->buffer);
/* ... */
uint8_t *src[4] = {0};
int src_linesizes[4] = {0};
/* ... */
OH_AVErrCode err = OH_VideoDecoder_FreeOutputBuffer(s->dec, output->index);
if (err != AV_ERR_OK) {
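/*
 * Illustrative sketch (not part of the original file): in buffer mode the
 * decoder returns a packed image described by the reported stride and slice
 * height, so the copy into the AVFrame can go through the av_image_* helpers.
 * NV12 and an already-allocated destination frame are assumptions here, as is
 * the helper name.
 */
static void sketch_copy_sw_frame(AVFrame *frame, uint8_t *p, int stride,
                                 int slice_height, int width, int height)
{
    uint8_t *src[4] = {0};
    int src_linesizes[4] = {0};

    /* Describe the source layout: line sizes come from the reported stride,
     * plane pointers from the reported slice height. */
    av_image_fill_linesizes(src_linesizes, AV_PIX_FMT_NV12, stride);
    av_image_fill_pointers(src, AV_PIX_FMT_NV12, slice_height, p, src_linesizes);
    av_image_copy2(frame->data, frame->linesize, src, src_linesizes,
                   AV_PIX_FMT_NV12, width, height);
}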
/* oh_decode_output_frame() */
OH_AVCodecBufferAttr attr;
OH_AVErrCode err = OH_AVBuffer_GetBufferAttr(output->buffer, &attr);
if (err != AV_ERR_OK)
    /* ... */
if (attr.flags & AVCODEC_BUFFER_FLAGS_EOS) {
    /* ... */
    OH_VideoDecoder_FreeOutputBuffer(s->dec, output->index);
    /* ... */
}
if (!s->got_stream_info) {
    /* ... */
           "decoder didn't notify stream info, trying to get format explicitly\n");
    OH_AVFormat *format = OH_VideoDecoder_GetOutputDescription(s->dec);
    /* ... */
    OH_AVFormat_Destroy(format);
    if (!s->got_stream_info)
        /* ... */
}
if (s->output_to_window)
/* oh_decode_send_pkt() */
if (!s->pkt.size && !s->eof_sent) {
    OH_AVCodecBufferAttr attr = {
        .flags = AVCODEC_BUFFER_FLAGS_EOS,
    };

    err = OH_AVBuffer_SetBufferAttr(input->buffer, &attr);
    if (err != AV_ERR_OK)
        /* ... */
    err = OH_VideoDecoder_PushInputBuffer(s->dec, input->index);
    if (err != AV_ERR_OK)
        /* ... */
    /* ... */
}
/* ... */
uint8_t *p = OH_AVBuffer_GetAddr(input->buffer);
/* ... */
       "Failed to get buffer addr (%p) or capacity (%d)\n",
/* ... */
n = FFMIN(s->pkt.size, n);
memcpy(p, s->pkt.data, n);
/* ... */
OH_AVCodecBufferAttr attr = {
    /* ... */
          ? AVCODEC_BUFFER_FLAGS_SYNC_FRAME : 0,
    /* ... */
};
/* ... */
err = OH_AVBuffer_SetBufferAttr(input->buffer, &attr);
if (err != AV_ERR_OK) {
/* ... */
err = OH_VideoDecoder_PushInputBuffer(s->dec, input->index);
if (err != AV_ERR_OK) {
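/*
 * Illustrative sketch (not part of the original file): the buffer attributes
 * for a regular input packet are mostly elided above; a plausible setup
 * rescales the pts to microseconds and marks key packets as sync frames.
 * The helper name and the microsecond assumption are not from the original.
 */
static void sketch_fill_input_attr(OHCodecDecContext *s, AVCodecContext *avctx,
                                   OH_AVCodecBufferAttr *attr, int n)
{
    attr->offset = 0;
    attr->size   = n;
    attr->pts    = s->pkt.pts == AV_NOPTS_VALUE ? 0 :
                   av_rescale_q(s->pkt.pts, avctx->pkt_timebase, AV_TIME_BASE_Q);
    attr->flags  = (s->pkt.flags & AV_PKT_FLAG_KEY) ? AVCODEC_BUFFER_FLAGS_SYNC_FRAME
                                                    : 0;
}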
/* oh_decode_receive_frame(): wait for the async callbacks to deliver a buffer */
while (!s->decode_status) {
/* ... */
if (s->eof_sent && !s->decode_status)
/* ... */
ret = s->decode_status;
/* ... */
while (!s->decode_status) {
/* ... */
ret = s->decode_status;
/* ... */
/* oh_decode_flush(): drop queued data and restart the codec */
OH_VideoDecoder_Flush(s->dec);
/* ... */
s->decode_status = 0;
/* ... */
OH_VideoDecoder_Start(s->dec);
#define OFFSET(x) offsetof(OHCodecDecContext, x)
#define VD (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM)

static const AVOption ohcodec_vdec_options[] = {
    { "codec_name", "Select codec by name", /* ... */ },
    { "allow_sw", "Allow software decoding", /* ... */ },
    /* ... */
};
#define DECLARE_OHCODEC_VCLASS(short_name)                                  \
    static const AVClass short_name##_oh_dec_class = {                      \
        .class_name = #short_name "_ohcodec",                               \
        .item_name  = av_default_item_name,                                 \
        .option     = ohcodec_vdec_options,                                 \
        .version    = LIBAVUTIL_VERSION_INT,                                \
    };

#define DECLARE_OHCODEC_VDEC(short_name, full_name, codec_id, bsf)          \
    DECLARE_OHCODEC_VCLASS(short_name)                                      \
    const FFCodec ff_##short_name##_oh_decoder = {                          \
        .p.name         = #short_name "_ohcodec",                           \
        CODEC_LONG_NAME(full_name " OpenHarmony Codec"),                    \
        .p.type         = AVMEDIA_TYPE_VIDEO,                               \
        .p.id           = codec_id,                                         \
        .p.priv_class   = &short_name##_oh_dec_class,                       \
        .priv_data_size = sizeof(OHCodecDecContext),                        \
        .init           = oh_decode_init,                                   \
        FF_CODEC_RECEIVE_FRAME_CB(oh_decode_receive_frame),                 \
        .flush          = oh_decode_flush,                                  \
        .close          = oh_decode_close,                                  \
        .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | \
                          AV_CODEC_CAP_HARDWARE,                            \
        .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,                        \
        .bsfs           = bsf,                                              \
        .hw_configs     = oh_hw_configs,                                    \
        .p.wrapper_name = "ohcodec",                                        \
    };
#if CONFIG_H264_OH_DECODER
/* ... */
#endif

#if CONFIG_HEVC_OH_DECODER
/* ... */
#endif
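/*
 * Illustrative sketch (not part of the original file): each conditional block
 * above presumably expands DECLARE_OHCODEC_VDEC() for one codec; a
 * hypothetical H.264 instantiation could look like the line below (the
 * bitstream filter name is an assumption).
 */
DECLARE_OHCODEC_VDEC(h264, "H.264", AV_CODEC_ID_H264, "h264_mp4toannexb")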