Go to the documentation of this file.
29 #define BITSTREAM_READER_LE
69 s->bitstream_size = 0;
78 s->bitstream_size = 0;
115 case 24:
s->factor = 256;
131 sizeof(*
s->decode_buffer[ch]));
132 if (!
s->decode_buffer[ch])
143 memset(
cb->history, 0,
sizeof(
cb->history));
144 cb->pos =
cb->count =
cb->sum = 0;
168 double f =
floor(sum / 1.4426952 + 0.5);
171 }
else if (
f >= 31) {
206 if (
cb->prediction >= 15)
208 if (
cb->coding_mode > 0 &&
cb->coding_mode < 3) {
210 if (!
cb->residue_parameter ||
cb->residue_parameter >= 31)
212 }
else if (
cb->coding_mode == 3) {
214 if (!
cb->residue_bits ||
cb->residue_bits >= 31)
216 }
else if (
cb->coding_mode) {
220 if (
cb->coding_mode == 2)
231 #define P2 (((unsigned)dst[A] + dst[A]) - dst[B])
232 #define P3 (((unsigned)dst[A] - dst[B]) * 3 + dst[C])
238 const int nb_samples =
frame->nb_samples;
241 for (
int n = 0; n < nb_samples; n++) {
242 for (
int ch = 0; ch < nb_channels; ch++) {
247 if (nb_channels == 2 && ch == 1 &&
decorrelate !=
s->decorrelate) {
262 if (!
cb->coding_mode) {
264 }
else if (
cb->coding_mode == 3) {
278 switch (
cb->prediction) {
285 dst[n] += (unsigned)
dst[
A] + p;
300 dst[n] += (int)(
P2 +
P3) / 2 + (unsigned)p;
306 dst[n] += (int)(
P2 * 2 +
P3) / 3 + (unsigned)p;
309 dst[n] += (int)(
P2 +
P3 * 2) / 3 + (unsigned)p;
321 dst[n] += (int)((
unsigned)
P2 +
dst[
A]) / 2 + (
unsigned)p;
338 if (
cb->coding_mode == 2) {
343 if (nb_channels == 2 && ch == 1) {
345 dst[n] += (unsigned)
s->decode_buffer[0][
OFFSET+n];
359 const int nb_samples =
frame->nb_samples;
361 const unsigned factor =
s->factor;
369 for (
int ch = 0; ch < nb_channels; ch++) {
383 for (
int ch = 0; ch < nb_channels; ch++) {
384 uint8_t *
dst = (uint8_t *)
frame->extended_data[ch];
387 for (
int n = 0; n < nb_samples; n++)
392 for (
int ch = 0; ch < nb_channels; ch++) {
393 int16_t *
dst = (int16_t *)
frame->extended_data[ch];
396 for (
int n = 0; n < nb_samples; n++)
401 for (
int ch = 0; ch < nb_channels; ch++) {
405 for (
int n = 0; n < nb_samples; n++)
422 while (
s->bitstream_size <
s->max_framesize) {
435 size =
FFMIN(
s->pkt->size -
s->pkt_offset,
s->max_framesize -
s->bitstream_size);
436 memcpy(
s->bitstream +
s->bitstream_size,
s->pkt->data +
s->pkt_offset,
size);
437 s->bitstream_size +=
size;
438 s->pkt_offset +=
size;
440 if (
s->pkt_offset ==
s->pkt->size) {
446 frame->nb_samples =
FFMIN(
s->frame_samples,
s->nb_samples);
447 if (
frame->nb_samples <= 0)
459 s->nb_samples -=
frame->nb_samples;
462 if (n >
s->bitstream_size) {
467 memmove(
s->bitstream, &
s->bitstream[n],
s->bitstream_size - n);
468 s->bitstream_size -= n;
473 s->bitstream_size = 0;
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
static int get_bits_left(GetBitContext *gb)
Filters: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, filters declare the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. Format lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
int sample_rate
samples per second
static double cb(void *priv, double x, double y)
#define AVERROR_EOF
End of file.
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
int32_t * decode_buffer[2]
static int get_bits_count(const GetBitContext *s)
static void osq_flush(AVCodecContext *avctx)
This structure describes decoded (raw) audio or video data.
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
enum AVChannelOrder order
Channel order used in this layout.
int nb_channels
Number of channels in this layout.
unsigned residue_parameter
static int do_decode(AVCodecContext *avctx, AVFrame *frame, int decorrelate, int downsample)
AVCodec p
The public AVCodec.
AVChannelLayout ch_layout
Audio channel layout.
static double val(void *priv, double ch)
static __device__ float ceil(float a)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static __device__ float floor(float a)
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
#define CODEC_LONG_NAME(str)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable as their absolute value.
static int update_residue_parameter(OSQChannel *cb)
struct AVCodecInternal * internal
Private context used for internal data.
static unsigned int get_bits1(GetBitContext *s)
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
@ AV_SAMPLE_FMT_U8P
unsigned 8 bits, planar
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
enum AVSampleFormat sample_fmt
audio sample format
static void skip_bits1(GetBitContext *s)
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
static int osq_decode_block(AVCodecContext *avctx, AVFrame *frame)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused by decoders implementing the receive_frame API.
AVSampleFormat
Audio sample formats.
const char * name
Name of the codec implementation.
static int osq_receive_frame(AVCodecContext *avctx, AVFrame *frame)
void * av_calloc(size_t nmemb, size_t size)
static void reset_stats(OSQChannel *cb)
These buffered frames must be flushed immediately if a new input produces new output. In that situation, the filter must not call request_frame to get more input; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; therefore, any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate mechanism, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly if necessary, until at least one frame has been pushed. It must return, or at least make progress towards producing a frame.
static const uint8_t * align_get_bits(GetBitContext *s)
#define AV_INPUT_BUFFER_PADDING_SIZE
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
main external API structure.
#define FF_CODEC_RECEIVE_FRAME_CB(func)
static av_cold int osq_init(AVCodecContext *avctx)
static uint32_t get_urice(GetBitContext *gb, int k)
const FFCodec ff_osq_decoder
static av_cold int osq_close(AVCodecContext *avctx)
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
static const int factor[16]
static void update_stats(OSQChannel *cb, int val)
This structure stores compressed data.
static int osq_channel_parameters(AVCodecContext *avctx, int ch)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int get_sbits_long(GetBitContext *s, int n)
Read 0-32 bits as a signed integer.
static int32_t get_srice(GetBitContext *gb, int x)