libavcodec/opusdec.c (excerpt)
/* opus_fade(): tail of the parameter list */
                      const float *in1, const float *in2,

/* opus_flush_resample(): drain the resampler into s->cur_out, mix in any
 * buffered CELT delay samples, then crossfade a pending redundancy frame */
                      (uint8_t**)s->cur_out, nb_samples,
    /* ... */
    else if (ret != nb_samples) {
    /* ... */
        if (celt_size != nb_samples) {
    /* ... */
        for (i = 0; i < s->output_channels; i++) {
            s->fdsp->vector_fmac_scalar(s->cur_out[i],
                                        s->celt_output[i], 1.0,
    /* ... */
    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
    /* ... */
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
    /* ... */
        s->redundancy_idx = 0;
    /* ... */
    s->cur_out[0]         += nb_samples;
    s->cur_out[1]         += nb_samples;
    s->remaining_out_size -= nb_samples * sizeof(float);
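The flush path above relies on libswresample's documented draining behavior: calling swr_convert() with a NULL input and an input count of 0 reads out whatever samples are still buffered inside the converter. A minimal sketch of that pattern, with the planar float output buffers and sample count as placeholders rather than values taken from the file:

#include <libswresample/swresample.h>

/* Drain whatever the resampler still holds into 'out' (planar float).
 * Returns the number of samples written, or a negative error code. */
static int drain_resampler(SwrContext *swr, float **out, int max_samples)
{
    /* NULL input + 0 count asks swr_convert() to flush its internal delay */
    return swr_convert(swr, (uint8_t **)out, max_samples, NULL, 0);
}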
 
/* opus_init_resample(): prime the resampler with a short run of silence */
    static const float delay[16] = { 0.0 };
    const uint8_t *delayptr[2] = { (uint8_t*)delay, (uint8_t*)delay };
    /* ... */
               "Error feeding initial silence to the resampler.\n");

/* opus_decode_redundancy(): decode the 240-sample CELT redundancy frame */
    /* ... */
                               s->redundancy_output,
                               s->packet.stereo + 1, 240,
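opus_init_resample() configures the converter through the AVOptions API before feeding it the initial silence shown above. The exact option set used by the decoder is not part of this excerpt, so the snippet below is only a sketch of the usual swr_alloc() / av_opt_set_* / swr_init() sequence for planar-float output at 48 kHz; the input rate and channel count are illustrative assumptions:

#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>

/* Sketch: build an SwrContext that upsamples planar float SILK output
 * (e.g. 16 kHz) to the 48 kHz the Opus decoder always emits. */
static SwrContext *make_silk_resampler(int in_rate, int channels)
{
    AVChannelLayout layout;
    SwrContext *swr = swr_alloc();
    if (!swr)
        return NULL;

    av_channel_layout_default(&layout, channels);     /* mono or stereo */
    av_opt_set_chlayout(swr, "in_chlayout",  &layout, 0);
    av_opt_set_chlayout(swr, "out_chlayout", &layout, 0);
    av_opt_set_int(swr, "in_sample_rate",  in_rate, 0);
    av_opt_set_int(swr, "out_sample_rate", 48000,   0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt",  AV_SAMPLE_FMT_FLTP, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);

    if (swr_init(swr) < 0)
        swr_free(&swr);
    return swr;
}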
 
/* opus_decode_frame(): decode the SILK and/or CELT layer of one frame,
 * resample the SILK output to 48 kHz, and handle the redundancy frame */
    int samples    = s->packet.frame_duration;
    /* ... */
    int redundancy_size, redundancy_pos;
    int ret, i, consumed;
    /* ... */
                                            s->packet.stereo + 1,
    /* ... */
                              (uint8_t**)s->cur_out, s->packet.frame_duration,
                              (const uint8_t**)s->silk_output, samples);
    /* ... */
        s->delayed_samples += s->packet.frame_duration - samples;
    /* ... */
            redundancy_size = size - (consumed + 7) / 8;
        size -= redundancy_size;
    /* ... */
        if (redundancy_pos) {
    /* ... */
        float *out_tmp[2] = { s->cur_out[0], s->cur_out[1] };
    /* ... */
                      out_tmp : s->celt_output;
        int celt_output_samples = samples;
    /* ... */
                for (i = 0; i < s->output_channels; i++) {
                    s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0,
    /* ... */
                    out_tmp[i] += delay_samples;
    /* ... */
                celt_output_samples -= delay_samples;
    /* ... */
                       "Spurious CELT delay samples present.\n");
    /* ... */
                                   s->packet.stereo + 1,
                                   s->packet.frame_duration,
    /* ... */
            int celt_delay = s->packet.frame_duration - celt_output_samples;
            void *delaybuf[2] = { s->celt_output[0] + celt_output_samples,
                                  s->celt_output[1] + celt_output_samples };
    /* ... */
            for (i = 0; i < s->output_channels; i++) {
                s->fdsp->vector_fmac_scalar(out_tmp[i],
                                            s->celt_output[i], 1.0,
                                            celt_output_samples);
    /* ... */
    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
    /* ... */
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
    /* ... */
        s->redundancy_idx = 0;
    /* ... */
        if (!redundancy_pos) {
    /* ... */
            for (i = 0; i < s->output_channels; i++) {
    /* ... */
                          s->redundancy_output[i] + 120,
    /* ... */
            for (i = 0; i < s->output_channels; i++) {
    /* ... */
                          s->redundancy_output[i] + 120,
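Both the flush path and the end of opus_decode_frame() blend the main output with the decoded redundancy frame over a 120-sample window (ff_celt_window2 in the symbol list below). A plain-C sketch of what such a windowed crossfade computes, matching the opus_fade() signature listed below; the exact weighting used by the decoder should be checked against the full source:

/* Crossfade: ramp from in1 to in2 using window[] as the mixing weight.
 * With window[] rising from 0 to 1 over len samples, the output starts
 * as in1 and ends as in2. */
static void fade_sketch(float *out, const float *in1, const float *in2,
                        const float *window, int len)
{
    for (int i = 0; i < len; i++)
        out[i] = in2[i] * window[i] + in1[i] * (1.0f - window[i]);
}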
 
/* opus_decode_subpacket(): tail of the signature, then the per-frame decode loop */
                                 const uint8_t *buf, int buf_size,
    /* ... */
    int output_samples = 0;
    int flush_needed   = 0;
    /* ... */
    s->cur_out[0]         = s->out[0];
    s->cur_out[1]         = s->out[1];
    s->remaining_out_size = s->out_size;
    /* ... */
            int64_t cur_samplerate;
    /* ... */
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
    /* ... */
            flush_needed = !!s->delayed_samples;
    /* ... */
    if (!buf && !flush_needed)
    /* ... */
    if (!s->cur_out[0] ||
        (s->output_channels == 2 && !s->cur_out[1])) {
    /* ... */
                       s->remaining_out_size);
    /* ... */
            s->cur_out[0] = s->out_dummy;
    /* ... */
            s->cur_out[1] = s->out_dummy;
    /* ... */
        output_samples += s->delayed_samples;
        s->delayed_samples = 0;
    /* ... */
    for (i = 0; i < s->packet.frame_count; i++) {
        int size = s->packet.frame_size[i];
    /* ... */
            for (j = 0; j < s->output_channels; j++)
                memset(s->cur_out[j], 0, s->packet.frame_duration * sizeof(float));
    /* ... */
        for (j = 0; j < s->output_channels; j++)
    /* ... */
        s->remaining_out_size -= samples * sizeof(float);
    /* ... */
    s->cur_out[0] = s->cur_out[1] = NULL;
    s->remaining_out_size = 0;

    return output_samples;
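The excerpts lean on two AVFloatDSPContext primitives: vector_fmac_scalar(), used to mix the decoded CELT signal into the already-resampled SILK output, and vector_fmul_scalar(), used further down to scale the finished frame in place. Their per-sample behavior is equivalent to the scalar reference below (the DSP context normally dispatches to SIMD implementations, which also impose alignment and length-multiple requirements not shown here):

/* dst[i] += src[i] * mul -- what vector_fmac_scalar() computes */
static void fmac_scalar_ref(float *dst, const float *src, float mul, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] += src[i] * mul;
}

/* dst[i] = src[i] * mul -- what vector_fmul_scalar() computes */
static void fmul_scalar_ref(float *dst, const float *src, float mul, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src[i] * mul;
}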
 
/* opus_decode_packet(): parse the multistream packet, decode each substream,
 * then map, copy, or silence output channels and apply the output gain */
                              int *got_frame_ptr, AVPacket *avpkt)
    /* ... */
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    int coded_samples   = 0;
    /* ... */
    for (int i = 0; i < c->p.nb_streams; i++) {
    /* ... */
        coded_samples += pkt->frame_count * pkt->frame_duration;
    /* ... */
    if (!frame->nb_samples) {
    /* ... */
    frame->nb_samples = 0;
    /* ... */
            c->streams[map->stream_idx].out[map->channel_idx] = (float*)frame->extended_data[i];
    /* ... */
    for (int i = 0; i < c->p.nb_streams; i++) {
    /* ... */
        float          **out = s->out;
    /* ... */
        float sync_dummy[32];
    /* ... */
    for (int i = 0; i < c->p.nb_streams; i++) {
    /* ... */
            if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
    /* ... */
                       "Mismatching coded sample count in substream %d.\n", i);
    /* ... */
        s->decoded_samples = ret;
    /* ... */
        buf      += s->packet.packet_size;
        buf_size -= s->packet.packet_size;
    /* ... */
    for (int i = 0; i < c->p.nb_streams; i++) {
    /* ... */
        if (buffer_samples) {
            float *buf[2] = { s->out[0] ? s->out[0] : (float*)frame->extended_data[0],
                              s->out[1] ? s->out[1] : (float*)frame->extended_data[0] };
    /* ... */
            memcpy(frame->extended_data[i],
                   frame->extended_data[map->copy_idx],
    /* ... */
        } else if (map->silence) {
            memset(frame->extended_data[i], 0, frame->linesize[0]);
    /* ... */
            c->fdsp->vector_fmul_scalar((float*)frame->extended_data[i],
                                        (float*)frame->extended_data[i],
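Each stream context also carries an AVAudioFifo (sync_buffer in the symbol list below), which appears to be used here to keep the substreams sample-aligned: samples decoded ahead of the other substreams are parked in the FIFO and read back once the rest have caught up. A minimal sketch of that write/read cycle, assuming planar float data and illustrative buffer names not taken from the file:

#include <libavutil/audio_fifo.h>

/* Park 'nb' freshly decoded samples, then pull out whatever is buffered.
 * 'out' must have room for av_audio_fifo_size() samples per channel. */
static int sync_through_fifo(AVAudioFifo *fifo, float **in, float **out, int nb)
{
    int ret = av_audio_fifo_write(fifo, (void **)in, nb);
    if (ret < 0)
        return ret;

    /* Only read what is actually available. */
    int avail = av_audio_fifo_size(fifo);
    return av_audio_fifo_read(fifo, (void **)out, avail);
}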
 
/* opus_decode_flush(): reset all substream decoders */
    for (int i = 0; i < c->p.nb_streams; i++) {
    /* ... */
        memset(&s->packet, 0, sizeof(s->packet));
        s->delayed_samples = 0;
 
/* opus_decode_close(): free per-stream buffers */
    for (int i = 0; i < c->p.nb_streams; i++) {
    /* ... */
        s->out_dummy_allocated_size = 0;
 
/* opus_decode_init(): parse the extradata, then set up per-stream decoders */
        c->gain = ff_exp10(c->p.gain_i / (20.0 * 256));
    /* ... */
    c->streams = av_calloc(c->p.nb_streams, sizeof(*c->streams));
    /* ... */
    for (int i = 0; i < c->p.nb_streams; i++) {
    /* ... */
        s->output_channels = (i < c->p.nb_stereo_streams) ? 2 : 1;
    /* ... */
        for (int j = 0; j < s->output_channels; j++) {
            s->silk_output[j]       = s->silk_buf[j];
            s->celt_output[j]       = s->celt_buf[j];
            s->redundancy_output[j] = s->redundancy_buf[j];
    /* ... */
                                            s->output_channels, 1024);
    /* ... */
                                             s->output_channels, 32);
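The c->gain computation in the init excerpt converts the OpusHead output gain, stored as a signed Q7.8 fixed-point value in decibels, into a linear multiplier: gain = 10^(gain_i / (256 * 20)). For example, gain_i = 256 means +1.0 dB, a factor of roughly 1.122. A tiny self-contained check of that arithmetic, with pow() standing in for ff_exp10() (which, per its description below, just computes 10^x):

#include <math.h>
#include <stdio.h>

int main(void)
{
    int gain_i  = 256;                              /* Q7.8 dB: +1.0 dB */
    double gain = pow(10.0, gain_i / (20.0 * 256)); /* same formula as the init code */
    printf("%.6f\n", gain);                         /* prints 1.122018 */
    return 0;
}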
 
#define OFFSET(x) offsetof(OpusContext, x)
#define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM

static const AVOption opus_options[] = {
    { "apply_phase_inv", "Apply intensity stereo phase inversion", OFFSET(apply_phase_inv), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, AD },
    /* ... */
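The option table is exposed through an AVClass (opus_class in the symbol list below), following the usual libavcodec pattern. A sketch of that wiring; the class_name string is illustrative, only the field layout reflects the standard pattern:

#include <libavutil/log.h>      /* AVClass, av_default_item_name */
#include <libavutil/opt.h>      /* AVOption */
#include <libavutil/version.h>  /* LIBAVUTIL_VERSION_INT */

static const AVClass opus_class_sketch = {
    .class_name = "opus decoder",        /* illustrative string */
    .item_name  = av_default_item_name,
    .option     = opus_options,          /* the table excerpted above */
    .version    = LIBAVUTIL_VERSION_INT,
};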
 
  
void av_audio_fifo_free(AVAudioFifo *af)
Free an AVAudioFifo.
 
@ AV_SAMPLE_FMT_FLTP
float, planar
 
#define AV_LOG_WARNING
Something somehow does not look correct.
 
static av_always_inline double ff_exp10(double x)
Compute 10^x for floating point values.
 
#define AV_EF_EXPLODE
abort decoding on minor error detection
 
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
 
#define AV_CHANNEL_LAYOUT_STEREO
 
int sample_rate
samples per second
 
const FFCodec ff_opus_decoder
 
static int opus_decode_packet(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
 
static int opus_decode_frame(OpusStreamContext *s, const uint8_t *data, int size)
 
This structure describes decoded (raw) audio or video data.
 
static av_cold int opus_decode_close(AVCodecContext *avctx)
 
int nb_channels
Number of channels in this layout.
 
static av_always_inline uint32_t opus_rc_tell(const OpusRangeCoder *rc)
CELT: estimate bits of entropy that have thus far been consumed for the current CELT frame, to integer and fractional (1/8th bit) precision.
 
void ff_celt_flush(CeltFrame *f)
 
static const uint16_t silk_frame_duration_ms[16]
 
int ff_celt_decode_frame(CeltFrame *f, OpusRangeCoder *rc, float **output, int coded_channels, int frame_size, int startband, int endband)
 
static const AVOption opus_options[]
 
void ff_silk_flush(SilkContext *s)
 
Context for an Audio FIFO Buffer.
 
AVCodec p
The public AVCodec.
 
AVChannelLayout ch_layout
Audio channel layout.
 
int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples)
Drain data from an AVAudioFifo.
 
int swr_is_initialized(struct SwrContext *s)
Check whether an swr context has been initialized or not.
 
float redundancy_buf[2][960]
 
static void opus_fade(float *out, const float *in1, const float *in2, const float *window, int len)
 
static int opus_decode_redundancy(OpusStreamContext *s, const uint8_t *data, int size)
 
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
 
#define FF_ARRAY_ELEMS(a)
 
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
 
const uint8_t ff_celt_band_end[]
 
#define FF_CODEC_DECODE_CB(func)
 
uint32_t ff_opus_rc_dec_uint(OpusRangeCoder *rc, uint32_t size)
CELT: read a uniform distribution.
 
int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples)
Write data to an AVAudioFifo.
 
av_cold struct SwrContext * swr_alloc(void)
Allocate SwrContext.
 
static int opus_flush_resample(OpusStreamContext *s, int nb_samples)
 
AVAudioFifo * sync_buffer
 
The libswresample context.
 
#define CODEC_LONG_NAME(str)
 
@ OPUS_BANDWIDTH_WIDEBAND
 
#define LIBAVUTIL_VERSION_INT
 
Describe the class of an AVClass context structure.
 
AVAudioFifo * av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, int nb_samples)
Allocate an AVAudioFifo.
 
const char * av_default_item_name(void *ptr)
Return the context name.
 
int out_dummy_allocated_size
 
int av_opt_get_int(void *obj, const char *name, int search_flags, int64_t *out_val)
 
OpusRangeCoder redundancy_rc
 
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
 
static const AVClass opus_class
 
static av_cold int opus_decode_init(AVCodecContext *avctx)
 
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
 
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
 
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
 
An AVChannelLayout holds information about the channel layout of audio data.
 
#define DECLARE_ALIGNED(n, t, v)
 
int av_opt_set_chlayout(void *obj, const char *name, const AVChannelLayout *channel_layout, int search_flags)
 
static int get_silk_samplerate(int config)
 
enum AVSampleFormat sample_fmt
audio sample format
 
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
 
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t **out_arg, int out_count, const uint8_t **in_arg, int in_count)
Convert audio.
 
const float ff_celt_window2[120]
 
int ff_opus_rc_dec_init(OpusRangeCoder *rc, const uint8_t *data, int size)
 
struct OpusStreamContext * streams
 
int av_audio_fifo_size(AVAudioFifo *af)
Get the current number of samples in the AVAudioFifo available for reading.
 
int ff_silk_init(AVCodecContext *avctx, SilkContext **ps, int output_channels)
 
int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples)
Read data from an AVAudioFifo.
 
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
 
void ff_silk_free(SilkContext **ps)
 
void ff_opus_rc_dec_raw_init(OpusRangeCoder *rc, const uint8_t *rightend, uint32_t bytes)
 
static av_cold void opus_decode_flush(AVCodecContext *ctx)
 
const char * name
Name of the codec implementation.
 
void * av_calloc(size_t nmemb, size_t size)
 
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
 
static int opus_decode_subpacket(OpusStreamContext *s, const uint8_t *buf, int buf_size, int nb_samples)
 
main external API structure.
 
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
 
static int opus_init_resample(OpusStreamContext *s)
 
static const int silk_resample_delay[]
 
#define AV_CHANNEL_LAYOUT_MONO
 
This structure stores compressed data.
 
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
 
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
 
av_cold int ff_opus_parse_extradata(AVCodecContext *avctx, OpusParseContext *s)
 
void ff_celt_free(CeltFrame **f)
 
av_cold void swr_close(SwrContext *s)
Closes the context so that swr_is_initialized() returns 0.
 
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
 
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
 
uint32_t ff_opus_rc_dec_log(OpusRangeCoder *rc, uint32_t bits)
 
int ff_silk_decode_superframe(SilkContext *s, OpusRangeCoder *rc, float *output[2], enum OpusBandwidth bandwidth, int coded_channels, int duration_ms)
Decode the LP layer of one Opus frame (which may correspond to several SILK frames).
 
float * redundancy_output[2]
 
void * priv_data
Format private data.
 
int ff_opus_parse_packet(OpusPacket *pkt, const uint8_t *buf, int buf_size, int self_delimiting)
Parse Opus packet info from raw packet data.
 
int ff_celt_init(AVCodecContext *avctx, CeltFrame **f, int output_channels, int apply_phase_inv)