Go to the documentation of this file.
44 #define MUL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS)
46 #define SAMPLES_BUF_SIZE 4096
69 float scale_factor_inv_table[64];
125 s->bitrate_index =
i;
130 s->frame_size = ((
int)
a) * 8;
134 s->frame_frac_incr = (
int)((
a -
floor(
a)) * 65536.0);
143 ff_dlog(avctx,
"%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\n",
146 for(
i=0;
i<
s->nb_channels;
i++)
147 s->samples_offset[
i] = 0;
155 s->filter_bank[
i] = v;
159 s->filter_bank[512 -
i] = v;
163 v = (
int)(
exp2((3 -
i) / 3.0) * (1 << 20));
166 s->scale_factor_table[
i] = v;
168 s->scale_factor_inv_table[
i] =
exp2(-(3 -
i) / 3.0) / (float)(1 << 20);
171 s->scale_factor_shift[
i] = 21 -
P - (
i / 3);
172 s->scale_factor_mult[
i] = (1 <<
P) *
exp2((
i % 3) / 3.0);
187 s->scale_diff_table[
i] = v;
196 s->total_quant_bits[
i] = 12 * v;
209 for(j=31;j>=3;j-=2)
tab[j] +=
tab[j - 2];
253 x1 =
MUL((t[8] - x2), xp[0]);
254 x2 =
MUL((t[8] + x2), xp[1]);
267 xr =
MUL(t[28],xp[0]);
271 xr =
MUL(t[4],xp[1]);
272 t[ 4] = (t[24] - xr);
273 t[24] = (t[24] + xr);
275 xr =
MUL(t[20],xp[2]);
279 xr =
MUL(t[12],xp[3]);
280 t[12] = (t[16] - xr);
281 t[16] = (t[16] + xr);
286 for (
i = 0;
i < 4;
i++) {
309 xr =
MUL(
t1[0], *xp);
322 #define WSHIFT (WFRAC_BITS + 15 - FRAC_BITS)
332 offset =
s->samples_offset[ch];
333 out = &
s->sb_samples[ch][0][0][0];
342 p =
s->samples_buf[ch] +
offset;
346 sum = p[0*64] * q[0*64];
347 sum += p[1*64] * q[1*64];
348 sum += p[2*64] * q[2*64];
349 sum += p[3*64] * q[3*64];
350 sum += p[4*64] * q[4*64];
351 sum += p[5*64] * q[5*64];
352 sum += p[6*64] * q[6*64];
353 sum += p[7*64] * q[7*64];
370 s->samples_buf[ch], (512 - 32) * 2);
374 s->samples_offset[ch] =
offset;
378 unsigned char scale_code[
SBLIMIT],
379 unsigned char scale_factors[
SBLIMIT][3],
380 int sb_samples[3][12][
SBLIMIT],
383 int *p, vmax, v, n,
i, j, k,
code;
385 unsigned char *sf = &scale_factors[0][0];
387 for(j=0;j<sblimit;j++) {
390 p = &sb_samples[
i][0][j];
403 index = (21 - n) * 3 - 3;
405 while (vmax <= s->scale_factor_table[
index+1])
423 d1 =
s->scale_diff_table[sf[0] - sf[1] + 64];
424 d2 =
s->scale_diff_table[sf[1] - sf[2] + 64];
427 switch(d1 * 5 + d2) {
459 sf[1] = sf[2] = sf[0];
464 sf[0] = sf[1] = sf[2];
470 sf[0] = sf[2] = sf[1];
476 sf[1] = sf[2] = sf[0];
484 sf[0], sf[1], sf[2], d1, d2,
code);
485 scale_code[j] =
code;
497 for(
i=0;
i<
s->sblimit;
i++) {
503 #define SB_NOTALLOCATED 0
504 #define SB_ALLOCATED 1
515 int i, ch,
b, max_smr, max_ch, max_sb, current_frame_size, max_frame_size;
519 const unsigned char *alloc;
521 memcpy(smr, smr1,
s->nb_channels *
sizeof(
short) *
SBLIMIT);
526 max_frame_size =
s->frame_size;
527 s->frame_frac +=
s->frame_frac_incr;
528 if (
s->frame_frac >= 65536) {
529 s->frame_frac -= 65536;
537 current_frame_size = 32;
538 alloc =
s->alloc_table;
539 for(
i=0;
i<
s->sblimit;
i++) {
541 current_frame_size += incr *
s->nb_channels;
549 for(ch=0;ch<
s->nb_channels;ch++) {
550 for(
i=0;
i<
s->sblimit;
i++) {
551 if (smr[ch][
i] > max_smr && subband_status[ch][
i] !=
SB_NOMORE) {
552 max_smr = smr[ch][
i];
560 ff_dlog(
NULL,
"current=%d max=%d max_sb=%d max_ch=%d alloc=%d\n",
561 current_frame_size, max_frame_size, max_sb, max_ch,
566 alloc =
s->alloc_table;
567 for(
i=0;
i<max_sb;
i++) {
568 alloc += 1 << alloc[0];
574 incr +=
s->total_quant_bits[alloc[1]];
578 incr =
s->total_quant_bits[alloc[
b + 1]] -
579 s->total_quant_bits[alloc[
b]];
582 if (current_frame_size + incr <= max_frame_size) {
585 current_frame_size += incr;
587 smr[max_ch][max_sb] = smr1[max_ch][max_sb] -
quant_snr[alloc[
b]];
589 if (
b == ((1 << alloc[0]) - 1))
590 subband_status[max_ch][max_sb] =
SB_NOMORE;
595 subband_status[max_ch][max_sb] =
SB_NOMORE;
598 *padding = max_frame_size - current_frame_size;
610 int i, j, k, l, bit_alloc_bits,
b, ch;
633 for(
i=0;
i<
s->sblimit;
i++) {
634 bit_alloc_bits =
s->alloc_table[j];
635 for(ch=0;ch<
s->nb_channels;ch++) {
638 j += 1 << bit_alloc_bits;
642 for(
i=0;
i<
s->sblimit;
i++) {
643 for(ch=0;ch<
s->nb_channels;ch++) {
650 for(
i=0;
i<
s->sblimit;
i++) {
651 for(ch=0;ch<
s->nb_channels;ch++) {
653 sf = &
s->scale_factors[ch][
i][0];
654 switch(
s->scale_code[ch][
i]) {
678 for(
i=0;
i<
s->sblimit;
i++) {
679 bit_alloc_bits =
s->alloc_table[j];
680 for(ch=0;ch<
s->nb_channels;ch++) {
685 qindex =
s->alloc_table[j+
b];
688 sample =
s->sb_samples[ch][k][l + m][
i];
693 a = (float)
sample *
s->scale_factor_inv_table[
s->scale_factors[ch][
i][k]];
694 q[m] = (
int)((
a + 1.0) * steps * 0.5);
699 e =
s->scale_factors[ch][
i][k];
700 shift =
s->scale_factor_shift[e];
701 mult =
s->scale_factor_mult[e];
712 q[m] = (
q1 * (unsigned)steps) >> (
P + 1);
723 q[0] + steps * (q[1] + steps * q[2]));
732 j += 1 << bit_alloc_bits;
738 for(
i=0;
i<padding;
i++)
746 const int16_t *
samples = (
const int16_t *)
frame->data[0];
755 for(
i=0;
i<
s->nb_channels;
i++) {
757 s->sb_samples[
i],
s->sblimit);
759 for(
i=0;
i<
s->nb_channels;
i++) {
static void idct32(int *out, int *tab)
int frame_size
Number of samples per channel in an audio frame.
static const uint8_t q1[256]
int sb_samples[MPA_MAX_CHANNELS][3][12][SBLIMIT]
int ff_mpa_l2_select_table(int bitrate, int nb_channels, int freq, int lsf)
In filter documentation, the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, a filter declares the list of supported formats: for video that means pixel formats; for audio that means channel layouts and sample formats. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static int put_bytes_output(const PutBitContext *s)
int sample_rate
samples per second
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static void compute_scale_factors(MpegAudioContext *s, unsigned char scale_code[SBLIMIT], unsigned char scale_factors[SBLIMIT][3], int sb_samples[3][12][SBLIMIT], int sblimit)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
static const uint16_t table[]
const int ff_mpa_quant_bits[17]
unsigned char scale_diff_table[128]
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT]
unsigned short scale_factor_mult[64]
static const AVCodecDefault mp2_defaults[]
unsigned short total_quant_bits[17]
static const unsigned char nb_scale_factors[4]
int initial_padding
Audio only.
static const struct twinvq_data tab
static int bit_alloc(AC3EncodeContext *s, int snr_offset)
Run the bit allocation with a given SNR offset.
static int16_t mult(Float11 *f1, Float11 *f2)
static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx, int64_t samples)
Rescale from sample rate to AVCodecContext.time_base.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
const int ff_mpa_quant_steps[17]
int scale_factor_table[64]
unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3]
static __device__ float floor(float a)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static int MPA_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
static const int costab32[30]
const unsigned char *const ff_mpa_alloc_tables[5]
int64_t bit_rate
the average bitrate
static const float fixed_smr[SBLIMIT]
static const int bitinv32[32]
static const unsigned short quant_snr[17]
const int32_t ff_mpa_enwindow[257]
static void encode_frame(MpegAudioContext *s, unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], int padding)
const unsigned char * alloc_table
#define AV_NOPTS_VALUE
Undefined timestamp value.
static void psycho_acoustic_model(MpegAudioContext *s, short smr[SBLIMIT])
short samples_buf[MPA_MAX_CHANNELS][SAMPLES_BUF_SIZE]
static void compute_bit_allocation(MpegAudioContext *s, short smr1[MPA_MAX_CHANNELS][SBLIMIT], unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], int *padding)
int8_t scale_factor_shift[64]
The reader does not expect b to be semantically signed here, and if the code is changed — by maybe adding a cast, a division or other operations — the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
it is the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf offset
int channels
number of audio channels
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Test the status of the outputs and forward it to the corresponding inputs; if appropriate, return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some input, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code:
const int ff_mpa_sblimit_table[5]
const uint16_t ff_mpa_bitrate_tab[2][3][15]
static av_cold int MPA_encode_init(AVCodecContext *avctx)
int samples_offset[MPA_MAX_CHANNELS]
these buffered frames must be flushed immediately if a new input produces new output. If a new input produces new output, the filter must not call request_frame to get more: it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will therefore most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate() callback, this method is called when a frame is wanted on an output. For an input, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return a value, or at least make progress towards producing a frame.
main external API structure.
const uint16_t ff_mpa_freq_tab[3]
In filter documentation, the word “frame” indicates either a video frame or a group of audio samples.
static int shift(int a, int b)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
This structure stores compressed data.
#define MPA_MAX_CODED_FRAME_SIZE
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.