26 #define BITSTREAM_READER_LE
43 #define DSD_BYTE_READY(low,high) (!(((low) ^ (high)) & 0xff000000))
46 #define PTABLE_BINS (1<<PTABLE_BITS)
47 #define PTABLE_MASK (PTABLE_BINS-1)
50 #define DOWN 0x00010000
54 #define VALUE_ONE (1 << PRECISION)
55 #define PRECISION_USE 12
59 #define MAX_HISTORY_BITS 5
60 #define MAX_HISTORY_BINS (1 << MAX_HISTORY_BITS)
61 #define MAX_BIN_BYTES 1280 // for value_lookup, per bin (2k - 512 - 256)
117 #define LEVEL_DECAY(a) (((a) + 0x80) >> 8)
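The two helpers above are easiest to see on concrete values: DSD_BYTE_READY() tests whether the range coder's low and high bounds already agree in their top byte (so a byte can be shifted out), and LEVEL_DECAY() is a rounded divide-by-256 used for slowly decaying level estimates. A standalone illustration (not part of wavpack.c):

#include <stdio.h>
#include <stdint.h>

#define DSD_BYTE_READY(low, high) (!(((low) ^ (high)) & 0xff000000))
#define LEVEL_DECAY(a)            (((a) + 0x80) >> 8)

int main(void)
{
    uint32_t low = 0x12340000, high = 0x12ffffff;      /* top bytes agree (0x12) */
    printf("ready: %d\n", DSD_BYTE_READY(low, high));  /* 1 */

    high = 0x13ffffff;                                 /* top bytes differ */
    printf("ready: %d\n", DSD_BYTE_READY(low, high));  /* 0 */

    int level = 100000;                                /* shrink by ~1/256 per step, with rounding */
    for (int i = 0; i < 4; i++) {
        level -= LEVEL_DECAY(level);
        printf("level: %d\n", level);
    }
    return 0;
}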
126 e = (1LL << (p + 1)) - k - 1;
137     for (i = 0; i <= ctx->stereo_in; i++) {
138         if (ctx->ch[i].bitrate_acc > UINT_MAX - ctx->ch[i].bitrate_delta)
140         ctx->ch[i].bitrate_acc += ctx->ch[i].bitrate_delta;
141         br[i] = ctx->ch[i].bitrate_acc >> 16;
144     if (ctx->stereo_in && ctx->hybrid_bitrate) {
145         int balance = (sl[1] - sl[0] + br[1] + 1) >> 1;
146         if (balance > br[0]) {
149         } else if (-balance > br[0]) {
153             br[1] = br[0] + balance;
154             br[0] = br[0] - balance;
157     for (i = 0; i <= ctx->stereo_in; i++) {
158         if (ctx->hybrid_bitrate) {
159             if (sl[i] - br[i] > -0x100)
162             ctx->ch[i].error_limit = 0;
180     if ((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) &&
181         !ctx->zero && !ctx->one) {
200         memset(ctx->ch[0].median, 0, sizeof(ctx->ch[0].median));
201         memset(ctx->ch[1].median, 0, sizeof(ctx->ch[1].median));
265     if (!c->error_limit) {
270         int mid = (base * 2U + add + 1) >> 1;
271         while (add > c->error_limit) {
275                 add -= (mid - (unsigned)base);
278                 add = mid - (unsigned)base - 1;
279             mid = (base * 2U + add + 1) >> 1;
284     if (ctx->hybrid_bitrate)
303         S *= 1 << s->extra_bits;
305         if (s->got_extra_bits &&
308             *crc = *crc * 9 + (S & 0xffff) * 3 + ((unsigned)S >> 16);
312     bit = (S & s->and) | s->or;
318     return bit << s->post_shift;
329     int exp = s->float_max_exp;
331     if (s->got_extra_bits) {
332         const int max_bits = 1 + 23 + 8 + 1;
340     S *= 1U << s->float_shift;
344     if (S >= 0x1000000U) {
345         if (s->got_extra_bits && get_bits1(&s->gb_extra_bits))
352         exp = s->float_max_exp;
360             (s->got_extra_bits &&
364         } else if (s->got_extra_bits &&
370             exp = s->float_max_exp;
379     if (s->float_max_exp >= 25)
389     *crc = *crc * 27 + S * 9 + exp * 3 + sign;
391     value.u = (sign << 31) | (exp << 23) | S;
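Line 391 reassembles the decoded sign, exponent and mantissa into an IEEE-754 binary32 word through a union. A minimal sketch of that bit layout, assuming the usual 1/8/23 split (this is only the final packing step, not the decoder's full float path):

#include <stdio.h>
#include <stdint.h>

union float_bits {
    uint32_t u;
    float    f;
};

/* Pack sign (0/1), biased exponent (0..255) and a 23-bit mantissa into a
 * binary32 word, mirroring value.u = (sign << 31) | (exp << 23) | S. */
static float pack_float(uint32_t sign, uint32_t exp, uint32_t mant)
{
    union float_bits v;
    v.u = (sign << 31) | (exp << 23) | (mant & 0x7fffff);
    return v.f;
}

int main(void)
{
    printf("%f\n", pack_float(0, 127, 0x400000)); /* 1.500000  */
    printf("%f\n", pack_float(1, 128, 0));        /* -2.000000 */
    return 0;
}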
396 uint32_t crc_extra_bits)
402     if (s->got_extra_bits && crc_extra_bits != s->crc_extra_bits) {
412 int value = 0x808000, rate = rate_i << 8;
414     for (int c = (rate + 128) >> 8; c--;)
421         if (value > 0x010000) {
422             rate += (rate * rate_s + 128) >> 8;
424             for (int c = (rate + 64) >> 7; c--;)
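init_ptable() starts from 0x808000 and repeatedly relaxes the value toward the DOWN bound, with the repeat counts derived from rate_i and rate_s. The loop bodies are not part of this excerpt; the sketch below only shows the general shape of such a geometric decay, with an assumed decay shift of 8 (the real constant is defined elsewhere in wavpack.c):

#include <stdio.h>

#define DOWN        0x00010000
#define DECAY_SHIFT 8   /* assumed for illustration; not taken from this excerpt */

/* Relax `value` toward DOWN, each step closing 1/2^DECAY_SHIFT of the gap. */
static int decay_towards_down(int value, int steps)
{
    while (steps--)
        value += (DOWN - value) >> DECAY_SHIFT;
    return value;
}

int main(void)
{
    int value = 0x808000;
    for (int i = 1; i <= 4; i++) {
        value = decay_towards_down(value, 1);
        printf("after %d step(s): 0x%06x\n", i, (unsigned)value);
    }
    return 0;
}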
437 uint32_t checksum = 0xFFFFFFFF;
438 uint8_t *dst_l = dst_left, *dst_r = dst_right;
439     int total_samples = s->samples, stereo = dst_r ? 1 : 0;
447     rate_i = bytestream2_get_byte(&s->gbyte);
448     rate_s = bytestream2_get_byte(&s->gbyte);
464         sp->factor  = bytestream2_get_byte(&s->gbyte) & 0xff;
465         sp->factor |= (bytestream2_get_byte(&s->gbyte) << 8) & 0xff00;
469     value = bytestream2_get_be32(&s->gbyte);
473 while (total_samples--) {
483             uint32_t split = low + ((high - low) >> 8) * (*pp >> 16);
498                 value = (value << 8) | bytestream2_get_byte(&s->gbyte);
504             sp[0].byte = (sp[0].byte << 1) | (sp[0].fltr0 & 1);
506                 ((sp[0].value ^ (sp[0].value - (sp[0].fltr6 * 16))) >> 31);
520             split = low + ((high - low) >> 8) * (*pp >> 16);
535                 value = (value << 8) | bytestream2_get_byte(&s->gbyte);
541             sp[1].byte = (sp[1].byte << 1) | (sp[1].fltr0 & 1);
543                 ((sp[1].value ^ (sp[1].value - (sp[1].fltr6 * 16))) >> 31);
554         checksum += (checksum << 1) + (*dst_l = sp[0].byte & 0xff);
559             checksum += (checksum << 1) + (*dst_r = filters[1].byte & 0xff);
569         memset(dst_left, 0x69, s->samples * 4);
572             memset(dst_right, 0x69, s->samples * 4);
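wv_unpack_dsd_high() keeps a byte-oriented range coder: whenever low and high share their top byte (the DSD_BYTE_READY() condition), one byte is shifted out and a fresh input byte is pulled into value, as in lines 498 and 535 above. A self-contained sketch of just that renormalization step, assuming the usual high = (high << 8) | 0xff update (the real decoder interleaves this with its adaptive prediction filters):

#include <stdio.h>
#include <stdint.h>

#define DSD_BYTE_READY(low, high) (!(((low) ^ (high)) & 0xff000000))

struct range_state {
    uint32_t low, high, value;
    const uint8_t *ptr, *end;
};

/* Shift out bytes while low and high share their top byte, pulling fresh
 * input bytes into `value` (zero-padding once the input runs out). */
static void renormalize(struct range_state *rc)
{
    while (DSD_BYTE_READY(rc->low, rc->high)) {
        uint8_t b = rc->ptr < rc->end ? *rc->ptr++ : 0;
        rc->value = (rc->value << 8) | b;
        rc->high  = (rc->high  << 8) | 0xff;
        rc->low <<= 8;
    }
}

int main(void)
{
    static const uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
    struct range_state rc = { 0x12000000, 0x12ffffff, 0, buf, buf + sizeof(buf) };
    renormalize(&rc);  /* top bytes agreed once, so exactly one byte is consumed */
    printf("consumed %d byte(s), value = 0x%08x\n", (int)(rc.ptr - buf), rc.value);
    return 0;
}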
580 uint8_t *dst_l = dst_left, *dst_r = dst_right;
581 uint8_t history_bits, max_probability;
582 int total_summed_probabilities = 0;
583     int total_samples = s->samples;
584     uint8_t *vlb = s->value_lookup_buffer;
585 int history_bins, p0, p1, chan;
586 uint32_t checksum = 0xFFFFFFFF;
592     history_bits = bytestream2_get_byte(&s->gbyte);
597 history_bins = 1 << history_bits;
598     max_probability = bytestream2_get_byte(&s->gbyte);
600 if (max_probability < 0xff) {
601         uint8_t *outptr = (uint8_t *)s->probabilities;
602         uint8_t *outend = outptr + sizeof(*s->probabilities) * history_bins;
605             int code = bytestream2_get_byte(&s->gbyte);
607             if (code > max_probability) {
608                 int zcount = code - max_probability;
610                 while (outptr < outend && zcount--)
620         if (outptr < outend ||
625                                   sizeof(*s->probabilities) * history_bins);
630 for (p0 = 0; p0 < history_bins; p0++) {
633         for (int i = 0; i < 256; i++)
634             s->summed_probabilities[p0][i] = sum_values += s->probabilities[p0][i];
637         total_summed_probabilities += sum_values;
639         if (total_summed_probabilities > history_bins * MAX_BIN_BYTES)
642 s->value_lookup[p0] = vlb;
644         for (int i = 0; i < 256; i++) {
645             int c = s->probabilities[p0][i];
657     low = 0; high = 0xffffffff;
658     value = bytestream2_get_be32(&s->gbyte);
663 while (total_samples--) {
666         if (!s->summed_probabilities[p0][255])
669             mult = (high - low) / s->summed_probabilities[p0][255];
673                 value = bytestream2_get_be32(&s->gbyte);
677             mult = high / s->summed_probabilities[p0][255];
685         if (index >= s->summed_probabilities[p0][255])
689         if ((*dst_l = code = s->value_lookup[p0][index]))
690             low += s->summed_probabilities[p0][code-1] * mult;
695                 low += s->summed_probabilities[p0][code-1] * mult;
710         checksum += (checksum << 1) + code;
713             p0 = code & (history_bins-1);
716             p1 = code & (history_bins-1);
720             value = (value << 8) | bytestream2_get_byte(&s->gbyte);
730         memset(dst_left, 0x69, s->samples * 4);
733             memset(dst_right, 0x69, s->samples * 4);
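wv_unpack_dsd_fast() converts the per-bin byte probabilities into running sums (summed_probabilities) plus a flat value_lookup table, so an index inside the current range maps straight back to a byte value (lines 634 and 644-645 above). A hedged sketch of that table construction on a toy distribution, independent of the WavPack context structures:

#include <stdio.h>
#include <stdint.h>

/* Build the cumulative table plus a flat reverse lookup: every index in
 * [0, summed[255]) maps back to the byte value whose slot it falls into. */
static void build_tables(const uint8_t prob[256], uint16_t summed[256],
                         uint8_t *lookup)
{
    unsigned sum = 0, pos = 0;
    for (int i = 0; i < 256; i++) {
        summed[i] = sum += prob[i];
        for (int c = prob[i]; c > 0; c--)
            lookup[pos++] = i;
    }
}

int main(void)
{
    uint8_t  prob[256] = { 0 };
    uint16_t summed[256];
    uint8_t  lookup[4];

    prob[0x10] = 3;                    /* toy distribution: three slots for 0x10, */
    prob[0x20] = 1;                    /* one slot for 0x20                       */
    build_tables(prob, summed, lookup);

    printf("total = %u\n", (unsigned)summed[255]);  /* 4    */
    printf("lookup[2] = 0x%02x\n", lookup[2]);      /* 0x10 */
    printf("lookup[3] = 0x%02x\n", lookup[3]);      /* 0x20 */
    return 0;
}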
741 uint8_t *dst_l = dst_left, *dst_r = dst_right;
742     int total_samples = s->samples;
743 uint32_t checksum = 0xFFFFFFFF;
748 while (total_samples--) {
749         checksum += (checksum << 1) + (*dst_l = bytestream2_get_byte(&s->gbyte));
753             checksum += (checksum << 1) + (*dst_r = bytestream2_get_byte(&s->gbyte));
762         memset(dst_left, 0x69, s->samples * 4);
765             memset(dst_right, 0x69, s->samples * 4);
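All three DSD unpackers accumulate the same rolling checksum over the emitted bytes: checksum += (checksum << 1) + byte, i.e. checksum = checksum * 3 + byte seeded with 0xFFFFFFFF. The same update as a standalone helper:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* checksum = checksum * 3 + byte for every output byte, seeded with 0xFFFFFFFF,
 * mirroring the update used in the DSD unpacking loops above. */
static uint32_t rolling_checksum(const uint8_t *buf, size_t len)
{
    uint32_t checksum = 0xFFFFFFFF;
    while (len--)
        checksum += (checksum << 1) + *buf++;
    return checksum;
}

int main(void)
{
    const uint8_t data[3] = { 1, 2, 3 };
    printf("0x%08x\n", rolling_checksum(data, sizeof(data)));
    return 0;
}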
772                             void *dst_l, void *dst_r, const int type)
778 uint32_t crc = 0xFFFFFFFF;
779 uint32_t crc_extra_bits = 0xFFFFFFFF;
780 int16_t *dst16_l = dst_l;
781 int16_t *dst16_r = dst_r;
784 float *dstfl_l = dst_l;
785 float *dstfl_r = dst_r;
787     s->one = s->zero = s->zeroes = 0;
795         for (i = 0; i < s->terms; i++) {
820                 L2 = L + (unsigned)((int)(decorr->weightA * (unsigned)A + 512) >> 10);
821                 R2 = R + (unsigned)((int)(decorr->weightB * (unsigned)B + 512) >> 10);
829             } else if (t == -1) {
833                 L2 = L + (unsigned)((int)(decorr->weightA * (unsigned)decorr->samplesA[0] + 512) >> 10);
839                 R2 = R + (unsigned)((int)(decorr->weightB * (unsigned)L2 + 512) >> 10);
847                 R2 = R + (unsigned)((int)(decorr->weightB * (unsigned)decorr->samplesB[0] + 512) >> 10);
859                 L2 = L + (unsigned)((int)(decorr->weightA * (unsigned)R2 + 512) >> 10);
875             L += (unsigned)(R -= (unsigned)(L >> 1));
876         crc = (crc * 3 + L) * 3 + R;
889     } while (!last && count < s->samples);
891     if (last && count < s->samples) {
893         memset((uint8_t*)dst_l + count*size, 0, (s->samples-count)*size);
894         memset((uint8_t*)dst_r + count*size, 0, (s->samples-count)*size);
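Lines 820/821 show the core decorrelation step, residual + ((weight * history + 512) >> 10) with 6.10 fixed-point weights, and line 875 undoes the joint-stereo (difference/mid) transform. A small sketch of both reconstruction steps on plain integers (illustrative only; the real code uses unsigned arithmetic to keep overflow defined):

#include <stdio.h>
#include <stdint.h>

/* One decorrelation step: residual plus (weight * history + 512) >> 10,
 * with the weight in 6.10 fixed point as in lines 820/821. */
static int32_t decorr_step(int32_t residual, int weight, int32_t history)
{
    return residual + (int32_t)(((int64_t)weight * history + 512) >> 10);
}

/* Undo the joint-stereo transform of line 875: one channel carries the
 * difference, the other a mid value; recover the plain left/right pair. */
static void undo_joint_stereo(int32_t *L, int32_t *R)
{
    *R -= *L >> 1;
    *L += *R;
}

int main(void)
{
    printf("%d\n", decorr_step(5, 1024, 100));  /* weight 1.0 -> 100 + 5 = 105 */

    int32_t L = 10, R = 25;
    undo_joint_stereo(&L, &R);
    printf("L=%d R=%d\n", L, R);                /* L=30 R=20 */
    return 0;
}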
911 uint32_t crc = 0xFFFFFFFF;
912 uint32_t crc_extra_bits = 0xFFFFFFFF;
913     int16_t *dst16 = dst;
917     s->one = s->zero = s->zeroes = 0;
923         for (i = 0; i < s->terms; i++) {
941                 S = T + (unsigned)((int)(decorr->weightA * (unsigned)A + 512) >> 10);
957     } while (!last && count < s->samples);
959     if (last && count < s->samples) {
961         memset((uint8_t*)dst + count*size, 0, (s->samples-count)*size);
982     if (!c->fdec[c->fdec_num])
985     c->fdec[c->fdec_num - 1]->avctx = c->avctx;
1003         channels > SIZE_MAX / sizeof(*s->dsdctx))
1012         memset(s->dsdctx[i].buf, 0x69, sizeof(s->dsdctx[i].buf));
1063                                               progress_pool_init_cb,
1064                                               progress_pool_reset_cb,
1065                                               progress_pool_free_entry_cb, NULL);
1066     if (!s->progress_pool)
1078     for (int i = 0; i < s->fdec_num; i++)
1090                                const uint8_t *buf, int buf_size, int *new_progress)
1096     void *samples_l = NULL, *samples_r = NULL;
1098 int got_terms = 0, got_weights = 0, got_samples = 0,
1099 got_entropy = 0, got_pcm = 0, got_float = 0, got_hybrid = 0;
1102 int bpp, chan = 0, orig_bpp, sample_rate = 0, rate_x = 1, dsd_mode = 0;
1104 uint64_t chmask = 0;
1111     s = wc->fdec[block_no];
1114     memset(s->ch, 0, sizeof(s->ch));
1116     s->and = s->or = s->shift = 0;
1117     s->got_extra_bits = 0;
1121     s->samples = bytestream2_get_le32(&gb);
1124                "a sequence: %d and %d\n", wc->samples, s->samples);
1127 s->frame_flags = bytestream2_get_le32(&gb);
1131     else if ((s->frame_flags & 0x03) <= 1)
1140     orig_bpp = ((s->frame_flags & 0x03) + 1) << 3;
1143     s->stereo = !(s->frame_flags & WV_MONO);
1148     s->post_shift = bpp * 8 - orig_bpp + ((s->frame_flags >> 13) & 0x1f);
1149     if (s->post_shift < 0 || s->post_shift > 31) {
1152 s->hybrid_maxclip = ((1LL << (orig_bpp - 1)) - 1);
1153 s->hybrid_minclip = ((-1UL << (orig_bpp - 1)));
1154 s->CRC = bytestream2_get_le32(&gb);
1158 id = bytestream2_get_byte(&gb);
1159 size = bytestream2_get_byte(&gb);
1161 size |= (bytestream2_get_le16u(&gb)) << 8;
1168 "Got incorrect block %02X with size %i\n",
id,
size);
1173 "Block size %i is out of bounds\n",
size);
1185             for (i = 0; i < s->terms; i++) {
1186                 uint8_t val = bytestream2_get_byte(&gb);
1187                 s->decorr[s->terms - i - 1].value = (val & 0x1F) - 5;
1188                 s->decorr[s->terms - i - 1].delta = val >> 5;
1204 t = (int8_t)bytestream2_get_byte(&gb);
1205                 s->decorr[s->terms - i - 1].weightA = t * (1 << 3);
1206                 if (s->decorr[s->terms - i - 1].weightA > 0)
1207                     s->decorr[s->terms - i - 1].weightA +=
1208                         (s->decorr[s->terms - i - 1].weightA + 64) >> 7;
1210 t = (int8_t)bytestream2_get_byte(&gb);
1211                     s->decorr[s->terms - i - 1].weightB = t * (1 << 3);
1212                     if (s->decorr[s->terms - i - 1].weightB > 0)
1213                         s->decorr[s->terms - i - 1].weightB +=
1214                             (s->decorr[s->terms - i - 1].weightB + 64) >> 7;
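Lines 1205-1214 expand each transmitted 8-bit weight back to its internal range: multiply by 8, then add a small (w + 64) >> 7 correction for positive weights. The same expansion as a tiny standalone helper:

#include <stdio.h>
#include <stdint.h>

/* Expand a stored signed 8-bit weight: w = t * 8, plus (w + 64) >> 7 when
 * positive, matching the weightA/weightB restoration at lines 1205-1214. */
static int restore_weight(int8_t t)
{
    int w = t * (1 << 3);
    if (w > 0)
        w += (w + 64) >> 7;
    return w;
}

int main(void)
{
    printf("%d %d %d\n", restore_weight(127), restore_weight(1), restore_weight(-64));
    /* prints: 1024 8 -512 */
    return 0;
}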
1225             for (i = s->terms - 1; (i >= 0) && (t < size); i--) {
1228                 if (decorr->value > 8) {
1230 wp_exp2(bytestream2_get_le16(&gb));
1232 wp_exp2(bytestream2_get_le16(&gb));
1236 wp_exp2(bytestream2_get_le16(&gb));
1238 wp_exp2(bytestream2_get_le16(&gb));
1242                 } else if (decorr->value < 0) {
1244 wp_exp2(bytestream2_get_le16(&gb));
1246 wp_exp2(bytestream2_get_le16(&gb));
1249                     for (j = 0; j < decorr->value; j++) {
1251 wp_exp2(bytestream2_get_le16(&gb));
1254 wp_exp2(bytestream2_get_le16(&gb));
1257                 t += decorr->value * 2 * (s->stereo_in + 1);
1263             if (size != 6 * (s->stereo_in + 1)) {
1265                        "Entropy vars size should be %i, got %i.\n",
1266                        6 * (s->stereo_in + 1), size);
1270             for (j = 0; j <= s->stereo_in; j++)
1271                 for (i = 0; i < 3; i++) {
1272                     s->ch[j].median[i] = wp_exp2(bytestream2_get_le16(&gb));
1277             if (s->hybrid_bitrate) {
1278                 for (i = 0; i <= s->stereo_in; i++) {
1279                     s->ch[i].slow_level = wp_exp2(bytestream2_get_le16(&gb));
1283                 for (i = 0; i < (s->stereo_in + 1); i++) {
1284                     s->ch[i].bitrate_acc = bytestream2_get_le16(&gb) << 16;
1288                 for (i = 0; i < (s->stereo_in + 1); i++) {
1289                     s->ch[i].bitrate_delta =
1290                         wp_exp2((int16_t)bytestream2_get_le16(&gb));
1293                 for (i = 0; i < (s->stereo_in + 1); i++)
1294                     s->ch[i].bitrate_delta = 0;
1302 "Invalid INT32INFO, size = %i\n",
1310 "Invalid INT32INFO, extra_bits = %d (> 30)\n",
val[0]);
1313 s->extra_bits =
val[0];
1325 if (
s->shift > 31) {
1327 "Invalid INT32INFO, shift = %d (> 31)\n",
s->shift);
1328 s->and =
s->or =
s->shift = 0;
1333 if (
s->hybrid && bpp == 4 &&
s->post_shift < 8 &&
s->shift > 8) {
1336 s->hybrid_maxclip >>= 8;
1337 s->hybrid_minclip >>= 8;
1344 "Invalid FLOATINFO, size = %i\n",
size);
1348 s->float_flag = bytestream2_get_byte(&gb);
1349 s->float_shift = bytestream2_get_byte(&gb);
1350 s->float_max_exp = bytestream2_get_byte(&gb);
1351             if (s->float_shift > 31) {
1353                        "Invalid FLOATINFO, shift = %d (> 31)\n", s->float_shift);
1373 rate_x = bytestream2_get_byte(&gb);
1376 rate_x = 1 << rate_x;
1377 dsd_mode = bytestream2_get_byte(&gb);
1378 if (dsd_mode && dsd_mode != 1 && dsd_mode != 3) {
1398 s->got_extra_bits = 1;
1403 "Insufficient channel information\n");
1406 chan = bytestream2_get_byte(&gb);
1409 chmask = bytestream2_get_byte(&gb);
1412 chmask = bytestream2_get_le16(&gb);
1415 chmask = bytestream2_get_le24(&gb);
1418 chmask = bytestream2_get_le32(&gb);
1421 bytestream2_get_byte(&gb);
1422 chan |= (bytestream2_get_byte(&gb) & 0xF) << 8;
1424 chmask = bytestream2_get_le24(&gb);
1427 bytestream2_get_byte(&gb);
1428 chan |= (bytestream2_get_byte(&gb) & 0xF) << 8;
1430 chmask = bytestream2_get_le32(&gb);
1443 sample_rate = bytestream2_get_le24(&gb);
1469     if (s->hybrid && !got_hybrid) {
1479         const int wanted = s->samples * s->extra_bits << s->stereo_in;
1480         if (size < wanted) {
1482             s->got_extra_bits = 0;
1487     if (!got_pcm && !got_dsd) {
1501         int sr = (s->frame_flags >> 23) & 0xf;
1507 new_samplerate = sample_rate;
1511 if (new_samplerate * (uint64_t)rate_x > INT_MAX)
1513 new_samplerate *= rate_x;
1531     if ((wc->dsdctx && !got_dsd) ||
1547     frame->nb_samples = s->samples;
1580 if (dsd_mode == 3) {
1582         } else if (dsd_mode == 1) {
1594 if (dsd_mode == 3) {
1596         } else if (dsd_mode == 1) {
1608         memcpy(samples_r, samples_l, bpp * s->samples);
1627                           (uint8_t *)frame->extended_data[jobnr], 4,
1628                           (float *)frame->extended_data[jobnr], 1);
1634                                 int *got_frame_ptr, AVPacket *avpkt)
1637     const uint8_t *buf = avpkt->data;
1638     int buf_size = avpkt->size;
1640 int block = 0, new_progress = 0;
1651             frame_flags = AV_RL32(buf + 24);
1664 if (frame_size <= 0 || frame_size > buf_size) {
1666 "Block %d has invalid size (size %d vs. %d bytes left)\n",
1687         if (s->prev_progress)
1690         if (s->curr_progress)
1700         if (s->prev_progress)
1709     .p.name         = "wavpack",