static const uint8_t expand5[32] = {
      0,   8,  16,  24,  33,  41,  49,  57,  66,  74,  82,  90,
     99, 107, 115, 123, 132, 140, 148, 156, 165, 173, 181, 189,
    198, 206, 214, 222, 231, 239, 247, 255,
};
static const uint8_t expand6[64] = {
      0,   4,   8,  12,  16,  20,  24,  28,  32,  36,  40,  44,
     48,  52,  56,  60,  65,  69,  73,  77,  81,  85,  89,  93,
     97, 101, 105, 109, 113, 117, 121, 125, 130, 134, 138, 142,
    146, 150, 154, 158, 162, 166, 170, 174, 178, 182, 186, 190,
    195, 199, 203, 207, 211, 215, 219, 223, 227, 231, 235, 239,
    243, 247, 251, 255,
};
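Both tables are plain bit replication: when a 5- or 6-bit channel is widened to 8 bits, the top bits are copied into the vacated low bits so 0 stays 0 and the maximum maps to 255 (e.g. expand5[4] = (4 << 3) | (4 >> 2) = 33, matching the table). A sketch that regenerates every entry above:

#include <stdint.h>

/* Regenerate the RGB565 channel-expansion tables by bit replication. */
static void fill_expand_tables(uint8_t e5[32], uint8_t e6[64])
{
    for (int i = 0; i < 32; i++)
        e5[i] = (i << 3) | (i >> 2);   /* 5 -> 8 bits */
    for (int i = 0; i < 64; i++)
        e6[i] = (i << 2) | (i >> 4);   /* 6 -> 8 bits */
}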
static const uint8_t match5[256][2] = {
    { 0, 0 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 1, 0 }, { 1, 0 },
    { 1, 0 }, { 1, 1 }, { 1, 1 }, { 2, 0 }, { 2, 0 }, { 0, 4 },
    { 2, 1 }, { 2, 1 }, { 2, 1 }, { 3, 0 }, { 3, 0 }, { 3, 0 },
    { 3, 1 }, { 1, 5 }, { 3, 2 }, { 3, 2 }, { 4, 0 }, { 4, 0 },
    { 4, 1 }, { 4, 1 }, { 4, 2 }, { 4, 2 }, { 4, 2 }, { 3, 5 },
    { 5, 1 }, { 5, 1 }, { 5, 2 }, { 4, 4 }, { 5, 3 }, { 5, 3 },
    { 5, 3 }, { 6, 2 }, { 6, 2 }, { 6, 2 }, { 6, 3 }, { 5, 5 },
    { 6, 4 }, { 6, 4 }, { 4, 8 }, { 7, 3 }, { 7, 3 }, { 7, 3 },
    { 7, 4 }, { 7, 4 }, { 7, 4 }, { 7, 5 }, { 5, 9 }, { 7, 6 },
    { 7, 6 }, { 8, 4 }, { 8, 4 }, { 8, 5 }, { 8, 5 }, { 8, 6 },
    { 8, 6 }, { 8, 6 }, { 7, 9 }, { 9, 5 }, { 9, 5 }, { 9, 6 },
    { 8, 8 }, { 9, 7 }, { 9, 7 }, { 9, 7 }, { 10, 6 }, { 10, 6 },
    { 10, 6 }, { 10, 7 }, { 9, 9 }, { 10, 8 }, { 10, 8 }, { 8, 12 },
    { 11, 7 }, { 11, 7 }, { 11, 7 }, { 11, 8 }, { 11, 8 }, { 11, 8 },
    { 11, 9 }, { 9, 13 }, { 11, 10 }, { 11, 10 }, { 12, 8 }, { 12, 8 },
    { 12, 9 }, { 12, 9 }, { 12, 10 }, { 12, 10 }, { 12, 10 }, { 11, 13 },
    { 13, 9 }, { 13, 9 }, { 13, 10 }, { 12, 12 }, { 13, 11 }, { 13, 11 },
    { 13, 11 }, { 14, 10 }, { 14, 10 }, { 14, 10 }, { 14, 11 }, { 13, 13 },
    { 14, 12 }, { 14, 12 }, { 12, 16 }, { 15, 11 }, { 15, 11 }, { 15, 11 },
    { 15, 12 }, { 15, 12 }, { 15, 12 }, { 15, 13 }, { 13, 17 }, { 15, 14 },
    { 15, 14 }, { 16, 12 }, { 16, 12 }, { 16, 13 }, { 16, 13 }, { 16, 14 },
    { 16, 14 }, { 16, 14 }, { 15, 17 }, { 17, 13 }, { 17, 13 }, { 17, 14 },
    { 16, 16 }, { 17, 15 }, { 17, 15 }, { 17, 15 }, { 18, 14 }, { 18, 14 },
    { 18, 14 }, { 18, 15 }, { 17, 17 }, { 18, 16 }, { 18, 16 }, { 16, 20 },
    { 19, 15 }, { 19, 15 }, { 19, 15 }, { 19, 16 }, { 19, 16 }, { 19, 16 },
    { 19, 17 }, { 17, 21 }, { 19, 18 }, { 19, 18 }, { 20, 16 }, { 20, 16 },
    { 20, 17 }, { 20, 17 }, { 20, 18 }, { 20, 18 }, { 20, 18 }, { 19, 21 },
    { 21, 17 }, { 21, 17 }, { 21, 18 }, { 20, 20 }, { 21, 19 }, { 21, 19 },
    { 21, 19 }, { 22, 18 }, { 22, 18 }, { 22, 18 }, { 22, 19 }, { 21, 21 },
    { 22, 20 }, { 22, 20 }, { 20, 24 }, { 23, 19 }, { 23, 19 }, { 23, 19 },
    { 23, 20 }, { 23, 20 }, { 23, 20 }, { 23, 21 }, { 21, 25 }, { 23, 22 },
    { 23, 22 }, { 24, 20 }, { 24, 20 }, { 24, 21 }, { 24, 21 }, { 24, 22 },
    { 24, 22 }, { 24, 22 }, { 23, 25 }, { 25, 21 }, { 25, 21 }, { 25, 22 },
    { 24, 24 }, { 25, 23 }, { 25, 23 }, { 25, 23 }, { 26, 22 }, { 26, 22 },
    { 26, 22 }, { 26, 23 }, { 25, 25 }, { 26, 24 }, { 26, 24 }, { 24, 28 },
    { 27, 23 }, { 27, 23 }, { 27, 23 }, { 27, 24 }, { 27, 24 }, { 27, 24 },
    { 27, 25 }, { 25, 29 }, { 27, 26 }, { 27, 26 }, { 28, 24 }, { 28, 24 },
    { 28, 25 }, { 28, 25 }, { 28, 26 }, { 28, 26 }, { 28, 26 }, { 27, 29 },
    { 29, 25 }, { 29, 25 }, { 29, 26 }, { 28, 28 }, { 29, 27 }, { 29, 27 },
    { 29, 27 }, { 30, 26 }, { 30, 26 }, { 30, 26 }, { 30, 27 }, { 29, 29 },
    { 30, 28 }, { 30, 28 }, { 30, 28 }, { 31, 27 }, { 31, 27 }, { 31, 27 },
    { 31, 28 }, { 31, 28 }, { 31, 28 }, { 31, 29 }, { 31, 29 }, { 31, 30 },
    { 31, 30 }, { 31, 30 }, { 31, 31 }, { 31, 31 },
};
static const uint8_t match6[256][2] = {
    { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 0 }, { 1, 1 }, { 2, 0 },
    { 2, 1 }, { 3, 0 }, { 3, 0 }, { 3, 1 }, { 4, 0 }, { 4, 0 },
    { 4, 1 }, { 5, 0 }, { 5, 1 }, { 6, 0 }, { 6, 0 }, { 6, 1 },
    { 7, 0 }, { 7, 0 }, { 7, 1 }, { 8, 0 }, { 8, 1 }, { 8, 1 },
    { 8, 2 }, { 9, 1 }, { 9, 2 }, { 9, 2 }, { 9, 3 }, { 10, 2 },
    { 10, 3 }, { 10, 3 }, { 10, 4 }, { 11, 3 }, { 11, 4 }, { 11, 4 },
    { 11, 5 }, { 12, 4 }, { 12, 5 }, { 12, 5 }, { 12, 6 }, { 13, 5 },
    { 13, 6 }, { 8, 16 }, { 13, 7 }, { 14, 6 }, { 14, 7 }, { 9, 17 },
    { 14, 8 }, { 15, 7 }, { 15, 8 }, { 11, 16 }, { 15, 9 }, { 15, 10 },
    { 16, 8 }, { 16, 9 }, { 16, 10 }, { 15, 13 }, { 17, 9 }, { 17, 10 },
    { 17, 11 }, { 15, 16 }, { 18, 10 }, { 18, 11 }, { 18, 12 }, { 16, 16 },
    { 19, 11 }, { 19, 12 }, { 19, 13 }, { 17, 17 }, { 20, 12 }, { 20, 13 },
    { 20, 14 }, { 19, 16 }, { 21, 13 }, { 21, 14 }, { 21, 15 }, { 20, 17 },
    { 22, 14 }, { 22, 15 }, { 25, 10 }, { 22, 16 }, { 23, 15 }, { 23, 16 },
    { 26, 11 }, { 23, 17 }, { 24, 16 }, { 24, 17 }, { 27, 12 }, { 24, 18 },
    { 25, 17 }, { 25, 18 }, { 28, 13 }, { 25, 19 }, { 26, 18 }, { 26, 19 },
    { 29, 14 }, { 26, 20 }, { 27, 19 }, { 27, 20 }, { 30, 15 }, { 27, 21 },
    { 28, 20 }, { 28, 21 }, { 28, 21 }, { 28, 22 }, { 29, 21 }, { 29, 22 },
    { 24, 32 }, { 29, 23 }, { 30, 22 }, { 30, 23 }, { 25, 33 }, { 30, 24 },
    { 31, 23 }, { 31, 24 }, { 27, 32 }, { 31, 25 }, { 31, 26 }, { 32, 24 },
    { 32, 25 }, { 32, 26 }, { 31, 29 }, { 33, 25 }, { 33, 26 }, { 33, 27 },
    { 31, 32 }, { 34, 26 }, { 34, 27 }, { 34, 28 }, { 32, 32 }, { 35, 27 },
    { 35, 28 }, { 35, 29 }, { 33, 33 }, { 36, 28 }, { 36, 29 }, { 36, 30 },
    { 35, 32 }, { 37, 29 }, { 37, 30 }, { 37, 31 }, { 36, 33 }, { 38, 30 },
    { 38, 31 }, { 41, 26 }, { 38, 32 }, { 39, 31 }, { 39, 32 }, { 42, 27 },
    { 39, 33 }, { 40, 32 }, { 40, 33 }, { 43, 28 }, { 40, 34 }, { 41, 33 },
    { 41, 34 }, { 44, 29 }, { 41, 35 }, { 42, 34 }, { 42, 35 }, { 45, 30 },
    { 42, 36 }, { 43, 35 }, { 43, 36 }, { 46, 31 }, { 43, 37 }, { 44, 36 },
    { 44, 37 }, { 44, 37 }, { 44, 38 }, { 45, 37 }, { 45, 38 }, { 40, 48 },
    { 45, 39 }, { 46, 38 }, { 46, 39 }, { 41, 49 }, { 46, 40 }, { 47, 39 },
    { 47, 40 }, { 43, 48 }, { 47, 41 }, { 47, 42 }, { 48, 40 }, { 48, 41 },
    { 48, 42 }, { 47, 45 }, { 49, 41 }, { 49, 42 }, { 49, 43 }, { 47, 48 },
    { 50, 42 }, { 50, 43 }, { 50, 44 }, { 48, 48 }, { 51, 43 }, { 51, 44 },
    { 51, 45 }, { 49, 49 }, { 52, 44 }, { 52, 45 }, { 52, 46 }, { 51, 48 },
    { 53, 45 }, { 53, 46 }, { 53, 47 }, { 52, 49 }, { 54, 46 }, { 54, 47 },
    { 57, 42 }, { 54, 48 }, { 55, 47 }, { 55, 48 }, { 58, 43 }, { 55, 49 },
    { 56, 48 }, { 56, 49 }, { 59, 44 }, { 56, 50 }, { 57, 49 }, { 57, 50 },
    { 60, 45 }, { 57, 51 }, { 58, 50 }, { 58, 51 }, { 61, 46 }, { 58, 52 },
    { 59, 51 }, { 59, 52 }, { 62, 47 }, { 59, 53 }, { 60, 52 }, { 60, 53 },
    { 60, 53 }, { 60, 54 }, { 61, 53 }, { 61, 54 }, { 61, 54 }, { 61, 55 },
    { 62, 54 }, { 62, 55 }, { 62, 55 }, { 62, 56 }, { 63, 55 }, { 63, 56 },
    { 63, 56 }, { 63, 57 }, { 63, 58 }, { 63, 59 }, { 63, 59 }, { 63, 60 },
    { 63, 61 }, { 63, 62 }, { 63, 62 }, { 63, 63 },
};
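match5 and match6 map each 8-bit value to the 5- or 6-bit endpoint pair whose 2:1 DXT interpolation lands closest to it; they let solid colors be encoded exactly (e.g. match5[19] = { 1, 5 }: lerp13(expand5[1], expand5[5]) = (2*8 + 41) / 3 = 19). A sketch of a generator in the style of the stb_dxt code this encoder derives from; the 3% interpolation-slop penalty and the tie-breaking order are assumptions, so a regeneration may differ on exact ties:

#include <stdint.h>
#include <stdlib.h>

#define lerp13(a, b) ((2 * (a) + (b)) / 3)

/* Build a [256][2] optimal-match table: for every target value i,
 * pick endpoint indices (hi, lo) minimizing the error of the
 * interpolated palette entry lerp13(expand[hi], expand[lo]). */
static void prepare_match_table(uint8_t table[][2],
                                const uint8_t *expand, int size)
{
    for (int i = 0; i < 256; i++) {
        int best_err = 256;
        for (int lo = 0; lo < size; lo++) {
            for (int hi = 0; hi < size; hi++) {
                int interp = lerp13(expand[hi], expand[lo]);
                int err    = abs(interp - i);
                /* penalize widely separated endpoints, since real
                 * hardware may interpolate with up to ~3% error */
                err += abs(expand[hi] - expand[lo]) * 3 / 100;
                if (err < best_err) {
                    best_err    = err;
                    table[i][0] = hi;
                    table[i][1] = lo;
                }
            }
        }
    }
}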
#define mul8(a, b) (((a) * (b) + 128 + (((a) * (b) + 128) >> 8)) >> 8)
#define rgb2rgb565(r, g, b) \
    ((mul8(r, 31) << 11) | (mul8(g, 63) << 5) | (mul8(b, 31) << 0))
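mul8(a, b) is the classic exact-rounding replacement for a * b / 255 using only adds and shifts: dividing by 256 instead of 255 undershoots slightly, and adding the high byte of the biased product back in corrects for it. A small self-check of that identity and of the 5:6:5 packing (check_mul8 is a hypothetical test helper built on the two macros above):

#include <assert.h>
#include <math.h>

static void check_mul8(void)
{
    /* mul8() must agree with round(a * b / 255.0) for byte inputs */
    for (int a = 0; a < 256; a++) {
        assert(mul8(a, 31) == (int) lrint(a * 31 / 255.0));
        assert(mul8(a, 63) == (int) lrint(a * 63 / 255.0));
    }

    /* extremes survive the 8-bit -> 5:6:5 quantization unchanged */
    assert(rgb2rgb565(255, 255, 255) == 0xFFFF);
    assert(rgb2rgb565(  0,   0,   0) == 0x0000);
}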
#define lerp13(a, b) ((2 * (a) + (b)) / 3)
static void lerp13rgb(uint8_t *out, uint8_t *p1, uint8_t *p2)
{
    out[0] = lerp13(p1[0], p2[0]);
    out[1] = lerp13(p1[1], p2[1]);
    out[2] = lerp13(p1[2], p2[2]);
}
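lerp13 is the 2:1 blend that generates the two intermediate entries of a four-color DXT1 palette: for endpoints c0 and c1 the decoded palette is c0, c1, lerp13(c0, c1), lerp13(c1, c0). A worked pair of values:

/* With a = 255 and b = 0:
 *   lerp13(255, 0) = (2 * 255 + 0) / 3 = 170   (two thirds toward a)
 *   lerp13(0, 255) = (2 * 0 + 255) / 3 = 85    (two thirds toward b)
 * i.e. the familiar 255 / 170 / 85 / 0 ramp of an opaque DXT1 block. */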
static void rgb5652rgb(uint8_t *out, uint16_t v)
{
    int rv = (v & 0xf800) >> 11;
    int gv = (v & 0x07e0) >>  5;
    int bv = (v & 0x001f) >>  0;
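    /* hedged completion (assumption, not part of the listing):
     * widen each field back to 8 bits via the bit-replication
     * tables above */
    out[0] = expand5[rv];
    out[1] = expand6[gv];
    out[2] = expand5[bv];
}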
static unsigned int match_colors(const uint8_t *block, ptrdiff_t stride,
                                 uint16_t c0, uint16_t c1)
{
    uint32_t mask = 0;
    int dirr, dirg, dirb;
    int dots[16];
    int stops[4];
    int x, y, k = 0;
    int c0_point, half_point, c3_point;
    uint8_t color[16];
    static const int indexMap[8] = {
        0 << 30, 2 << 30, 0 << 30, 2 << 30,
        3 << 30, 3 << 30, 1 << 30, 1 << 30,
    };
    /* reconstruct the four palette entries from the two endpoints */
    rgb5652rgb(color + 0, c0);
    rgb5652rgb(color + 4, c1);
    lerp13rgb(color + 8,  color + 0, color + 4);
    lerp13rgb(color + 12, color + 4, color + 0);

    dirr = color[0 * 4 + 0] - color[1 * 4 + 0];
    dirg = color[0 * 4 + 1] - color[1 * 4 + 1];
    dirb = color[0 * 4 + 2] - color[1 * 4 + 2];
    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++)
            dots[k++] = block[0 + x * 4 + y * stride] * dirr +
                        block[1 + x * 4 + y * stride] * dirg +
                        block[2 + x * 4 + y * stride] * dirb;

        stops[y] = color[0 + y * 4] * dirr +
                   color[1 + y * 4] * dirg +
                   color[2 + y * 4] * dirb;
    }
    c0_point   = (stops[1] + stops[3]) >> 1;
    half_point = (stops[3] + stops[2]) >> 1;
    c3_point   = (stops[2] + stops[0]) >> 1;
    for (x = 0; x < 16; x++) {
        int dot  = dots[x];
        int bits = (dot < half_point ? 4 : 0) |
                   (dot < c0_point   ? 2 : 0) |
                   (dot < c3_point   ? 1 : 0);

        mask >>= 2;
        mask  |= indexMap[bits];
    }

    return mask;
}
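Projected onto the c0-c1 axis, the four palette entries order as stops[1] < stops[3] < stops[2] < stops[0], and the three midpoints computed above split that axis into four buckets; indexMap folds the comparison pattern into the matching 2-bit index, pre-shifted to bit 30 so that sixteen mask >>= 2 steps leave pixel 0's index in the lowest bits:

/* bucket (by dot product)            bits  indexMap[bits]  palette entry
 *   dot <  c0_point                   7       1 << 30       1 (c1)
 *   c0_point   <= dot < half_point    5       3 << 30       3 (lerp13(c1, c0))
 *   half_point <= dot < c3_point      1       2 << 30       2 (lerp13(c0, c1))
 *   c3_point   <= dot                 0       0 << 30       0 (c0)          */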
static void optimize_colors(const uint8_t *block, ptrdiff_t stride,
                            uint16_t *pmax16, uint16_t *pmin16)
{
    const int iter_power = 4;
    const uint8_t *minp;
    const uint8_t *maxp;
    int mind, maxd;
    double magn;
    int v_r, v_g, v_b;
    float covf[6], vfr, vfg, vfb;
    int cov[6] = { 0 };
    int mu[3], min[3], max[3];
    int ch, iter, x, y;
    /* determine color distribution */
    for (ch = 0; ch < 3; ch++) {
        const uint8_t *bp = block + ch;
        int muv, minv, maxv;

        minv = maxv = bp[0];
        muv  = 0;
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                int v = bp[x * 4 + y * stride];

                muv += v;
                if (v < minv)
                    minv = v;
                else if (v > maxv)
                    maxv = v;
            }
        }

        /* mu is the rounded average over all 16 samples */
        mu[ch]  = (muv + 8) >> 4;
        min[ch] = minv;
        max[ch] = maxv;
    }
    /* determine covariance matrix */
    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++) {
            int r = block[x * 4 + stride * y + 0] - mu[0];
            int g = block[x * 4 + stride * y + 1] - mu[1];
            int b = block[x * 4 + stride * y + 2] - mu[2];

            cov[0] += r * r;
            cov[1] += r * g;
            cov[2] += r * b;
            cov[3] += g * g;
            cov[4] += g * b;
            cov[5] += b * b;
        }
    }
    for (x = 0; x < 6; x++)
        covf[x] = cov[x] / 255.0f;

    vfr = (float) (max[0] - min[0]);
    vfg = (float) (max[1] - min[1]);
    vfb = (float) (max[2] - min[2]);
    for (iter = 0; iter < iter_power; iter++) {
        float r = vfr * covf[0] + vfg * covf[1] + vfb * covf[2];
        float g = vfr * covf[1] + vfg * covf[3] + vfb * covf[4];
        float b = vfr * covf[2] + vfg * covf[4] + vfb * covf[5];

        vfr = r;
        vfg = g;
        vfb = b;
    }

    magn = fabs(vfr);
    if (fabs(vfg) > magn)
        magn = fabs(vfg);
    if (fabs(vfb) > magn)
        magn = fabs(vfb);

    /* ... */
    v_r = (int) (vfr * magn);
    v_g = (int) (vfg * magn);
    v_b = (int) (vfb * magn);
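    /* cov[] holds the six unique entries of the symmetric 3x3 color
     * covariance matrix.  The loop above is a power iteration,
     * v <- C * v, seeded with the per-channel range: it converges
     * toward the principal eigenvector, the axis of greatest color
     * variance, which is then fixed-point scaled (in code elided
     * here) into the projection direction (v_r, v_g, v_b). */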
    /* pick colors at extreme points along that axis */
    mind = maxd = block[0] * v_r + block[1] * v_g + block[2] * v_b;
    minp = maxp = block;

    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++) {
            int dot = block[x * 4 + y * stride + 0] * v_r +
                      block[x * 4 + y * stride + 1] * v_g +
                      block[x * 4 + y * stride + 2] * v_b;

            if (dot < mind) {
                mind = dot;
                minp = block + x * 4 + y * stride;
            } else if (dot > maxd) {
                maxd = dot;
                maxp = block + x * 4 + y * stride;
            }
        }
    }

    *pmax16 = rgb2rgb565(maxp[0], maxp[1], maxp[2]);
    *pmin16 = rgb2rgb565(minp[0], minp[1], minp[2]);
}
static int refine_colors(const uint8_t *block, ptrdiff_t stride,
                         uint16_t *pmax16, uint16_t *pmin16, uint32_t mask)
{
    uint16_t oldMin = *pmin16;
    uint16_t oldMax = *pmax16;
    uint16_t min16, max16;
    int x, y;

    static const int w1tab[4] = { 3, 0, 2, 1 };
    static const int prods[4] = { 0x090000, 0x000900, 0x040102, 0x010402 };
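    /* Least-squares endpoint refit: a pixel with index i decodes to
     * (w1 * max + w2 * min) / 3 with (w1, w2) taken from
     * { (3,0), (0,3), (2,1), (1,2) } (the w1tab order above).
     * Minimizing sum ||pixel - (w1 * max + w2 * min) / 3||^2 over the
     * block yields 2x2 normal equations with the solution
     *   max = 3 * (at1 * yy - at2 * xy) / (xx * yy - xy * xy)
     *   min = 3 * (at2 * xx - at1 * xy) / (xx * yy - xy * xy)
     * where xx = sum w1^2, yy = sum w2^2, xy = sum w1 * w2, and
     * at1/at2 are the w1- and w2-weighted color sums.  prods[i] packs
     * (w1^2 << 16) | (w2^2 << 8) | (w1 * w2) into byte fields so a
     * single "akku += prods[i]" per pixel accumulates all three sums. */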
    if ((mask ^ (mask << 2)) < 4) {
        int r = 8, g = 8, b = 8;

        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                r += block[0 + x * 4 + y * stride];
                g += block[1 + x * 4 + y * stride];
                b += block[2 + x * 4 + y * stride];
            }
        }
        /* ... */
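        /* (mask ^ (mask << 2)) < 4 means every 2-bit index equals its
         * neighbor, i.e. all sixteen pixels use one palette entry and
         * the least-squares system would be singular.  The block is
         * reduced to its average color (the initial 8 is the rounding
         * bias for the >> 4 divide in the elided code) and matched
         * exactly, presumably through the match5/match6 tables as in
         * the solid-color path sketched further below. */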
    } else {
        int at1_r = 0, at1_g = 0, at1_b = 0;
        int at2_r = 0, at2_g = 0, at2_b = 0;
        int akku = 0;
        int xx, yy, xy;
        float fr, fg, fb;
        uint32_t cm = mask;

        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                int step = cm & 3;
                int w1 = w1tab[step];
                int r = block[0 + x * 4 + y * stride];
                int g = block[1 + x * 4 + y * stride];
                int b = block[2 + x * 4 + y * stride];

                akku  += prods[step];
                at1_r += w1 * r;
                at1_g += w1 * g;
                at1_b += w1 * b;
                at2_r += r;
                at2_g += g;
                at2_b += b;

                cm >>= 2;
            }
        }

        /* at2 = 3 * total - at1 turns the plain sums into the
         * w2-weighted sums, since w2 = 3 - w1 */
        at2_r = 3 * at2_r - at1_r;
        at2_g = 3 * at2_g - at1_g;
        at2_b = 3 * at2_b - at1_b;
        /* unpack the accumulated sums */
        xx = akku >> 16;
        yy = (akku >>  8) & 0xFF;
        xy = (akku >>  0) & 0xFF;

        fr = 3.0f * 31.0f / 255.0f / (xx * yy - xy * xy);
        fg = fr * 63.0f / 31.0f;
        fb = fr;
        max16  = av_clip_uintp2((at1_r * yy - at2_r * xy) * fr + 0.5f, 5) << 11;
        max16 |= av_clip_uintp2((at1_g * yy - at2_g * xy) * fg + 0.5f, 6) <<  5;
        max16 |= av_clip_uintp2((at1_b * yy - at2_b * xy) * fb + 0.5f, 5) <<  0;

        min16  = av_clip_uintp2((at2_r * xx - at1_r * xy) * fr + 0.5f, 5) << 11;
        min16 |= av_clip_uintp2((at2_g * xx - at1_g * xy) * fg + 0.5f, 6) <<  5;
        min16 |= av_clip_uintp2((at2_b * xx - at1_b * xy) * fb + 0.5f, 5) <<  0;
    }

    *pmax16 = max16;
    *pmin16 = min16;

    return oldMin != min16 || oldMax != max16;
}
static int constant_color(const uint8_t *block, ptrdiff_t stride)
{
    int x, y;
    uint32_t first = AV_RL32(block);

    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++)
            if (first != AV_RL32(block + x * 4 + y * stride))
                return 0;

    return 1;
}
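Together with the match tables, this makes uniform blocks trivial: the encoder can look the color up per channel and emit an all-2 index mask so every pixel decodes to lerp13(max, min). A hedged sketch of that fast path (encode_solid_dxt1 is illustrative, not the file's actual helper; AV_WL16/AV_WL32 are the libavutil little-endian writers):

#include "libavutil/intreadwrite.h"

static void encode_solid_dxt1(uint8_t *dst, const uint8_t *pixel)
{
    uint16_t max16 = (match5[pixel[0]][0] << 11) |
                     (match6[pixel[1]][0] <<  5) |
                      match5[pixel[2]][0];
    uint16_t min16 = (match5[pixel[0]][1] << 11) |
                     (match6[pixel[1]][1] <<  5) |
                      match5[pixel[2]][1];

    AV_WL16(dst + 0, max16);        /* color0 */
    AV_WL16(dst + 2, min16);        /* color1 */
    AV_WL32(dst + 4, 0xAAAAAAAA);   /* sixteen 2-bit indices, all 2 */
}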
    uint16_t max16, min16;

    /* ... */

    FFSWAP(uint16_t, min16, max16);
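/* DXT1 decoders treat a block with color0 <= color1 as the 3-color +
 * transparent mode, so an opaque encoder must keep max16 > min16:
 * when the endpoints come out reversed they are swapped, and the 2-bit
 * index mask then has to be remapped to match the exchanged endpoints. */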
static void compress_alpha(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
{
    int x, y;
    int dist, bias, dist4, dist2;
    int mn, mx;

    /* find the min/max alpha over the block */
    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++) {
            /* ... */
        }
    }
    /* ... */

    dist  = mx - mn;
    dist4 = dist * 4;
    dist2 = dist * 2;
    if (dist < 8)
        bias = dist - 1 - mn * 7;
    else
        bias = dist / 2 + 2 - mn * 7;

    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++) {
            int alp = block[3 + x * 4 + y * stride] * 7 + bias;
            int ind, tmp;

            tmp  = (alp >= dist4) ? -1 : 0;
            ind  = tmp & 4;
            alp -= dist4 & tmp;
            tmp  = (alp >= dist2) ? -1 : 0;
            ind += tmp & 2;
            alp -= dist2 & tmp;
            ind += (alp >= dist);

            /* ... remap ind to DXT5 index order and pack 3 bits ... */
        }
    }
}
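The selector above is a branch-free scale-and-peel: alp carries the pixel's alpha scaled by 7 plus a rounding bias, and the comparisons against 4*dist, 2*dist and dist extract the 3-bit ramp index one bit at a time. A worked example with hypothetical values:

/* Say mn = 16 and mx = 144, so dist = 128, dist2 = 256, dist4 = 512
 * and bias = 128 / 2 + 2 - 16 * 7 = -46.  For a pixel with alpha 100:
 *   alp = 100 * 7 - 46 = 654
 *   654 >= 512 -> ind = 4, alp = 654 - 512 = 142
 *   142 <  256 -> ind stays 4
 *   142 >= 128 -> ind = 5
 * alpha 100 lies (100 - 16) / 128 = 0.66 of the way up the ramp, and
 * 0.66 * 7 = 4.6 indeed rounds to index 5. */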
static void rgba2ycocg(uint8_t *dst, const uint8_t *pixel)
{
    int r =  pixel[0];
    int g = (pixel[1] + 1) >> 1;
    int b =  pixel[2];
    int t = (2 + r + b) >> 2;

    dst[0] = av_clip_uint8(128 + ((r - b + 1) >> 1)); /* Co */
    dst[1] = av_clip_uint8(128 + g - t);              /* Cg */
    dst[2] = 0;
    dst[3] = av_clip_uint8(g + t);                    /* Y  */
}
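The transform stores Co and Cg biased around 128 and puts Y in the alpha slot, where DXT5 keeps its highest-precision channel; dst[2] is unused. A quick check with pure white against the formulas Y = R/4 + G/2 + B/4, Co = (R - B)/2, Cg = -R/4 + G/2 - B/4:

/* pixel = { 255, 255, 255, x }:
 *   g = (255 + 1) >> 1 = 128,   t = (2 + 255 + 255) >> 2 = 128
 *   dst[0] (Co) = 128 + ((255 - 255 + 1) >> 1) = 128   (neutral)
 *   dst[1] (Cg) = 128 + 128 - 128              = 128   (neutral)
 *   dst[3] (Y)  = 128 + 128                    = 255   (full luma) */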
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++)
            rgba2ycocg(reorder + x * 4 + y * 16, block + x * 4 + y * stride);
Symbol index:

Macros:
    #define rgb2rgb565(r, g, b)
    #define FFSWAP(type, a, b)

Static tables:
    static const uint8_t expand5[32]
    static const uint8_t expand6[64]
    static const uint8_t match5[256][2]
    static const uint8_t match6[256][2]
    static const uint16_t mask[17]

Functions:
    static int constant_color(const uint8_t *block, ptrdiff_t stride)
    static void rgb5652rgb(uint8_t *out, uint16_t v)
    static void lerp13rgb(uint8_t *out, uint8_t *p1, uint8_t *p2)
    static unsigned int match_colors(const uint8_t *block, ptrdiff_t stride, uint16_t c0, uint16_t c1)
    static void optimize_colors(const uint8_t *block, ptrdiff_t stride, uint16_t *pmax16, uint16_t *pmin16)
    static int refine_colors(const uint8_t *block, ptrdiff_t stride, uint16_t *pmax16, uint16_t *pmin16, uint32_t mask)
    static void compress_color(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
    static void compress_alpha(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
    static void rgba2ycocg(uint8_t *dst, const uint8_t *pixel)
        Convert an RGBA buffer to unscaled YCoCg.
    static int dxt1_block(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
        Compress one block of RGBA pixels in a DXT1 texture and store the resulting bytes in 'dst'...
    static int dxt5_block(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
        Compress one block of RGBA pixels in a DXT5 texture and store the resulting bytes in 'dst'...
    static int dxt5ys_block(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
        Compress one block of RGBA pixels in a DXT5-YCoCg texture and store the resulting bytes in 'dst'...
    static int rgtc1u_alpha_block(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
        Compress one block of RGBA pixels in an RGTC1U texture and store the resulting bytes in 'dst'...
    av_cold void ff_texturedspenc_init(TextureDSPContext *c)

TextureDSPContext entry points:
    int (*dxt1_block)(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
    int (*dxt5_block)(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
    int (*dxt5ys_block)(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)
    int (*rgtc1u_alpha_block)(uint8_t *dst, ptrdiff_t stride, const uint8_t *block)

Referenced headers:
    texturedsp.h  - Texture block (4x4) module.
    attributes.h  - Macro definitions for various function/variable attributes.
    common.h      - common internal and external API header.
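A minimal usage sketch of the encoder entry points, assuming each *_block callback consumes one 4x4 RGBA block and returns the number of bytes it wrote (8 for DXT1); width and height are taken as multiples of 4 for brevity:

#include <stddef.h>
#include <stdint.h>
#include "texturedsp.h"

static void encode_dxt1_image(uint8_t *dst, const uint8_t *rgba,
                              int width, int height, ptrdiff_t stride)
{
    TextureDSPContext dsp;
    ff_texturedspenc_init(&dsp);

    /* walk the image in 4x4 blocks, appending one compressed
     * block per step */
    for (int y = 0; y < height; y += 4)
        for (int x = 0; x < width; x += 4)
            dst += dsp.dxt1_block(dst, stride, rgba + y * stride + x * 4);
}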