/*
 * NOTE(review): extraction fragment — these twelve lines are the trailing
 * parameter lists of six external function prototypes; the leading
 * "void ff_...(int16_t *output, const int16_t *input," part of each
 * declaration (original lines 55/57/59/61/63/65) is not visible in this
 * chunk.  From the visible parameters they declare assembly routines taking
 * a const int16_t window table and an unsigned length — presumably the
 * ff_apply_window_int16_* family; confirm against the full file.
 */
56 const int16_t *window,
                               unsigned int len);
58 const int16_t *window,
                               unsigned int len);
60 const int16_t *window,
                               unsigned int len);
62 const int16_t *window,
                               unsigned int len);
64 const int16_t *window,
                               unsigned int len);
66 const int16_t *window,
                               unsigned int len);
/*
 * NOTE(review): fragment — the code guarded by these two conditionals
 * (original lines 69-72 and 74-77) is missing from this chunk, as are the
 * matching #endif lines for the first guard.  The second guard opens the
 * SSE inline-assembly section that contains MIX5/MIX_MISC below; it
 * requires both SSE inline-asm support and seven free GP registers.
 */
68 #if ARCH_X86_32 && defined(__INTEL_COMPILER)
73 #if HAVE_SSE_INLINE && HAVE_7REGS
/*
 * MIX5(mono, stereo): x86 inline-SSE kernel specialized for downmixing five
 * input channels in place.  What the visible lines establish:
 *   - three scalar mix coefficients are loaded from byte offsets 0, 8 and 24
 *     of operand %1 and broadcast across their XMM registers (shufps $0);
 *   - four floats per channel are loaded from five channel pointers
 *     (%2..%6, indexed by the counter in %0), scaled (channels 0/2 by xmm5,
 *     channel 1 by xmm6, channels 3/4 by xmm7) and accumulated;
 *   - the mono()/stereo() hook macros select whether a single sum (xmm0) or
 *     two sums (xmm0 and xmm2) are produced; results are stored back over
 *     channel 0 (and channel 1 in the stereo case);
 *   - the input operands bind samples[0..4] + len, and xmm0-xmm7 are listed
 *     as clobbered via XMM_CLOBBERS.
 * NOTE(review): extraction fragment — interior lines of this macro are
 * missing here (the __asm__ volatile opening around original lines 79/86,
 * the loop-counter update and branch around lines 104-107, and the tail of
 * the constraint list after line 114), so the macro as shown is incomplete
 * and must be checked against the full file before any modification.
 */
78 #define MIX5(mono, stereo)                                            \
80     "movss      0(%1), %%xmm5          \n"                           \
81     "movss      8(%1), %%xmm6          \n"                           \
82     "movss     24(%1), %%xmm7          \n"                           \
83     "shufps $0, %%xmm5, %%xmm5         \n"                           \
84     "shufps $0, %%xmm6, %%xmm6         \n"                           \
85     "shufps $0, %%xmm7, %%xmm7         \n"                           \
87     "movaps (%0, %2), %%xmm0           \n"                           \
88     "movaps (%0, %3), %%xmm1           \n"                           \
89     "movaps (%0, %4), %%xmm2           \n"                           \
90     "movaps (%0, %5), %%xmm3           \n"                           \
91     "movaps (%0, %6), %%xmm4           \n"                           \
92     "mulps %%xmm5, %%xmm0              \n"                           \
93     "mulps %%xmm6, %%xmm1              \n"                           \
94     "mulps %%xmm5, %%xmm2              \n"                           \
95     "mulps %%xmm7, %%xmm3              \n"                           \
96     "mulps %%xmm7, %%xmm4              \n"                           \
97     stereo("addps %%xmm1, %%xmm0       \n")                          \
98     "addps %%xmm1, %%xmm2              \n"                           \
99     "addps %%xmm3, %%xmm0              \n"                           \
100     "addps %%xmm4, %%xmm2              \n"                           \
101     mono("addps %%xmm2, %%xmm0         \n")                          \
102     "movaps %%xmm0, (%0, %2)           \n"                           \
103     stereo("movaps %%xmm2, (%0, %3)    \n")                          \
108       "r"(samples[0] + len),                                         \
109       "r"(samples[1] + len),                                         \
110       "r"(samples[2] + len),                                         \
111       "r"(samples[3] + len),                                         \
112       "r"(samples[4] + len)                                          \
113     : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",               \
114                    "%xmm4", "%xmm5", "%xmm6", "%xmm7",)              \
/*
 * MIX_MISC(stereo): generic inline-SSE downmix kernel used when the channel
 * layout / matrix does not match a specialized fast path.  What the visible
 * lines establish:
 *   - per loop iteration it fetches channel sample pointers from a pointer
 *     table (operand %6, indexed by %2 and scaled by the immediate %c8,
 *     bound to sizeof(float *) constants at the bottom);
 *   - each channel's 4-float vector is multiplied by coefficients taken
 *     from matrix_simd (operand %4, indexed by %2 scaled by 8) and
 *     accumulated into xmm0 — and, in the stereo variant, a second
 *     accumulator xmm1 using the coefficients 16 bytes further on;
 *   - the accumulated vectors are stored back through the first (and, for
 *     stereo, second) output-channel pointers;
 *   - outputs constrain i/j/k/m as registers; inputs bind
 *     matrix_simd + in_ch and the negated channel-loop start offset
 *     (intptr_t)-4 * (in_ch - 1).
 * NOTE(review): extraction fragment — the __asm__ volatile opening
 * (original lines 119-121), the inner-loop counter/branch lines (127,
 * 135-137), and part of the constraint list (141-142, 146, 148-150) are
 * missing from this chunk; the macro as shown is incomplete.
 */
118 #define MIX_MISC(stereo)                                             \
122     "mov -%c7(%6, %2, %c8), %3         \n"                           \
123     "movaps     (%3, %0), %%xmm0       \n"                           \
124     stereo("movaps %%xmm0, %%xmm1      \n")                          \
125     "mulps      %%xmm4, %%xmm0         \n"                           \
126     stereo("mulps %%xmm5, %%xmm1       \n")                          \
128     "mov (%6, %2, %c8), %1             \n"                           \
129     "movaps (%1, %0), %%xmm2           \n"                           \
130     stereo("movaps %%xmm2, %%xmm3      \n")                          \
131     "mulps (%4, %2, 8), %%xmm2         \n"                           \
132     stereo("mulps 16(%4, %2, 8), %%xmm3 \n")                         \
133     "addps      %%xmm2, %%xmm0         \n"                           \
134     stereo("addps %%xmm3, %%xmm1       \n")                          \
138     stereo("mov (%6, %2, %c8), %1      \n")                          \
139     "movaps     %%xmm0, (%3, %0)       \n"                           \
140     stereo("movaps %%xmm1, (%1, %0)    \n")                          \
143     : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m)                         \
144     : "r"(matrix_simd + in_ch),                                      \
145       "g"((intptr_t) - 4 * (in_ch - 1)),                             \
147       "i"(sizeof(float *)), "i"(sizeof(float *)/4)                   \
/*
 * ac3_downmix_sse(): downmix audio channels through a float mixing matrix
 * using the SSE inline-assembly kernels above (name suggests AC-3 use —
 * TODO confirm against caller).  What the visible lines establish:
 *   - the float matrix is reinterpreted as int (matrix_cmp) so the
 *     special-case dispatch can test coefficient equality / zero with
 *     integer compares instead of float compares;
 *   - 5-in/2-out with the symmetric coefficient pattern tested below, and
 *     5-in/1-out with matching front/surround coefficients, take
 *     specialized paths (presumably MIX5 stereo/mono — the invocations
 *     fall in the missing lines; verify);
 *   - the generic path gathers samples[j] + len into samp[], then the
 *     visible movss/shufps/movaps asm broadcasts each coefficient pair of
 *     `matrix` into 16-byte-aligned vectors in matrix_simd, stepping by
 *     2 * in_ch * sizeof(float) bytes, before the MIX_MISC loop;
 *   - i is initialized to -len * sizeof(float), i.e. the kernels index
 *     upward from pointers pre-advanced by len (matches samples[n] + len
 *     in MIX5's operand bindings).
 * NOTE(review): extraction fragment — most of the function body (original
 * lines ~153, 155-156, 163, 167-171, 174, 176-178, 185-238) is missing,
 * including the declarations of i, j, samp and matrix_simd and the actual
 * kernel invocations; the closing brace at original line 239 may close an
 * inner branch rather than the function.  Do not modify without the full
 * file.
 */
151 static void ac3_downmix_sse(
                            float **samples,
                            float (*matrix)[2],
152                         int out_ch,
                            int in_ch,
                            int len)
154     int (*matrix_cmp)[2] = (int(*)[2])matrix;
157     i = -len *
        sizeof(float);
158     if (in_ch == 5 && out_ch == 2 &&
159         !(matrix_cmp[0][1] | matrix_cmp[2][0] |
160           matrix_cmp[3][1] | matrix_cmp[4][0] |
161           (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
162           (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
164     }
    else if (in_ch == 5 && out_ch == 1 &&
165              matrix_cmp[0][0] == matrix_cmp[2][0] &&
166              matrix_cmp[3][0] == matrix_cmp[4][0]) {
172         for (j = 0; j < in_ch; j++)
173             samp[j] = samples[j] + len;
175         j = 2 * in_ch *
            sizeof(float);
179             "movss     (%2, %0), %%xmm4  \n"
180             "movss    4(%2, %0), %%xmm5  \n"
181             "shufps $0, %%xmm4, %%xmm4   \n"
182             "shufps $0, %%xmm5, %%xmm5   \n"
183             "movaps %%xmm4,   (%1, %0, 4) \n"
184             "movaps %%xmm5, 16(%1, %0, 4) \n"
187             :
            "r"(matrix_simd),
              "r"(matrix)
239 }
/*
 * NOTE(review): fragment of a different function — a CPU-feature dispatch
 * branch (testing AV_CPU_FLAG_SSE2SLOW) whose beginning and body are not
 * visible in this chunk.  The trailing line is also suspicious: at original
 * line 253 one would expect the matching "#endif" closing the
 * HAVE_SSE_INLINE && HAVE_7REGS section opened earlier, yet it reads
 * "#if" here — likely extraction damage; verify against the full file.
 */
    else if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
253 #if HAVE_SSE_INLINE && HAVE_7REGS