FFmpeg: rv40dsp_init.c
/*
 * RV40 decoder motion compensation functions x86-optimised
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RV40 decoder motion compensation functions, x86-optimised.
 * The 2,0 and 0,2 positions have H.264 equivalents.
 * The 3,3 position is bugged in the RV40 format and maps to the _xy2 version.
 */

#include "libavcodec/rv34dsp.h"
#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/x86/cpu.h"
#include "hpeldsp.h"

#define DEFINE_FN(op, size, insn) \
static void op##_rv40_qpel##size##_mc33_##insn(uint8_t *dst, const uint8_t *src, \
                                               ptrdiff_t stride) \
{ \
    ff_##op##_pixels##size##_xy2_##insn(dst, src, stride, size); \
}
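
/*
 * Editorial illustration (not part of the original source): DEFINE_FN simply
 * forwards the buggy 3,3 quarter-pel case to the halfpel "xy2" averager of
 * the same size from hpeldsp.  For example, DEFINE_FN(put, 8, mmx) expands
 * (modulo whitespace) to
 *
 *     static void put_rv40_qpel8_mc33_mmx(uint8_t *dst, const uint8_t *src,
 *                                         ptrdiff_t stride)
 *     {
 *         ff_put_pixels8_xy2_mmx(dst, src, stride, 8);
 *     }
 */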

#if HAVE_X86ASM
void ff_put_rv40_chroma_mc8_mmx   (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_mmxext(uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);

void ff_put_rv40_chroma_mc4_mmx   (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_mmxext(uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);

#define DECLARE_WEIGHT(opt) \
void ff_rv40_weight_func_rnd_16_##opt  (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_rnd_8_##opt   (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride);
DECLARE_WEIGHT(mmxext)
DECLARE_WEIGHT(sse2)
DECLARE_WEIGHT(ssse3)

/** @{ */
/**
 * Define one qpel function.
 * LOOPSIZE must be already set to the number of pixels processed per
 * iteration in the inner loop of the called functions.
 * COFF(x) must be already defined so as to provide the offset into any
 * array of coeffs used by the called function for the qpel position x.
 */
#define QPEL_FUNC_DECL(OP, SIZE, PH, PV, OPT) \
static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst, \
                                                         const uint8_t *src, \
                                                         ptrdiff_t stride) \
{ \
    int i; \
    if (PH && PV) { \
        LOCAL_ALIGNED(16, uint8_t, tmp, [SIZE * (SIZE + 5)]); \
        uint8_t *tmpptr = tmp + SIZE * 2; \
        src -= stride * 2; \
        \
        for (i = 0; i < SIZE; i += LOOPSIZE) \
            ff_put_rv40_qpel_h ##OPT(tmp + i, SIZE, src + i, stride, \
                                     SIZE + 5, HCOFF(PH)); \
        for (i = 0; i < SIZE; i += LOOPSIZE) \
            ff_ ##OP ##rv40_qpel_v ##OPT(dst + i, stride, tmpptr + i, \
                                         SIZE, SIZE, VCOFF(PV)); \
    } else if (PV) { \
        for (i = 0; i < SIZE; i += LOOPSIZE) \
            ff_ ##OP ##rv40_qpel_v ## OPT(dst + i, stride, src + i, \
                                          stride, SIZE, VCOFF(PV)); \
    } else { \
        for (i = 0; i < SIZE; i += LOOPSIZE) \
            ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i, \
                                          stride, SIZE, HCOFF(PH)); \
    } \
}
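
/*
 * Editorial illustration (not part of the original source): with the SSSE3
 * settings below (LOOPSIZE 8, HCOFF/VCOFF in 32-byte steps),
 * QPEL_FUNC_DECL(put_, 16, 1, 3, _ssse3) produces, approximately,
 *
 *     static void put_rv40_qpel16_mc13_ssse3(uint8_t *dst, const uint8_t *src,
 *                                            ptrdiff_t stride)
 *     {
 *         int i;
 *         LOCAL_ALIGNED(16, uint8_t, tmp, [16 * 21]);
 *         uint8_t *tmpptr = tmp + 32;
 *         src -= stride * 2;
 *         for (i = 0; i < 16; i += 8)
 *             ff_put_rv40_qpel_h_ssse3(tmp + i, 16, src + i, stride, 21, 0);
 *         for (i = 0; i < 16; i += 8)
 *             ff_put_rv40_qpel_v_ssse3(dst + i, stride, tmpptr + i, 16, 16, 64);
 *     }
 *
 * i.e. a horizontal filter pass into a 16x21 scratch buffer (two extra rows
 * above the block, three below) followed by a vertical pass into dst.  Purely
 * horizontal or purely vertical positions take the single-pass branches
 * instead.
 */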

/** Declare functions for sizes 8 and 16 and given operations
 *  and qpel position. */
#define QPEL_FUNCS_DECL(OP, PH, PV, OPT) \
    QPEL_FUNC_DECL(OP, 8,  PH, PV, OPT) \
    QPEL_FUNC_DECL(OP, 16, PH, PV, OPT)

/** Declare all functions for all sizes and qpel positions */
#define QPEL_MC_DECL(OP, OPT) \
void ff_ ##OP ##rv40_qpel_h ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
                                  const uint8_t *src, \
                                  ptrdiff_t srcStride, \
                                  int len, int m); \
void ff_ ##OP ##rv40_qpel_v ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
                                  const uint8_t *src, \
                                  ptrdiff_t srcStride, \
                                  int len, int m); \
QPEL_FUNCS_DECL(OP, 0, 1, OPT) \
QPEL_FUNCS_DECL(OP, 0, 3, OPT) \
QPEL_FUNCS_DECL(OP, 1, 0, OPT) \
QPEL_FUNCS_DECL(OP, 1, 1, OPT) \
QPEL_FUNCS_DECL(OP, 1, 2, OPT) \
QPEL_FUNCS_DECL(OP, 1, 3, OPT) \
QPEL_FUNCS_DECL(OP, 2, 1, OPT) \
QPEL_FUNCS_DECL(OP, 2, 2, OPT) \
QPEL_FUNCS_DECL(OP, 2, 3, OPT) \
QPEL_FUNCS_DECL(OP, 3, 0, OPT) \
QPEL_FUNCS_DECL(OP, 3, 1, OPT) \
QPEL_FUNCS_DECL(OP, 3, 2, OPT)
/** @} */
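
/*
 * Editorial note: positions (0,0), (2,0), (0,2) and (3,3) are intentionally
 * missing from the list above.  (0,0) is the plain unfiltered copy, (2,0) and
 * (0,2) are handled by their H.264 equivalents (as the file comment notes),
 * and the buggy (3,3) case is covered by the DEFINE_FN wrappers around the
 * _xy2 halfpel functions.
 */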

#define LOOPSIZE 8
#define HCOFF(x) (32 * ((x) - 1))
#define VCOFF(x) (32 * ((x) - 1))
QPEL_MC_DECL(put_, _ssse3)
QPEL_MC_DECL(avg_, _ssse3)

#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE 8
#define HCOFF(x) (64 * ((x) - 1))
#define VCOFF(x) (64 * ((x) - 1))
QPEL_MC_DECL(put_, _sse2)
QPEL_MC_DECL(avg_, _sse2)
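
/*
 * Editorial note: HCOFF()/VCOFF() translate a quarter-pel position into a
 * byte offset inside the coefficient arrays used by the assembly.  The
 * 32-byte stride for SSSE3 versus the 64-byte stride for SSE2/MMX presumably
 * mirrors how each implementation stores its filter coefficients (e.g. packed
 * bytes versus words); HCOFF(2), for instance, evaluates to 32 for SSSE3 but
 * to 64 for SSE2.
 */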

#if ARCH_X86_32
#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE 4
#define HCOFF(x) (64 * ((x) - 1))
#define VCOFF(x) (64 * ((x) - 1))

QPEL_MC_DECL(put_, _mmx)

#define ff_put_rv40_qpel_h_mmxext ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_mmxext ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _mmxext)

#define ff_put_rv40_qpel_h_3dnow ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_3dnow ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _3dnow)
#endif

/** @{ */
/** Set one function */
#define QPEL_FUNC_SET(OP, SIZE, PH, PV, OPT) \
    c-> OP ## pixels_tab[2 - SIZE / 8][4 * PV + PH] = OP ## rv40_qpel ##SIZE ## _mc ##PH ##PV ##OPT;

/** Set functions put and avg for sizes 8 and 16 and a given qpel position */
#define QPEL_FUNCS_SET(OP, PH, PV, OPT) \
    QPEL_FUNC_SET(OP, 8,  PH, PV, OPT) \
    QPEL_FUNC_SET(OP, 16, PH, PV, OPT)

/** Set all functions for all sizes and qpel positions */
#define QPEL_MC_SET(OP, OPT) \
QPEL_FUNCS_SET (OP, 0, 1, OPT) \
QPEL_FUNCS_SET (OP, 0, 3, OPT) \
QPEL_FUNCS_SET (OP, 1, 0, OPT) \
QPEL_FUNCS_SET (OP, 1, 1, OPT) \
QPEL_FUNCS_SET (OP, 1, 2, OPT) \
QPEL_FUNCS_SET (OP, 1, 3, OPT) \
QPEL_FUNCS_SET (OP, 2, 1, OPT) \
QPEL_FUNCS_SET (OP, 2, 2, OPT) \
QPEL_FUNCS_SET (OP, 2, 3, OPT) \
QPEL_FUNCS_SET (OP, 3, 0, OPT) \
QPEL_FUNCS_SET (OP, 3, 1, OPT) \
QPEL_FUNCS_SET (OP, 3, 2, OPT)
/** @} */
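
/*
 * Editorial illustration (not part of the original source): the first table
 * index selects the block size (0 for 16x16, 1 for 8x8) and the second
 * linearises the sub-pixel position as 4 * PV + PH.  For example,
 *
 *     QPEL_FUNC_SET(put_, 16, 3, 1, _ssse3)
 *
 * expands, after evaluating the indices, to
 *
 *     c->put_pixels_tab[0][7] = put_rv40_qpel16_mc31_ssse3;
 */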

DEFINE_FN(put, 8, ssse3)

DEFINE_FN(put, 16, sse2)
DEFINE_FN(put, 16, ssse3)

DEFINE_FN(avg, 8, mmxext)
DEFINE_FN(avg, 8, ssse3)

DEFINE_FN(avg, 16, sse2)
DEFINE_FN(avg, 16, ssse3)
#endif /* HAVE_X86ASM */

#if HAVE_MMX_INLINE
DEFINE_FN(put, 8, mmx)
DEFINE_FN(avg, 8, mmx)
DEFINE_FN(put, 16, mmx)
DEFINE_FN(avg, 16, mmx)
#endif

av_cold void ff_rv40dsp_init_x86(RV34DSPContext *c)
{
    av_unused int cpu_flags = av_get_cpu_flags();

#if HAVE_MMX_INLINE
    if (INLINE_MMX(cpu_flags)) {
        c->put_pixels_tab[0][15] = put_rv40_qpel16_mc33_mmx;
        c->put_pixels_tab[1][15] = put_rv40_qpel8_mc33_mmx;
        c->avg_pixels_tab[0][15] = avg_rv40_qpel16_mc33_mmx;
        c->avg_pixels_tab[1][15] = avg_rv40_qpel8_mc33_mmx;
    }
#endif /* HAVE_MMX_INLINE */

#if HAVE_X86ASM
    if (EXTERNAL_MMX(cpu_flags)) {
        c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx;
        c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx;
#if ARCH_X86_32
        QPEL_MC_SET(put_, _mmx)
#endif
    }
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow;
        c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow;
#if ARCH_X86_32
        QPEL_MC_SET(avg_, _3dnow)
#endif
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->avg_pixels_tab[1][15] = avg_rv40_qpel8_mc33_mmxext;
        c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_mmxext;
        c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_mmxext;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_mmxext;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmxext;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmxext;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmxext;
#if ARCH_X86_32
        QPEL_MC_SET(avg_, _mmxext)
#endif
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->put_pixels_tab[0][15] = put_rv40_qpel16_mc33_sse2;
        c->avg_pixels_tab[0][15] = avg_rv40_qpel16_mc33_sse2;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2;
        QPEL_MC_SET(put_, _sse2)
        QPEL_MC_SET(avg_, _sse2)
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->put_pixels_tab[0][15] = put_rv40_qpel16_mc33_ssse3;
        c->put_pixels_tab[1][15] = put_rv40_qpel8_mc33_ssse3;
        c->avg_pixels_tab[0][15] = avg_rv40_qpel16_mc33_ssse3;
        c->avg_pixels_tab[1][15] = avg_rv40_qpel8_mc33_ssse3;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3;
        QPEL_MC_SET(put_, _ssse3)
        QPEL_MC_SET(avg_, _ssse3)
    }
#endif /* HAVE_X86ASM */
}
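
/*
 * Usage sketch (editorial, not part of the original source): the generic
 * ff_rv40dsp_init() in rv40dsp.c is expected to fill in portable C defaults
 * first and then let this initialiser override whatever the detected CPU
 * supports, roughly
 *
 *     av_cold void ff_rv40dsp_init(RV34DSPContext *c)
 *     {
 *         // ... install the C implementations ...
 *     #if ARCH_X86
 *         ff_rv40dsp_init_x86(c);
 *     #endif
 *     }
 *
 * so every entry in put_pixels_tab/avg_pixels_tab, the chroma MC pointers and
 * rv40_weight_pixels_tab stays populated even when no x86 optimisation
 * applies.
 */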