cavsdsp.c
/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX-optimized DSP functions, based on H.264 optimizations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/cavsdsp.h"
#include "constants.h"
#include "dsputil_x86.h"
#include "config.h"

#if HAVE_MMX_INLINE

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA(a, b)               \
    "paddw "#b", "#a"       \n\t"     \
    "paddw "#b", "#b"       \n\t"     \
    "psubw "#a", "#b"       \n\t"

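/* A scalar sketch of what SUMSUB_BA computes per 16-bit lane (hypothetical
 * helper, not part of the original file): given the incoming values a and b,
 * the three instructions leave a' = a + b and b' = b - a, without needing a
 * scratch register. */
static av_unused void sumsub_ba_c(int16_t *a, int16_t *b)
{
    *a += *b;   /* a' = a + b                 */
    *b += *b;   /* b  = 2*b                   */
    *b -= *a;   /* b' = 2*b - (a + b) = b - a */
}
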
/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/

static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
    __asm__ volatile(
        "movq 112(%0), %%mm4  \n\t" /* mm4 = src7 */
        "movq  16(%0), %%mm5  \n\t" /* mm5 = src1 */
        "movq  80(%0), %%mm2  \n\t" /* mm2 = src5 */
        "movq  48(%0), %%mm7  \n\t" /* mm7 = src3 */
        "movq %%mm4, %%mm0    \n\t"
        "movq %%mm5, %%mm3    \n\t"
        "movq %%mm2, %%mm6    \n\t"
        "movq %%mm7, %%mm1    \n\t"

        "paddw %%mm4, %%mm4   \n\t" /* mm4 = 2*src7 */
        "paddw %%mm3, %%mm3   \n\t" /* mm3 = 2*src1 */
        "paddw %%mm6, %%mm6   \n\t" /* mm6 = 2*src5 */
        "paddw %%mm1, %%mm1   \n\t" /* mm1 = 2*src3 */
        "paddw %%mm4, %%mm0   \n\t" /* mm0 = 3*src7 */
        "paddw %%mm3, %%mm5   \n\t" /* mm5 = 3*src1 */
        "paddw %%mm6, %%mm2   \n\t" /* mm2 = 3*src5 */
        "paddw %%mm1, %%mm7   \n\t" /* mm7 = 3*src3 */
        "psubw %%mm4, %%mm5   \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
        "paddw %%mm6, %%mm7   \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
        "psubw %%mm2, %%mm1   \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
        "paddw %%mm0, %%mm3   \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */

        "movq %%mm5, %%mm4    \n\t"
        "movq %%mm7, %%mm6    \n\t"
        "movq %%mm3, %%mm0    \n\t"
        "movq %%mm1, %%mm2    \n\t"
        SUMSUB_BA(%%mm7, %%mm5)     /* mm7 = a0 + a1  mm5 = a0 - a1 */
        "paddw %%mm3, %%mm7   \n\t" /* mm7 = a0 + a1 + a3 */
        "paddw %%mm1, %%mm5   \n\t" /* mm5 = a0 - a1 + a2 */
        "paddw %%mm7, %%mm7   \n\t"
        "paddw %%mm5, %%mm5   \n\t"
        "paddw %%mm6, %%mm7   \n\t" /* mm7 = b4 */
        "paddw %%mm4, %%mm5   \n\t" /* mm5 = b5 */

        SUMSUB_BA(%%mm1, %%mm3)     /* mm1 = a3 + a2  mm3 = a3 - a2 */
        "psubw %%mm1, %%mm4   \n\t" /* mm4 = a0 - a2 - a3 */
        "movq %%mm4, %%mm1    \n\t" /* mm1 = a0 - a2 - a3 */
        "psubw %%mm6, %%mm3   \n\t" /* mm3 = a3 - a2 - a1 */
        "paddw %%mm1, %%mm1   \n\t"
        "paddw %%mm3, %%mm3   \n\t"
        "psubw %%mm2, %%mm1   \n\t" /* mm1 = b7 */
        "paddw %%mm0, %%mm3   \n\t" /* mm3 = b6 */

        "movq  32(%0), %%mm2  \n\t" /* mm2 = src2 */
        "movq  96(%0), %%mm6  \n\t" /* mm6 = src6 */
        "movq %%mm2, %%mm4    \n\t"
        "movq %%mm6, %%mm0    \n\t"
        "psllw $2, %%mm4      \n\t" /* mm4 = 4*src2 */
        "psllw $2, %%mm6      \n\t" /* mm6 = 4*src6 */
        "paddw %%mm4, %%mm2   \n\t" /* mm2 = 5*src2 */
        "paddw %%mm6, %%mm0   \n\t" /* mm0 = 5*src6 */
        "paddw %%mm2, %%mm2   \n\t"
        "paddw %%mm0, %%mm0   \n\t"
        "psubw %%mm0, %%mm4   \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
        "paddw %%mm2, %%mm6   \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */

        "movq    (%0), %%mm2  \n\t" /* mm2 = src0 */
        "movq  64(%0), %%mm0  \n\t" /* mm0 = src4 */
        SUMSUB_BA(%%mm0, %%mm2)     /* mm0 = src0 + src4  mm2 = src0 - src4 */
        "psllw $3, %%mm0      \n\t"
        "psllw $3, %%mm2      \n\t"
        "paddw %1, %%mm0      \n\t" /* add rounding bias */
        "paddw %1, %%mm2      \n\t" /* add rounding bias */

        SUMSUB_BA(%%mm6, %%mm0)     /* mm6 = a4 + a6  mm0 = a4 - a6 */
        SUMSUB_BA(%%mm4, %%mm2)     /* mm4 = a5 + a7  mm2 = a5 - a7 */
        SUMSUB_BA(%%mm7, %%mm6)     /* mm7 = dst0  mm6 = dst7 */
        SUMSUB_BA(%%mm5, %%mm4)     /* mm5 = dst1  mm4 = dst6 */
        SUMSUB_BA(%%mm3, %%mm2)     /* mm3 = dst2  mm2 = dst5 */
        SUMSUB_BA(%%mm1, %%mm0)     /* mm1 = dst3  mm0 = dst4 */
        :: "r"(block), "m"(bias)
    );
}

#define SBUTTERFLY(a, b, t, n, m)                        \
    "mov" #m " " #a ", " #t "     \n\t" /* abcd */       \
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */       \
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */

#define TRANSPOSE4(a, b, c, d, t)                        \
    SBUTTERFLY(a, b, t, wd, q)  /* a = aebf  t = cgdh */ \
    SBUTTERFLY(c, d, b, wd, q)  /* c = imjn  b = kolp */ \
    SBUTTERFLY(a, c, d, dq, q)  /* a = aeim  d = bfjn */ \
    SBUTTERFLY(t, b, c, dq, q)  /* t = cgko  c = dhlp */

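/* Scalar equivalent of TRANSPOSE4 (hypothetical helper, not in the original
 * file): each register holds one row of four 16-bit words, and the two
 * butterfly stages transpose the 4x4 block. Note that the transposed rows
 * come back in registers a, d, t, c (in that order), which is why the stores
 * in cavs_idct8_add_mmx below look shuffled. */
static av_unused void transpose4x4_c(int16_t m[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < i; j++)
            FFSWAP(int16_t, m[i][j], m[j][i]);
}
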
static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    DECLARE_ALIGNED(8, int16_t, b2)[64];

    for (i = 0; i < 2; i++) {
        cavs_idct8_1d(block + 4 * i, ff_pw_4.a);

        __asm__ volatile(
            "psraw $3, %%mm7     \n\t"
            "psraw $3, %%mm6     \n\t"
            "psraw $3, %%mm5     \n\t"
            "psraw $3, %%mm4     \n\t"
            "psraw $3, %%mm3     \n\t"
            "psraw $3, %%mm2     \n\t"
            "psraw $3, %%mm1     \n\t"
            "psraw $3, %%mm0     \n\t"
            "movq %%mm7,   (%0)  \n\t"
            TRANSPOSE4(%%mm0, %%mm2, %%mm4, %%mm6, %%mm7)
            "movq %%mm0,  8(%0)  \n\t"
            "movq %%mm6, 24(%0)  \n\t"
            "movq %%mm7, 40(%0)  \n\t"
            "movq %%mm4, 56(%0)  \n\t"
            "movq   (%0), %%mm7  \n\t"
            TRANSPOSE4(%%mm7, %%mm5, %%mm3, %%mm1, %%mm0)
            "movq %%mm7,   (%0)  \n\t"
            "movq %%mm1, 16(%0)  \n\t"
            "movq %%mm0, 32(%0)  \n\t"
            "movq %%mm3, 48(%0)  \n\t"
            :
            : "r"(b2 + 32 * i)
            : "memory"
        );
    }

    for (i = 0; i < 2; i++) {
        cavs_idct8_1d(b2 + 4 * i, ff_pw_64.a);

        __asm__ volatile(
            "psraw $7, %%mm7      \n\t"
            "psraw $7, %%mm6      \n\t"
            "psraw $7, %%mm5      \n\t"
            "psraw $7, %%mm4      \n\t"
            "psraw $7, %%mm3      \n\t"
            "psraw $7, %%mm2      \n\t"
            "psraw $7, %%mm1      \n\t"
            "psraw $7, %%mm0      \n\t"
            "movq %%mm7,    (%0)  \n\t"
            "movq %%mm5,  16(%0)  \n\t"
            "movq %%mm3,  32(%0)  \n\t"
            "movq %%mm1,  48(%0)  \n\t"
            "movq %%mm0,  64(%0)  \n\t"
            "movq %%mm2,  80(%0)  \n\t"
            "movq %%mm4,  96(%0)  \n\t"
            "movq %%mm6, 112(%0)  \n\t"
            :: "r"(b2 + 4 * i)
            : "memory"
        );
    }

    ff_add_pixels_clamped_mmx(b2, dst, stride);
}
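
/* Scalar sketch of one 1-D pass over a line of eight coefficients, following
 * the register comments in cavs_idct8_1d (hypothetical reference, not in the
 * original file; the MMX version works on 16-bit lanes with wraparound, this
 * uses plain ints and stride-1 addressing). The first pass above runs with
 * bias 4 and shift 3, the second with bias 64 and shift 7, matching
 * ff_pw_4/psraw $3 and ff_pw_64/psraw $7. */
static av_unused void cavs_idct8_1d_c(int16_t *s, int bias, int shift)
{
    const int a0 = 3 * s[1] - 2 * s[7];
    const int a1 = 3 * s[3] + 2 * s[5];
    const int a2 = 2 * s[3] - 3 * s[5];
    const int a3 = 2 * s[1] + 3 * s[7];

    const int b4 = 2 * (a0 + a1 + a3) + a1;
    const int b5 = 2 * (a0 - a1 + a2) + a0;
    const int b6 = 2 * (a3 - a2 - a1) + a3;
    const int b7 = 2 * (a0 - a2 - a3) - a2;

    const int a7 =  4 * s[2] - 10 * s[6];
    const int a6 = 10 * s[2] +  4 * s[6];
    const int a4 =  8 * (s[0] + s[4]) + bias;
    const int a5 =  8 * (s[0] - s[4]) + bias;

    s[0] = (a4 + a6 + b4) >> shift;
    s[1] = (a5 + a7 + b5) >> shift;
    s[2] = (a5 - a7 + b6) >> shift;
    s[3] = (a4 - a6 + b7) >> shift;
    s[4] = (a4 - a6 - b7) >> shift;
    s[5] = (a5 - a7 - b6) >> shift;
    s[6] = (a5 + a7 - b5) >> shift;
    s[7] = (a4 + a6 - b4) >> shift;
}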

#endif /* HAVE_MMX_INLINE */

#if (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE)

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/

/* vertical filter [-1 -2 96 42 -7 0] */
#define QPEL_CAVSV1(A, B, C, D, E, F, OP, MUL2)  \
    "movd (%0), "#F"              \n\t"          \
    "movq "#C", %%mm6             \n\t"          \
    "pmullw %5, %%mm6             \n\t"          \
    "movq "#D", %%mm7             \n\t"          \
    "pmullw "MANGLE(MUL2)", %%mm7 \n\t"          \
    "psllw $3, "#E"               \n\t"          \
    "psubw "#E", %%mm6            \n\t"          \
    "psraw $3, "#E"               \n\t"          \
    "paddw %%mm7, %%mm6           \n\t"          \
    "paddw "#E", %%mm6            \n\t"          \
    "paddw "#B", "#B"             \n\t"          \
    "pxor %%mm7, %%mm7            \n\t"          \
    "add %2, %0                   \n\t"          \
    "punpcklbw %%mm7, "#F"        \n\t"          \
    "psubw "#B", %%mm6            \n\t"          \
    "psraw $1, "#B"               \n\t"          \
    "psubw "#A", %%mm6            \n\t"          \
    "paddw %4, %%mm6              \n\t"          \
    "psraw $7, %%mm6              \n\t"          \
    "packuswb %%mm6, %%mm6        \n\t"          \
    OP(%%mm6, (%1), A, d)                        \
    "add %3, %1                   \n\t"

/* vertical filter [ 0 -1  5  5 -1  0] */
#define QPEL_CAVSV2(A, B, C, D, E, F, OP, MUL2)  \
    "movd (%0), "#F"              \n\t"          \
    "movq "#C", %%mm6             \n\t"          \
    "paddw "#D", %%mm6            \n\t"          \
    "pmullw %5, %%mm6             \n\t"          \
    "add %2, %0                   \n\t"          \
    "punpcklbw %%mm7, "#F"        \n\t"          \
    "psubw "#B", %%mm6            \n\t"          \
    "psubw "#E", %%mm6            \n\t"          \
    "paddw %4, %%mm6              \n\t"          \
    "psraw $3, %%mm6              \n\t"          \
    "packuswb %%mm6, %%mm6        \n\t"          \
    OP(%%mm6, (%1), A, d)                        \
    "add %3, %1                   \n\t"

/* vertical filter [ 0 -7 42 96 -2 -1] */
#define QPEL_CAVSV3(A, B, C, D, E, F, OP, MUL2)  \
    "movd (%0), "#F"              \n\t"          \
    "movq "#C", %%mm6             \n\t"          \
    "pmullw "MANGLE(MUL2)", %%mm6 \n\t"          \
    "movq "#D", %%mm7             \n\t"          \
    "pmullw %5, %%mm7             \n\t"          \
    "psllw $3, "#B"               \n\t"          \
    "psubw "#B", %%mm6            \n\t"          \
    "psraw $3, "#B"               \n\t"          \
    "paddw %%mm7, %%mm6           \n\t"          \
    "paddw "#B", %%mm6            \n\t"          \
    "paddw "#E", "#E"             \n\t"          \
    "pxor %%mm7, %%mm7            \n\t"          \
    "add %2, %0                   \n\t"          \
    "punpcklbw %%mm7, "#F"        \n\t"          \
    "psubw "#E", %%mm6            \n\t"          \
    "psraw $1, "#E"               \n\t"          \
    "psubw "#F", %%mm6            \n\t"          \
    "paddw %4, %%mm6              \n\t"          \
    "psraw $7, %%mm6              \n\t"          \
    "packuswb %%mm6, %%mm6        \n\t"          \
    OP(%%mm6, (%1), A, d)                        \
    "add %3, %1                   \n\t"

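/* Scalar sketch of the three vertical filters above (hypothetical helper,
 * not in the original file; the real code keeps six source lines in MMX
 * registers and rotates them instead of re-reading memory):
 *   QPEL_CAVSV1: taps {-1, -2, 96, 42, -7,  0}, bias 64, shift 7
 *   QPEL_CAVSV2: taps { 0, -1,  5,  5, -1,  0}, bias  4, shift 3
 *   QPEL_CAVSV3: taps { 0, -7, 42, 96, -2, -1}, bias 64, shift 7 */
static av_unused uint8_t cavs_vfilter_c(const uint8_t *src, int stride,
                                        const int8_t taps[6],
                                        int bias, int shift)
{
    int i, sum = bias;
    for (i = 0; i < 6; i++)
        sum += taps[i] * src[(i - 2) * stride];  /* lines y-2 .. y+3 */
    return av_clip_uint8(sum >> shift);
}
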
#define QPEL_CAVSVNUM(VOP, OP, ADD, MUL1, MUL2)                          \
    int w = 2;                                                           \
    src -= 2 * srcStride;                                                \
                                                                         \
    while (w--) {                                                        \
        __asm__ volatile(                                                \
            "pxor %%mm7, %%mm7      \n\t"                                \
            "movd (%0), %%mm0       \n\t"                                \
            "add %2, %0             \n\t"                                \
            "movd (%0), %%mm1       \n\t"                                \
            "add %2, %0             \n\t"                                \
            "movd (%0), %%mm2       \n\t"                                \
            "add %2, %0             \n\t"                                \
            "movd (%0), %%mm3       \n\t"                                \
            "add %2, %0             \n\t"                                \
            "movd (%0), %%mm4       \n\t"                                \
            "add %2, %0             \n\t"                                \
            "punpcklbw %%mm7, %%mm0 \n\t"                                \
            "punpcklbw %%mm7, %%mm1 \n\t"                                \
            "punpcklbw %%mm7, %%mm2 \n\t"                                \
            "punpcklbw %%mm7, %%mm3 \n\t"                                \
            "punpcklbw %%mm7, %%mm4 \n\t"                                \
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)      \
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)      \
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)      \
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)      \
            VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)      \
            VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)      \
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)      \
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)      \
                                                                         \
            : "+a"(src), "+c"(dst)                                       \
            : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride),          \
              "m"(ADD), "m"(MUL1)                                        \
            : "memory"                                                   \
        );                                                               \
        if (h == 16) {                                                   \
            __asm__ volatile(                                            \
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)  \
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)  \
                VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)  \
                VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)  \
                VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)  \
                VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)  \
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)  \
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)  \
                                                                         \
                : "+a"(src), "+c"(dst)                                   \
                : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride),      \
                  "m"(ADD), "m"(MUL1)                                    \
                : "memory"                                               \
            );                                                           \
        }                                                                \
        src += 4 - (h + 5) * srcStride;                                  \
        dst += 4 -  h      * dstStride;                                  \
    }

#define QPEL_CAVS(OPNAME, OP, MMX)                                            \
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride) \
{                                                                             \
    int h = 8;                                                                \
    __asm__ volatile(                                                         \
        "pxor %%mm7, %%mm7          \n\t"                                     \
        "movq %5, %%mm6             \n\t"                                     \
        "1:                         \n\t"                                     \
        "movq (%0), %%mm0           \n\t"                                     \
        "movq 1(%0), %%mm2          \n\t"                                     \
        "movq %%mm0, %%mm1          \n\t"                                     \
        "movq %%mm2, %%mm3          \n\t"                                     \
        "punpcklbw %%mm7, %%mm0     \n\t"                                     \
        "punpckhbw %%mm7, %%mm1     \n\t"                                     \
        "punpcklbw %%mm7, %%mm2     \n\t"                                     \
        "punpckhbw %%mm7, %%mm3     \n\t"                                     \
        "paddw %%mm2, %%mm0         \n\t"                                     \
        "paddw %%mm3, %%mm1         \n\t"                                     \
        "pmullw %%mm6, %%mm0        \n\t"                                     \
        "pmullw %%mm6, %%mm1        \n\t"                                     \
        "movq -1(%0), %%mm2         \n\t"                                     \
        "movq 2(%0), %%mm4          \n\t"                                     \
        "movq %%mm2, %%mm3          \n\t"                                     \
        "movq %%mm4, %%mm5          \n\t"                                     \
        "punpcklbw %%mm7, %%mm2     \n\t"                                     \
        "punpckhbw %%mm7, %%mm3     \n\t"                                     \
        "punpcklbw %%mm7, %%mm4     \n\t"                                     \
        "punpckhbw %%mm7, %%mm5     \n\t"                                     \
        "paddw %%mm4, %%mm2         \n\t"                                     \
        "paddw %%mm3, %%mm5         \n\t"                                     \
        "psubw %%mm2, %%mm0         \n\t"                                     \
        "psubw %%mm5, %%mm1         \n\t"                                     \
        "movq %6, %%mm5             \n\t"                                     \
        "paddw %%mm5, %%mm0         \n\t"                                     \
        "paddw %%mm5, %%mm1         \n\t"                                     \
        "psraw $3, %%mm0            \n\t"                                     \
        "psraw $3, %%mm1            \n\t"                                     \
        "packuswb %%mm1, %%mm0      \n\t"                                     \
        OP(%%mm0, (%1), %%mm5, q)                                             \
        "add %3, %0                 \n\t"                                     \
        "add %4, %1                 \n\t"                                     \
        "decl %2                    \n\t"                                     \
        "jnz 1b                     \n\t"                                     \
        : "+a"(src), "+c"(dst), "+m"(h)                                       \
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride),                   \
          "m"(ff_pw_5), "m"(ff_pw_4)                                          \
        : "memory"                                                            \
    );                                                                        \
}                                                                             \
                                                                              \
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h) \
{                                                                             \
    QPEL_CAVSVNUM(QPEL_CAVSV1, OP, ff_pw_64, ff_pw_96, ff_pw_42)              \
}                                                                             \
                                                                              \
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h) \
{                                                                             \
    QPEL_CAVSVNUM(QPEL_CAVSV2, OP, ff_pw_4, ff_pw_5, ff_pw_5)                 \
}                                                                             \
                                                                              \
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h) \
{                                                                             \
    QPEL_CAVSVNUM(QPEL_CAVSV3, OP, ff_pw_64, ff_pw_96, ff_pw_42)              \
}                                                                             \
                                                                              \
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride) \
{                                                                             \
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst,     src,     dstStride, srcStride, 8);  \
}                                                                             \
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride) \
{                                                                             \
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst,     src,     dstStride, srcStride, 16); \
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst + 8, src + 8, dstStride, srcStride, 16); \
}                                                                             \
                                                                              \
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride) \
{                                                                             \
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst,     src,     dstStride, srcStride, 8);  \
}                                                                             \
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride) \
{                                                                             \
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst,     src,     dstStride, srcStride, 16); \
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst + 8, src + 8, dstStride, srcStride, 16); \
}                                                                             \
                                                                              \
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride) \
{                                                                             \
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst,     src,     dstStride, srcStride, 8);  \
}                                                                             \
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride) \
{                                                                             \
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst,     src,     dstStride, srcStride, 16); \
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst + 8, src + 8, dstStride, srcStride, 16); \
}                                                                             \
                                                                              \
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride) \
{                                                                             \
    OPNAME ## cavs_qpel8_h_ ## MMX(dst,     src,     dstStride, srcStride);   \
    OPNAME ## cavs_qpel8_h_ ## MMX(dst + 8, src + 8, dstStride, srcStride);   \
    src += 8 * srcStride;                                                     \
    dst += 8 * dstStride;                                                     \
    OPNAME ## cavs_qpel8_h_ ## MMX(dst,     src,     dstStride, srcStride);   \
    OPNAME ## cavs_qpel8_h_ ## MMX(dst + 8, src + 8, dstStride, srcStride);   \
}

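/* Scalar sketch of the horizontal filter computed by the loop in
 * cavs_qpel8_h (hypothetical helper, not in the original file): the
 * half-pel taps are [-1 5 5 -1] with bias 4 and shift 3, matching the
 * ff_pw_5 and ff_pw_4 constants loaded above. */
static av_unused uint8_t cavs_hfilter_c(const uint8_t *src)
{
    return av_clip_uint8((-src[-1] + 5 * src[0] + 5 * src[1] - src[2] + 4) >> 3);
}
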
#define CAVS_MC(OPNAME, SIZE, MMX)                                                                 \
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride) \
{                                                                                                  \
    OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);                           \
}                                                                                                  \
                                                                                                   \
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride) \
{                                                                                                  \
    OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);                          \
}                                                                                                  \
                                                                                                   \
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride) \
{                                                                                                  \
    OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);                          \
}                                                                                                  \
                                                                                                   \
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride) \
{                                                                                                  \
    OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);                          \
}

#define PUT_OP(a, b, temp, size)                  \
    "mov" #size " " #a ", " #b "    \n\t"

#define AVG_3DNOW_OP(a, b, temp, size)            \
    "mov" #size " " #b ", " #temp " \n\t"         \
    "pavgusb " #temp ", " #a "      \n\t"         \
    "mov" #size " " #a ", " #b "    \n\t"

#define AVG_MMXEXT_OP(a, b, temp, size)           \
    "mov" #size " " #b ", " #temp " \n\t"         \
    "pavgb " #temp ", " #a "        \n\t"         \
    "mov" #size " " #a ", " #b "    \n\t"

#endif /* (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE) */

#if HAVE_MMX_INLINE
static void put_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_put_pixels8_mmx(dst, src, stride, 8);
}

static void avg_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_avg_pixels8_mmx(dst, src, stride, 8);
}

static void put_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_put_pixels16_mmx(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_avg_pixels16_mmx(dst, src, stride, 16);
}

static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c,
                                     AVCodecContext *avctx)
{
    c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_mmx;
    c->put_cavs_qpel_pixels_tab[1][0] = put_cavs_qpel8_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmx;

    c->cavs_idct8_add = cavs_idct8_add_mmx;
    c->idct_perm      = FF_TRANSPOSE_IDCT_PERM;
}
#endif /* HAVE_MMX_INLINE */

#define DSPFUNC(PFX, IDX, NUM, EXT)                                                       \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT;

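/* The table index used above encodes the quarter-pel position as x + 4*y;
 * a sketch of the convention implied by the _mcXY names (hypothetical macro,
 * not used elsewhere in this file):
 *   mc20 -> 2 + 4*0 =  2   (horizontal half-pel)
 *   mc01 -> 0 + 4*1 =  4   (vertical quarter-pel)
 *   mc02 -> 0 + 4*2 =  8   (vertical half-pel)
 *   mc03 -> 0 + 4*3 = 12   (vertical three-quarter-pel) */
#define CAVS_QPEL_IDX(x, y) ((x) + 4 * (y))
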
#if HAVE_MMXEXT_INLINE
QPEL_CAVS(put_, PUT_OP,        mmxext)
QPEL_CAVS(avg_, AVG_MMXEXT_OP, mmxext)

CAVS_MC(put_,  8, mmxext)
CAVS_MC(put_, 16, mmxext)
CAVS_MC(avg_,  8, mmxext)
CAVS_MC(avg_, 16, mmxext)

static av_cold void cavsdsp_init_mmxext(CAVSDSPContext *c,
                                        AVCodecContext *avctx)
{
    DSPFUNC(put, 0, 16, mmxext);
    DSPFUNC(put, 1,  8, mmxext);
    DSPFUNC(avg, 0, 16, mmxext);
    DSPFUNC(avg, 1,  8, mmxext);
}
#endif /* HAVE_MMXEXT_INLINE */

#if HAVE_AMD3DNOW_INLINE
QPEL_CAVS(put_, PUT_OP,       3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)

CAVS_MC(put_,  8, 3dnow)
CAVS_MC(put_, 16, 3dnow)
CAVS_MC(avg_,  8, 3dnow)
CAVS_MC(avg_, 16, 3dnow)

static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
                                       AVCodecContext *avctx)
{
    DSPFUNC(put, 0, 16, 3dnow);
    DSPFUNC(put, 1,  8, 3dnow);
    DSPFUNC(avg, 0, 16, 3dnow);
    DSPFUNC(avg, 1,  8, 3dnow);
}
#endif /* HAVE_AMD3DNOW_INLINE */

av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
{
#if HAVE_MMX_INLINE
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_MMX(cpu_flags))
        cavsdsp_init_mmx(c, avctx);
#endif /* HAVE_MMX_INLINE */
#if HAVE_AMD3DNOW_INLINE
    if (INLINE_AMD3DNOW(cpu_flags))
        cavsdsp_init_3dnow(c, avctx);
#endif /* HAVE_AMD3DNOW_INLINE */
#if HAVE_MMXEXT_INLINE
    if (INLINE_MMXEXT(cpu_flags))
        cavsdsp_init_mmxext(c, avctx);
#endif /* HAVE_MMXEXT_INLINE */
}
556 }