/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "dsputil_x86.h"

#if HAVE_INLINE_ASM

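/* Word-wise rounding constants (+0, +1, +2) for the half-pel averaging
 * code below; bone is the byte-wise constant 1 used by sad8_4_mmxext()
 * to compensate for the round-up behaviour of pavgb. */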
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3]={
0x0000000000000000ULL,
0x0001000100010001ULL,
0x0002000200020002ULL,
};

DECLARE_ASM_CONST(8, uint64_t, bone)= 0x0101010101010101LL;

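/* SAD of one 8xh block, no interpolation.  Absolute differences are
 * formed with the classic psubusb/psubusb/por trick, widened to words
 * and accumulated in %mm6; the caller must zero %mm6 and %mm7 first. */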
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(x86_reg)stride*h;
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}

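/* Same as sad8_1_mmx(), but psadbw sums the absolute differences of
 * 8 bytes in a single instruction; two rows are handled per loop
 * iteration. */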
static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
                                 int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

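/* SAD of one 16xh block using SSE2: unaligned 16-byte loads plus
 * psadbw leave two partial sums in %xmm2, which are folded together
 * with movhlps.  Unlike the MMX variants, the result is returned
 * directly instead of through sum_*(). */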
static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
{
    int ret;
    __asm__ volatile(
        "pxor %%xmm2, %%xmm2            \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1, %4), %%xmm1        \n\t"
        "psadbw (%2), %%xmm0            \n\t"
        "psadbw (%2, %4), %%xmm1        \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "paddw %%xmm1, %%xmm2           \n\t"
        "lea (%1,%4,2), %1              \n\t"
        "lea (%2,%4,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        "movhlps %%xmm2, %%xmm0         \n\t"
        "paddw   %%xmm0, %%xmm2         \n\t"
        "movd    %%xmm2, %3             \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2), "=r"(ret)
        : "r" ((x86_reg)stride)
    );
    return ret;
}

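/* SAD against the horizontal (x+1/2) half-pel interpolation of blk1,
 * approximated with pavgb, which averages with round-up. */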
static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
                                   int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

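/* SAD against the vertical (y+1/2) half-pel interpolation of blk1.
 * The most recent row is carried over in %mm0 so that every row is
 * loaded only once. */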
static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
                                   int stride, int h)
{
    __asm__ volatile(
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

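/* SAD against the two-dimensional (x+1/2, y+1/2) interpolation,
 * approximated with chained pavgb.  Subtracting bone from the
 * intermediate row trims the accumulated round-up bias; the result is
 * not bit-exact, which is why the *_mmxext half-pel functions are only
 * installed when CODEC_FLAG_BITEXACT is unset. */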
static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
                                 int stride, int h)
{
    __asm__ volatile(
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1,%3), %%mm2            \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1,%3), %%mm2          \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2,%3), %%mm1          \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

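/* SAD against the rounded average of two reference blocks; this
 * implements the x2 and y2 half-pel cases on plain MMX.  Expects
 * %mm7 == 0 and round_tab[1] in %mm5, accumulates into %mm6. */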
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(x86_reg)stride*h;
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}

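/* Bit-exact SAD against the (x+1/2, y+1/2) interpolation: each pixel
 * of the reference is (a+b+c+d+2)>>2, with the +2 coming from
 * round_tab[2].  The horizontal pair sums of the previous row are kept
 * in %mm0/%mm1 across iterations so each row is unpacked only once. */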
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(x86_reg)stride*h;
    __asm__ volatile(
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq  %%mm2, %%mm0             \n\t"
        "movq  %%mm3, %%mm1             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}

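/* Fold the four word accumulators in %mm6 into one result; the upper
 * words contain garbage after the reduction, hence the mask to 16
 * bits. */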
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}

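/* With psadbw the running total already sits in the low word of %mm6,
 * so a plain movd suffices. */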
static inline int sum_mmxext(void)
{
    int ret;
    __asm__ volatile(
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret;
}

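/* Map the x2/y2 cases onto sad8_2_mmx() by passing the horizontally
 * or vertically shifted block as the second reference. */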
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+1, blk2, stride, h);
}
static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+stride, blk2, stride, h);
}

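/* Instantiate the full set of SAD entry points for one instruction
 * set: full-pel, x2, y2 and xy2 variants for 8x8 and 16xh blocks.
 * Each wrapper zeroes the %mm6/%mm7 accumulators (and loads the
 * rounding constant where needed) before calling the workers above. */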
#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    av_assert2(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    av_assert2(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    av_assert2(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    av_assert2(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 ::);\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1  , blk2  , stride, h);\
    sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_x2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_x2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_y2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_y2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 ::);\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, h);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\

PIX_SAD(mmx)
PIX_SAD(mmxext)

#endif /* HAVE_INLINE_ASM */

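/* Install the fastest SAD functions the CPU supports.  The MMXEXT
 * half-pel variants are not bit-exact, so they are skipped when
 * CODEC_FLAG_BITEXACT is set; sad16_sse2 is additionally avoided on
 * 3DNow-capable CPUs and for Snow (presumably cases where it would be
 * slower or unsuitable). */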
av_cold void ff_dsputil_init_pix_mmx(DSPContext *c, AVCodecContext *avctx)
{
#if HAVE_INLINE_ASM
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_MMX(cpu_flags)) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0]= sad16_mmx;
        c->sad[1]= sad8_mmx;
    }
    if (INLINE_MMXEXT(cpu_flags)) {
        c->pix_abs[0][0] = sad16_mmxext;
        c->pix_abs[1][0] = sad8_mmxext;

        c->sad[0]        = sad16_mmxext;
        c->sad[1]        = sad8_mmxext;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->pix_abs[0][1] = sad16_x2_mmxext;
            c->pix_abs[0][2] = sad16_y2_mmxext;
            c->pix_abs[0][3] = sad16_xy2_mmxext;
            c->pix_abs[1][1] = sad8_x2_mmxext;
            c->pix_abs[1][2] = sad8_y2_mmxext;
            c->pix_abs[1][3] = sad8_xy2_mmxext;
        }
    }
    if (INLINE_SSE2(cpu_flags) && !(cpu_flags & AV_CPU_FLAG_3DNOW) && avctx->codec_id != AV_CODEC_ID_SNOW) {
        c->sad[0]= sad16_sse2;
    }
#endif /* HAVE_INLINE_ASM */
}