/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vc1dsp.h"
#include "constants.h"
#include "fpel.h"
#include "vc1dsp.h"

#if HAVE_6REGS && HAVE_INLINE_ASM && HAVE_MMX_EXTERNAL

void ff_vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                   const uint8_t *src, x86_reg stride,
                                   int rnd, int64_t shift);
void ff_vc1_put_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,
                                   const int16_t *src, int rnd);
void ff_vc1_avg_hor_16b_shift2_mmxext(uint8_t *dst, x86_reg stride,
                                      const int16_t *src, int rnd);

#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"

/** Add the rounder from mm7 to mm3 and mm4 and shift both right to normalize */
#define NORMALIZE_MMX(SHIFT)                                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */      \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */      \
     "psraw     "SHIFT", %%mm3         \n\t"                    \
     "psraw     "SHIFT", %%mm4         \n\t"

#define TRANSFER_DO_PACK(OP)                    \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     OP((%2), %%mm3)                            \
     "movq      %%mm3, (%2)            \n\t"

#define TRANSFER_DONT_PACK(OP)                  \
     OP(0(%2), %%mm3)                           \
     OP(8(%2), %%mm4)                           \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Compute the rounder 32-r or 8-r and unpack it to mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"

/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
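/*
 * Scalar equivalent of the loop below, as an illustrative sketch only
 * (shift2_ref is hypothetical and not part of this file). With the rounder
 * loaded as 8-rnd, each output pel is the (-1,9,9,-1)/16 half-pel filter,
 * saturated to 8 bits by packuswb:
 *
 *     static uint8_t shift2_ref(const uint8_t *p, int off, int rnd)
 *     {
 *         int v = -p[-off] + 9 * p[0] + 9 * p[off] - p[2 * off];
 *         return av_clip_uint8((v + 8 - rnd) >> 4);
 *     }
 */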
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov       $8, %%"REG_c"           \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1:                                \n\t"\
        "movd      0(%0   ), %%mm3         \n\t"\
        "movd      4(%0   ), %%mm4         \n\t"\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "add       %2, %0                  \n\t"\
        "punpcklbw %%mm0, %%mm3            \n\t"\
        "punpcklbw %%mm0, %%mm4            \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "paddw     %%mm1, %%mm3            \n\t"\
        "paddw     %%mm2, %%mm4            \n\t"\
        "movd      0(%0,%3), %%mm1         \n\t"\
        "movd      4(%0,%3), %%mm2         \n\t"\
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/\
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/\
        NORMALIZE_MMX("$4")\
        "packuswb  %%mm4, %%mm3            \n\t"\
        OP((%1), %%mm3)\
        "movq      %%mm3, (%1)             \n\t"\
        "add       %6, %0                  \n\t"\
        "add       %4, %1                  \n\t"\
        "dec       %%"REG_c"               \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(src),  "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "g"(stride-offset)\
          NAMED_CONSTRAINTS_ADD(ff_pw_9)\
        : "%"REG_c, "memory"\
    );\
}

VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)

/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ    "movd 1", or "movq 2" if the data read is already unpacked.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
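/*
 * The core accumulates, in mm3/mm4, the bicubic sum
 *     -3*A1 + 18*A2 + 53*A3 - 4*A4
 * i.e. taps (-3, 18, 53, -4) applied to A1..A4; rounding and shifting are
 * left to the NORMALIZE_MMX invocation that follows each use.
 */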
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* 4,53,18,-3 */          \
     "paddw     %%mm2, %%mm4    \n\t" /* 4,53,18,-3 */

/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed as A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
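/*
 * Note: each iteration stores 12 16-bit results per row (the 8 pels from
 * the core plus the extra ones computed after TRANSFER_DONT_PACK), hence
 * the 24-byte destination stride ("add $24, %2") and the 12*8 layout of
 * the temporary buffer used by the callers.
 */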
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,      \
                                 x86_reg src_stride,                    \
                                 int rnd, int64_t shift)                \
{                                                                       \
    int h = 8;                                                          \
    src -= src_stride;                                                  \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%5")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"                       \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"                       \
        ".p2align 3                \n\t"                                \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)       \
        NORMALIZE_MMX("%6")                                             \
        TRANSFER_DONT_PACK(OP_PUT)                                      \
        /* Last 3 (in fact 4) bytes on the line */                      \
        "movd      8+"A1", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "movq      %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm1, %%mm1    \n\t"                                \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */                       \
        "movd      8+"A2", %%mm3   \n\t"                                \
        DO_UNPACK("%%mm3")                                              \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                      \
        "psubw     %%mm1, %%mm3    \n\t" /*18,-3 */                     \
        "movd      8+"A3", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                      \
        "paddw     %%mm1, %%mm3    \n\t" /*53,18,-3 */                  \
        "movd      8+"A4", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "psllw     $2, %%mm1       \n\t" /* 4* */                       \
        "psubw     %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm7, %%mm3    \n\t"                                \
        "psraw     %6, %%mm3       \n\t"                                \
        "movq      %%mm3, 16(%2)   \n\t"                                \
        "add       %3, %1          \n\t"                                \
        "add       $24, %2         \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(src_stride), "r"(3*src_stride),                           \
          "m"(rnd), "m"(shift)                                          \
          NAMED_CONSTRAINTS_ADD(ff_pw_3,ff_pw_53,ff_pw_18)              \
        : "memory"                                                      \
    );                                                                  \
}

/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here, the offset unit is 16 bits, so the parameters passed as A1 to A4
 * can be simple.
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
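/*
 * Note on the bias handling below: the rounder is pre-biased
 * ("rnd -= ...*256", the taps summing to 64) so that the 16-bit
 * accumulators stay within signed range during the filtering; the
 * ff_pw_128 additions after NORMALIZE_MMX remove that bias again
 * before packing to bytes.
 */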
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)        \
static void                                                             \
OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,    \
                                 const int16_t *src, int rnd)           \
{                                                                       \
    int h = 8;                                                          \
    src -= 1;                                                           \
    rnd -= (-4+58+13-3)*256; /* Add -256 bias */                        \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%4")                                          \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"                    \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"                    \
        ".p2align 3                \n\t"                                \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$7")                                             \
        /* Remove bias */                                               \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"                    \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"                    \
        TRANSFER_DO_PACK(OP)                                            \
        "add       $24, %1         \n\t"                                \
        "add       %3, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(stride), "m"(rnd)                                         \
          NAMED_CONSTRAINTS_ADD(ff_pw_3,ff_pw_18,ff_pw_53,ff_pw_128)    \
        : "memory"                                                      \
    );                                                                  \
}

/**
 * Macro to build the 8-bit, any direction, version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed as A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
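/*
 * Scalar equivalent of one output pel, as an illustrative sketch only
 * (bicubic_ref is hypothetical): with the rounder loaded as 32-rnd and
 * the taps of MSPEL_FILTER13_CORE, each pel is
 *
 *     static uint8_t bicubic_ref(int a1, int a2, int a3, int a4, int rnd)
 *     {
 *         int v = -3 * a1 + 18 * a2 + 53 * a3 - 4 * a4;
 *         return av_clip_uint8((v + 32 - rnd) >> 6);
 *     }
 */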
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)             \
static void                                                             \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,         \
                        x86_reg stride, int rnd, x86_reg offset)        \
{                                                                       \
    int h = 8;                                                          \
    src -= offset;                                                      \
    rnd = 32-rnd;                                                       \
    __asm__ volatile (                                                  \
        LOAD_ROUNDER_MMX("%6")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5       \n\t"                \
        "movq      "MANGLE(ff_pw_18)", %%mm6       \n\t"                \
        ".p2align 3                \n\t"                                \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd   1", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$6")                                             \
        TRANSFER_DO_PACK(OP)                                            \
        "add       %5, %1          \n\t"                                \
        "add       %5, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)             \
          NAMED_CONSTRAINTS_ADD(ff_pw_53,ff_pw_18,ff_pw_3)              \
        : "memory"                                                      \
    );                                                                  \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)
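
/* The shift3 variants pass the same taps as shift1 in mirrored order
 * (A1..A4 reversed), giving the symmetric 3/4-pel filter. */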

typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolate fractional pel values by applying the proper vertical then
 * horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter pixels shift).
 * @param  vmode   Vertical filter.
 * @param  rnd     Rounding bias.
 */
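/*
 * Dispatch example (from the tables below): hmode == 2 with vmode == 0
 * resolves to the single-pass 1/2-shift filter OP ## vc1_shift2_mmx,
 * while hmode && vmode runs the vertical 16-bit pass into a temporary
 * buffer and then the horizontal 16-bit pass from it.
 */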
#define VC1_MSPEL_MC(OP, INSTR)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, ff_vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, ff_vc1_ ## OP ## hor_16b_shift2_ ## INSTR, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0         \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int              r;\
            LOCAL_ALIGNED(16, int16_t, tmp, [12*8]);\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
} \
static void OP ## vc1_mspel_mc_16(uint8_t *dst, const uint8_t *src, \
                                  int stride, int hmode, int vmode, int rnd)\
{ \
    OP ## vc1_mspel_mc(dst + 0, src + 0, stride, hmode, vmode, rnd); \
    OP ## vc1_mspel_mc(dst + 8, src + 8, stride, hmode, vmode, rnd); \
    dst += 8*stride; src += 8*stride; \
    OP ## vc1_mspel_mc(dst + 0, src + 0, stride, hmode, vmode, rnd); \
    OP ## vc1_mspel_mc(dst + 8, src + 8, stride, hmode, vmode, rnd); \
}

VC1_MSPEL_MC(put_, mmx)
VC1_MSPEL_MC(avg_, mmxext)

/** Macro to ease bicubic filter interpolation function declarations */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst,            \
                                               const uint8_t *src,      \
                                               ptrdiff_t stride,        \
                                               int rnd)                 \
{                                                                       \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}\
static void avg_vc1_mspel_mc ## a ## b ## _mmxext(uint8_t *dst,         \
                                                  const uint8_t *src,   \
                                                  ptrdiff_t stride,     \
                                                  int rnd)              \
{                                                                       \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}\
static void put_vc1_mspel_mc ## a ## b ## _16_mmx(uint8_t *dst,         \
                                                  const uint8_t *src,   \
                                                  ptrdiff_t stride,     \
                                                  int rnd)              \
{                                                                       \
     put_vc1_mspel_mc_16(dst, src, stride, a, b, rnd);                  \
}\
static void avg_vc1_mspel_mc ## a ## b ## _16_mmxext(uint8_t *dst,      \
                                                     const uint8_t *src,\
                                                     ptrdiff_t stride,  \
                                                     int rnd)           \
{                                                                       \
     avg_vc1_mspel_mc_16(dst, src, stride, a, b, rnd);                  \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

#define FN_ASSIGN(OP, X, Y, INSN) \
    dsp->OP##vc1_mspel_pixels_tab[1][X+4*Y] = OP##vc1_mspel_mc##X##Y##INSN; \
    dsp->OP##vc1_mspel_pixels_tab[0][X+4*Y] = OP##vc1_mspel_mc##X##Y##_16##INSN
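
/* vc1_mspel_pixels_tab[1] receives the 8x8 functions and
 * vc1_mspel_pixels_tab[0] the 16x16 wrappers built from four 8x8 calls;
 * the index packs the quarter-pel shifts as X + 4*Y. */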

av_cold void ff_vc1dsp_init_mmx(VC1DSPContext *dsp)
{
    FN_ASSIGN(put_, 0, 1, _mmx);
    FN_ASSIGN(put_, 0, 2, _mmx);
    FN_ASSIGN(put_, 0, 3, _mmx);

    FN_ASSIGN(put_, 1, 0, _mmx);
    FN_ASSIGN(put_, 1, 1, _mmx);
    FN_ASSIGN(put_, 1, 2, _mmx);
    FN_ASSIGN(put_, 1, 3, _mmx);

    FN_ASSIGN(put_, 2, 0, _mmx);
    FN_ASSIGN(put_, 2, 1, _mmx);
    FN_ASSIGN(put_, 2, 2, _mmx);
    FN_ASSIGN(put_, 2, 3, _mmx);

    FN_ASSIGN(put_, 3, 0, _mmx);
    FN_ASSIGN(put_, 3, 1, _mmx);
    FN_ASSIGN(put_, 3, 2, _mmx);
    FN_ASSIGN(put_, 3, 3, _mmx);
}

av_cold void ff_vc1dsp_init_mmxext(VC1DSPContext *dsp)
{
    FN_ASSIGN(avg_, 0, 1, _mmxext);
    FN_ASSIGN(avg_, 0, 2, _mmxext);
    FN_ASSIGN(avg_, 0, 3, _mmxext);

    FN_ASSIGN(avg_, 1, 0, _mmxext);
    FN_ASSIGN(avg_, 1, 1, _mmxext);
    FN_ASSIGN(avg_, 1, 2, _mmxext);
    FN_ASSIGN(avg_, 1, 3, _mmxext);

    FN_ASSIGN(avg_, 2, 0, _mmxext);
    FN_ASSIGN(avg_, 2, 1, _mmxext);
    FN_ASSIGN(avg_, 2, 2, _mmxext);
    FN_ASSIGN(avg_, 2, 3, _mmxext);

    FN_ASSIGN(avg_, 3, 0, _mmxext);
    FN_ASSIGN(avg_, 3, 1, _mmxext);
    FN_ASSIGN(avg_, 3, 2, _mmxext);
    FN_ASSIGN(avg_, 3, 3, _mmxext);
}
#endif /* HAVE_6REGS && HAVE_INLINE_ASM && HAVE_MMX_EXTERNAL */