/*
 * Loongson SIMD optimized h264pred
 *
 * Copyright (c) 2015 Loongson Technology Corporation Limited
 * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 *                    Zhang Shuangshuang <zhangshuangshuang@ict.ac.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "h264pred_mips.h"

/**
 * 16x16 vertical prediction: copy the 16-byte row directly above the block
 * (src - stride) into all 16 rows of the block.
 */
void ff_pred16x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dli $8, 16                         \r\n"   /* row counter */
        /* unaligned load of the 16 top-neighbour pixels into $f2/$f4 */
        "gsldlc1 $f2, 7(%[srcA])            \r\n"
        "gsldrc1 $f2, 0(%[srcA])            \r\n"
        "gsldlc1 $f4, 15(%[srcA])           \r\n"
        "gsldrc1 $f4, 8(%[srcA])            \r\n"
        "1:                                 \r\n"
        /* unaligned store of the same 16 bytes to the current row */
        "gssdlc1 $f2, 7(%[src])             \r\n"
        "gssdrc1 $f2, 0(%[src])             \r\n"
        "gssdlc1 $f4, 15(%[src])            \r\n"
        "gssdrc1 $f4, 8(%[src])             \r\n"
        "daddu %[src], %[src], %[stride]    \r\n"
        "daddi $8, $8, -1                   \r\n"
        "bnez $8, 1b                        \r\n"
        : [src]"+&r"(src)
        : [stride]"r"(stride),[srcA]"r"(src-stride)
        : "$8","$f2","$f4","memory"
    );
}

void ff_pred16x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
52 53
        "daddiu $2, %[src], -1              \r\n"
        "daddu $3, %[src], $0               \r\n"
54 55 56
        "dli $6, 0x10                       \r\n"
        "1:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
57
        "dmul $5, $4, %[ff_pb_1]            \r\n"
58 59 60 61
        "sdl $5, 7($3)                      \r\n"
        "sdr $5, 0($3)                      \r\n"
        "sdl $5, 15($3)                     \r\n"
        "sdr $5, 8($3)                      \r\n"
62 63
        "daddu $2, %[stride]                \r\n"
        "daddu $3, %[stride]                \r\n"
64 65
        "daddiu $6, -1                      \r\n"
        "bnez $6, 1b                        \r\n"
66 67
        ::[src]"r"(src),[stride]"r"(stride),[ff_pb_1]"r"(ff_pb_1)
        : "$2","$3","$4","$5","$6"
68 69 70 71 72 73
    );
}

void ff_pred16x16_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
74
        "daddiu $2, %[src], -1              \r\n"
75 76 77 78 79
        "dli $6, 0x10                       \r\n"
        "xor $8, $8, $8                     \r\n"
        "1:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
        "daddu $8, $8, $4                   \r\n"
80
        "daddu $2, $2, %[stride]            \r\n"
81 82 83
        "daddiu $6, $6, -1                  \r\n"
        "bnez $6, 1b                        \r\n"
        "dli $6, 0x10                       \r\n"
84 85
        "negu $3, %[stride]                 \r\n"
        "daddu $2, %[src], $3               \r\n"
86 87 88 89 90 91 92 93
        "2:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
        "daddu $8, $8, $4                   \r\n"
        "daddiu $2, $2, 1                   \r\n"
        "daddiu $6, $6, -1                  \r\n"
        "bnez $6, 2b                        \r\n"
        "daddiu $8, $8, 0x10                \r\n"
        "dsra $8, 5                         \r\n"
94 95
        "dmul $5, $8, %[ff_pb_1]            \r\n"
        "daddu $2, %[src], $0               \r\n"
96 97 98 99 100 101
        "dli $6, 0x10                       \r\n"
        "3:                                 \r\n"
        "sdl $5, 7($2)                      \r\n"
        "sdr $5, 0($2)                      \r\n"
        "sdl $5, 15($2)                     \r\n"
        "sdr $5, 8($2)                      \r\n"
102
        "daddu $2, $2, %[stride]            \r\n"
103 104
        "daddiu $6, $6, -1                  \r\n"
        "bnez $6, 3b                        \r\n"
105 106
        ::[src]"r"(src),[stride]"r"(stride),[ff_pb_1]"r"(ff_pb_1)
        : "$2","$3","$4","$5","$6","$8"
107 108 109 110 111 112 113 114 115
    );
}

void ff_pred8x8l_top_dc_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    uint32_t dc;

    __asm__ volatile (
116 117 118 119 120 121
        "ldl $8, 7(%[srcA])                 \r\n"
        "ldr $8, 0(%[srcA])                 \r\n"
        "ldl $9, 7(%[src0])                 \r\n"
        "ldr $9, 0(%[src0])                 \r\n"
        "ldl $10, 7(%[src1])                \r\n"
        "ldr $10, 0(%[src1])                \r\n"
122 123 124 125 126 127 128 129 130 131
        "dmtc1 $8, $f2                      \r\n"
        "dmtc1 $9, $f4                      \r\n"
        "dmtc1 $10, $f6                     \r\n"
        "dmtc1 $0, $f0                      \r\n"
        "punpcklbh $f8, $f2, $f0            \r\n"
        "punpckhbh $f10, $f2, $f0           \r\n"
        "punpcklbh $f12, $f4, $f0           \r\n"
        "punpckhbh $f14, $f4, $f0           \r\n"
        "punpcklbh $f16, $f6, $f0           \r\n"
        "punpckhbh $f18, $f6, $f0           \r\n"
132
        "bnez %[has_topleft], 1f            \r\n"
133 134
        "pinsrh_0 $f8, $f8, $f12            \r\n"
        "1:                                 \r\n"
135
        "bnez %[has_topright], 2f           \r\n"
136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
        "pinsrh_3 $f18, $f18, $f14          \r\n"
        "2:                                 \r\n"
        "daddiu $8, $0, 2                   \r\n"
        "dmtc1 $8, $f20                     \r\n"
        "pshufh $f22, $f20, $f0             \r\n"
        "pmullh $f12, $f12, $f22            \r\n"
        "pmullh $f14, $f14, $f22            \r\n"
        "paddh $f8, $f8, $f12               \r\n"
        "paddh $f10, $f10, $f14             \r\n"
        "paddh $f8, $f8, $f16               \r\n"
        "paddh $f10, $f10, $f18             \r\n"
        "paddh $f8, $f8, $f22               \r\n"
        "paddh $f10, $f10, $f22             \r\n"
        "psrah $f8, $f8, $f20               \r\n"
        "psrah $f10, $f10, $f20             \r\n"
        "packushb $f4, $f8, $f10            \r\n"
        "biadd $f2, $f4                     \r\n"
        "mfc1 $9, $f2                       \r\n"
        "addiu $9, $9, 4                    \r\n"
        "dsrl $9, $9, 3                     \r\n"
156 157 158 159 160 161 162
        "mul %[dc], $9, %[ff_pb_1]          \r\n"
        : [dc]"=r"(dc)
        : [srcA]"r"(src-stride-1),[src0]"r"(src-stride),
          [src1]"r"(src-stride+1),[has_topleft]"r"(has_topleft),
          [has_topright]"r"(has_topright),[ff_pb_1]"r"(ff_pb_1)
        : "$8","$9","$10","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16",
          "$f18","$f20","$f22"
163 164
    );

165 166 167 168 169 170 171 172 173 174 175 176 177
    __asm__ volatile (
        "dli $8, 8                          \r\n"
        "1:                                 \r\n"
        "punpcklwd $f2, %[dc], %[dc]        \r\n"
        "gssdlc1 $f2, 7(%[src])             \r\n"
        "gssdrc1 $f2, 0(%[src])             \r\n"
        "daddu %[src], %[src], %[stride]    \r\n"
        "daddi $8, $8, -1                   \r\n"
        "bnez $8, 1b                        \r\n"
        : [src]"+&r"(src)
        : [dc]"f"(dc),[stride]"r"(stride)
        : "$8","$f2"
    );
178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194
}

/**
 * 8x8 (luma) DC prediction: the left edge is [1,2,1]-filtered in C (l0..l7),
 * the top edge is filtered in MMI asm (dc2 = sum of the 8 filtered top
 * pixels), and dc = ((left_sum + top_sum + 8) >> 4) is splatted over all
 * 8 rows.
 */
void ff_pred8x8l_dc_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    uint32_t dc, dc1, dc2;

    /* [1,2,1]-filtered left-edge pixels; first/last taps are substituted
     * when the corner neighbours are unavailable. */
    const int l0 = ((has_topleft ? src[-1+-1*stride] : src[-1+0*stride]) + 2*src[-1+0*stride] + src[-1+1*stride] + 2) >> 2;
    const int l1 = (src[-1+0*stride] + 2*src[-1+1*stride] + src[-1+2*stride] + 2) >> 2;
    const int l2 = (src[-1+1*stride] + 2*src[-1+2*stride] + src[-1+3*stride] + 2) >> 2;
    const int l3 = (src[-1+2*stride] + 2*src[-1+3*stride] + src[-1+4*stride] + 2) >> 2;
    const int l4 = (src[-1+3*stride] + 2*src[-1+4*stride] + src[-1+5*stride] + 2) >> 2;
    const int l5 = (src[-1+4*stride] + 2*src[-1+5*stride] + src[-1+6*stride] + 2) >> 2;
    const int l6 = (src[-1+5*stride] + 2*src[-1+6*stride] + src[-1+7*stride] + 2) >> 2;
    const int l7 = (src[-1+6*stride] + 2*src[-1+7*stride] + src[-1+7*stride] + 2) >> 2;

    __asm__ volatile (
        /* load top-left-shifted, top, and top-right-shifted rows */
        "ldl $8, 7(%[srcA])                 \r\n"
        "ldr $8, 0(%[srcA])                 \r\n"
        "ldl $9, 7(%[src0])                 \r\n"
        "ldr $9, 0(%[src0])                 \r\n"
        "ldl $10, 7(%[src1])                \r\n"
        "ldr $10, 0(%[src1])                \r\n"
        /* widen bytes to 16-bit lanes */
        "dmtc1 $8, $f2                      \r\n"
        "dmtc1 $9, $f4                      \r\n"
        "dmtc1 $10, $f6                     \r\n"
        "dmtc1 $0, $f0                      \r\n"
        "punpcklbh $f8, $f2, $f0            \r\n"
        "punpckhbh $f10, $f2, $f0           \r\n"
        "punpcklbh $f12, $f4, $f0           \r\n"
        "punpckhbh $f14, $f4, $f0           \r\n"
        "punpcklbh $f16, $f6, $f0           \r\n"
        "punpckhbh $f18, $f6, $f0           \r\n"
        /* swap the last lanes of the A/right rows (edge handling) */
        "daddiu $8, $0, 3                   \r\n"
        "dmtc1 $8, $f20                     \r\n"
        "pshufh $f28, $f10, $f20            \r\n"
        "pshufh $f30, $f18, $f20            \r\n"
        "pinsrh_3 $f10, $f10, $f30          \r\n"
        "pinsrh_3 $f18, $f18, $f28          \r\n"
        /* no top-left: duplicate the first centre pixel into the left tap */
        "bnez %[has_topleft], 1f            \r\n"
        "pinsrh_0 $f8, $f8, $f12            \r\n"
        "1:                                 \r\n"
        /* no top-right: duplicate the last centre pixel into the right tap */
        "bnez %[has_topright], 2f           \r\n"
        "pshufh $f30, $f14, $f20            \r\n"
        "pinsrh_3 $f10, $f10, $f30          \r\n"
        "2:                                 \r\n"
        /* (left + 2*mid + right + 2) >> 2 per lane */
        "daddiu $8, $0, 2                   \r\n"
        "dmtc1 $8, $f20                     \r\n"
        "pshufh $f22, $f20, $f0             \r\n"
        "pmullh $f12, $f12, $f22            \r\n"
        "pmullh $f14, $f14, $f22            \r\n"
        "paddh $f8, $f8, $f12               \r\n"
        "paddh $f10, $f10, $f14             \r\n"
        "paddh $f8, $f8, $f16               \r\n"
        "paddh $f10, $f10, $f18             \r\n"
        "paddh $f8, $f8, $f22               \r\n"
        "paddh $f10, $f10, $f22             \r\n"
        "psrah $f8, $f8, $f20               \r\n"
        "psrah $f10, $f10, $f20             \r\n"
        /* pack to bytes and horizontal-sum -> dc2 */
        "packushb $f4, $f8, $f10            \r\n"
        "biadd $f2, $f4                     \r\n"
        "mfc1 %[dc2], $f2                   \r\n"
        : [dc2]"=r"(dc2)
        : [srcA]"r"(src-stride-1),[src0]"r"(src-stride),
          [src1]"r"(src-stride+1),[has_topleft]"r"(has_topleft),
          [has_topright]"r"(has_topright)
        /* NOTE: $f0, $f28 and $f30 were missing from the original clobber
         * list although the asm writes them. */
        : "$8","$9","$10","$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14",
          "$f16","$f18","$f20","$f22","$f28","$f30","memory"
    );

    dc1 = l0+l1+l2+l3+l4+l5+l6+l7;
    dc = ((dc1+dc2+8)>>4)*0x01010101U;    /* splat dc byte to 32 bits */

    /* replicate the 32-bit dc pattern across all 8 rows */
    __asm__ volatile (
        "dli $8, 8                          \r\n"
        "1:                                 \r\n"
        "punpcklwd $f2, %[dc], %[dc]        \r\n"
        "gssdlc1 $f2, 7(%[src])             \r\n"
        "gssdrc1 $f2, 0(%[src])             \r\n"
        "daddu %[src], %[src], %[stride]    \r\n"
        "daddi $8, $8, -1                   \r\n"
        "bnez $8, 1b                        \r\n"
        : [src]"+&r"(src)
        : [dc]"f"(dc),[stride]"r"(stride)
        : "$8","$f2","memory"
    );
}

/**
 * 8x8 (luma) vertical prediction: [1,2,1]-filter the 8 top-neighbour pixels
 * (with has_topleft/has_topright edge substitution), store the filtered row
 * into the first block row, then copy it down into the remaining 7 rows.
 */
void ff_pred8x8l_vertical_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    __asm__ volatile (
        /* load top-left-shifted, top, and top-right-shifted rows */
        "ldl $8, 7(%[srcA])                 \r\n"
        "ldr $8, 0(%[srcA])                 \r\n"
        "ldl $9, 7(%[src0])                 \r\n"
        "ldr $9, 0(%[src0])                 \r\n"
        "ldl $10, 7(%[src1])                \r\n"
        "ldr $10, 0(%[src1])                \r\n"
        /* widen bytes to 16-bit lanes */
        "dmtc1 $8, $f2                      \r\n"
        "dmtc1 $9, $f4                      \r\n"
        "dmtc1 $10, $f6                     \r\n"
        "dmtc1 $0, $f0                      \r\n"
        "punpcklbh $f8, $f2, $f0            \r\n"
        "punpckhbh $f10, $f2, $f0           \r\n"
        "punpcklbh $f12, $f4, $f0           \r\n"
        "punpckhbh $f14, $f4, $f0           \r\n"
        "punpcklbh $f16, $f6, $f0           \r\n"
        "punpckhbh $f18, $f6, $f0           \r\n"
        /* no top-left: duplicate the first centre pixel into the left tap */
        "bnez %[has_topleft], 1f            \r\n"
        "pinsrh_0 $f8, $f8, $f12            \r\n"
        "1:                                 \r\n"
        /* no top-right: duplicate the last centre pixel into the right tap */
        "bnez %[has_topright], 2f           \r\n"
        "pinsrh_3 $f18, $f18, $f14          \r\n"
        "2:                                 \r\n"
        /* (left + 2*mid + right + 2) >> 2 per lane, pack, store row 0 */
        "daddiu $8, $0, 2                   \r\n"
        "dmtc1 $8, $f20                     \r\n"
        "pshufh $f22, $f20, $f0             \r\n"
        "pmullh $f12, $f12, $f22            \r\n"
        "pmullh $f14, $f14, $f22            \r\n"
        "paddh $f8, $f8, $f12               \r\n"
        "paddh $f10, $f10, $f14             \r\n"
        "paddh $f8, $f8, $f16               \r\n"
        "paddh $f10, $f10, $f18             \r\n"
        "paddh $f8, $f8, $f22               \r\n"
        "paddh $f10, $f10, $f22             \r\n"
        "psrah $f8, $f8, $f20               \r\n"
        "psrah $f10, $f10, $f20             \r\n"
        "packushb $f4, $f8, $f10            \r\n"
        "sdc1 $f4, 0(%[src])                \r\n"
        /* BUGFIX: src was declared "=r" (write-only) although the asm reads
         * %[src]; it must be a read-write operand. */
        : [src]"+r"(src)
        : [srcA]"r"(src-stride-1),[src0]"r"(src-stride),
          [src1]"r"(src-stride+1),[has_topleft]"r"(has_topleft),
          [has_topright]"r"(has_topright)
        : "$8","$9","$10","$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14",
          "$f16","$f18","$f20","$f22","memory"
    );

    /* copy row 0 into the remaining 7 rows */
    __asm__ volatile (
        "dli $8, 7                          \r\n"
        "gsldlc1 $f2, 7(%[src])             \r\n"
        "gsldrc1 $f2, 0(%[src])             \r\n"
        "dadd %[src], %[src], %[stride]     \r\n"
        "1:                                 \r\n"
        "gssdlc1 $f2, 7(%[src])             \r\n"
        "gssdrc1 $f2, 0(%[src])             \r\n"
        "daddu %[src], %[src], %[stride]    \r\n"
        "daddi $8, $8, -1                   \r\n"
        "bnez $8, 1b                        \r\n"
        : [src]"+&r"(src)
        : [stride]"r"(stride)
        : "$8","$f2","memory"
    );
}

void ff_pred4x4_dc_8_mmi(uint8_t *src, const uint8_t *topright,
        ptrdiff_t stride)
{
    const int dc = (src[-stride] + src[1-stride] + src[2-stride]
                 + src[3-stride] + src[-1+0*stride] + src[-1+1*stride]
                 + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;

    __asm__ volatile (
340 341
        "daddu $2, %[dc], $0                \r\n"
        "dmul $3, $2, %[ff_pb_1]            \r\n"
342
        "xor $4, $4, $4                     \r\n"
343 344 345 346 347 348 349 350 351
        "gsswx $3, 0(%[src],$4)             \r\n"
        "daddu $4, %[stride]                \r\n"
        "gsswx $3, 0(%[src],$4)             \r\n"
        "daddu $4, %[stride]                \r\n"
        "gsswx $3, 0(%[src],$4)             \r\n"
        "daddu $4, %[stride]                \r\n"
        "gsswx $3, 0(%[src],$4)             \r\n"
        ::[src]"r"(src),[stride]"r"(stride),[dc]"r"(dc),[ff_pb_1]"r"(ff_pb_1)
        : "$2","$3","$4"
352 353 354 355 356 357
    );
}

/**
 * 8x8 vertical prediction: copy the 8-byte row above the block into all
 * 8 rows.
 */
void ff_pred8x8_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dsubu $2, %[src], %[stride]        \r\n"   /* $2: top neighbour row */
        "daddu $3, %[src], $0               \r\n"   /* $3: destination row */
        "ldl $4, 7($2)                      \r\n"
        "ldr $4, 0($2)                      \r\n"
        "dli $5, 0x8                        \r\n"   /* 8 rows */
        "1:                                 \r\n"
        "sdl $4, 7($3)                      \r\n"
        "sdr $4, 0($3)                      \r\n"
        "daddu $3, %[stride]                \r\n"
        "daddiu $5, -1                      \r\n"
        "bnez $5, 1b                        \r\n"
        ::[src]"r"(src),[stride]"r"(stride)
        : "$2","$3","$4","$5","memory"
    );
}

void ff_pred8x8_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
377 378
        "daddiu $2, %[src], -1              \r\n"
        "daddu $3, %[src], $0               \r\n"
379 380 381
        "dli $6, 0x8                        \r\n"
        "1:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
382
        "dmul $5, $4, %[ff_pb_1]            \r\n"
383 384
        "sdl $5, 7($3)                      \r\n"
        "sdr $5, 0($3)                      \r\n"
385 386
        "daddu $2, %[stride]                \r\n"
        "daddu $3, %[stride]                \r\n"
387 388
        "daddiu $6, -1                      \r\n"
        "bnez $6, 1b                        \r\n"
389 390
        ::[src]"r"(src),[stride]"r"(stride),[ff_pb_1]"r"(ff_pb_1)
        : "$2","$3","$4","$5","$6"
391 392 393 394 395 396 397
    );
}

static void ff_pred16x16_plane_compat_8_mmi(uint8_t *src, ptrdiff_t stride,
        const int svq3, const int rv40)
{
    __asm__ volatile (
398 399
        "negu $2, %[stride]                 \r\n"
        "daddu $3, %[src], $2               \r\n"
400 401 402 403 404 405 406 407 408 409 410 411 412
        "xor $f8, $f8, $f8                  \r\n"
        "gslwlc1 $f0, 2($3)                 \r\n"
        "gslwrc1 $f0, -1($3)                \r\n"
        "gslwlc1 $f2, 6($3)                 \r\n"
        "gslwrc1 $f2, 3($3)                 \r\n"
        "gslwlc1 $f4, 11($3)                \r\n"
        "gslwrc1 $f4, 8($3)                 \r\n"
        "gslwlc1 $f6, 15($3)                \r\n"
        "gslwrc1 $f6, 12($3)                \r\n"
        "punpcklbh $f0, $f0, $f8            \r\n"
        "punpcklbh $f2, $f2, $f8            \r\n"
        "punpcklbh $f4, $f4, $f8            \r\n"
        "punpcklbh $f6, $f6, $f8            \r\n"
413 414 415 416
        "dmtc1 %[ff_pw_m8tom5], $f20        \r\n"
        "dmtc1 %[ff_pw_m4tom1], $f22        \r\n"
        "dmtc1 %[ff_pw_1to4], $f24          \r\n"
        "dmtc1 %[ff_pw_5to8], $f26          \r\n"
417 418 419 420 421 422 423 424 425 426 427 428 429 430 431
        "pmullh $f0, $f0, $f20              \r\n"
        "pmullh $f2, $f2, $f22              \r\n"
        "pmullh $f4, $f4, $f24              \r\n"
        "pmullh $f6, $f6, $f26              \r\n"
        "paddsh $f0, $f0, $f4               \r\n"
        "paddsh $f2, $f2, $f6               \r\n"
        "paddsh $f0, $f0, $f2               \r\n"
        "dli $4, 0xE                        \r\n"
        "dmtc1 $4, $f28                     \r\n"
        "pshufh $f2, $f0, $f28              \r\n"
        "paddsh $f0, $f0, $f2               \r\n"
        "dli $4, 0x1                        \r\n"
        "dmtc1 $4, $f30                     \r\n"
        "pshufh $f2, $f0, $f30              \r\n"
        "paddsh $f10, $f0, $f2              \r\n"
432
        "daddiu $3, %[src], -1              \r\n"
433 434 435
        "daddu $3, $2                       \r\n"
        "lbu $4, 0($3)                      \r\n"
        "lbu $8, 16($3)                     \r\n"
436
        "daddu $3, %[stride]                \r\n"
437
        "lbu $5, 0($3)                      \r\n"
438
        "daddu $3, %[stride]                \r\n"
439
        "lbu $6, 0($3)                      \r\n"
440
        "daddu $3, %[stride]                \r\n"
441 442 443 444 445 446 447 448
        "lbu $7, 0($3)                      \r\n"
        "dsll $5, 16                        \r\n"
        "dsll $6, 32                        \r\n"
        "dsll $7, 48                        \r\n"
        "or $6, $7                          \r\n"
        "or $4, $5                          \r\n"
        "or $4, $6                          \r\n"
        "dmtc1 $4, $f0                      \r\n"
449
        "daddu $3, %[stride]                \r\n"
450
        "lbu $4, 0($3)                      \r\n"
451
        "daddu $3, %[stride]                \r\n"
452
        "lbu $5, 0($3)                      \r\n"
453
        "daddu $3, %[stride]                \r\n"
454
        "lbu $6, 0($3)                      \r\n"
455
        "daddu $3, %[stride]                \r\n"
456 457 458 459 460 461 462 463
        "lbu $7, 0($3)                      \r\n"
        "dsll $5, 16                        \r\n"
        "dsll $6, 32                        \r\n"
        "dsll $7, 48                        \r\n"
        "or $6, $7                          \r\n"
        "or $4, $5                          \r\n"
        "or $4, $6                          \r\n"
        "dmtc1 $4, $f2                      \r\n"
464 465
        "daddu $3, %[stride]                \r\n"
        "daddu $3, %[stride]                \r\n"
466
        "lbu $4, 0($3)                      \r\n"
467
        "daddu $3, %[stride]                \r\n"
468
        "lbu $5, 0($3)                      \r\n"
469
        "daddu $3, %[stride]                \r\n"
470
        "lbu $6, 0($3)                      \r\n"
471
        "daddu $3, %[stride]                \r\n"
472 473 474 475 476 477 478 479
        "lbu $7, 0($3)                      \r\n"
        "dsll $5, 16                        \r\n"
        "dsll $6, 32                        \r\n"
        "dsll $7, 48                        \r\n"
        "or $6, $7                          \r\n"
        "or $4, $5                          \r\n"
        "or $4, $6                          \r\n"
        "dmtc1 $4, $f4                      \r\n"
480
        "daddu $3, %[stride]                \r\n"
481
        "lbu $4, 0($3)                      \r\n"
482
        "daddu $3, %[stride]                \r\n"
483
        "lbu $5, 0($3)                      \r\n"
484
        "daddu $3, %[stride]                \r\n"
485
        "lbu $6, 0($3)                      \r\n"
486
        "daddu $3, %[stride]                \r\n"
487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514
        "lbu $7, 0($3)                      \r\n"
        "daddu $8, $7                       \r\n"
        "daddiu $8, 1                       \r\n"
        "dsll $8, 4                         \r\n"
        "dsll $5, 16                        \r\n"
        "dsll $6, 32                        \r\n"
        "dsll $7, 48                        \r\n"
        "or $6, $7                          \r\n"
        "or $4, $5                          \r\n"
        "or $4, $6                          \r\n"
        "dmtc1 $4, $f6                      \r\n"
        "pmullh $f0, $f0, $f20              \r\n"
        "pmullh $f2, $f2, $f22              \r\n"
        "pmullh $f4, $f4, $f24              \r\n"
        "pmullh $f6, $f6, $f26              \r\n"
        "paddsh $f0, $f0, $f4               \r\n"
        "paddsh $f2, $f2, $f6               \r\n"
        "paddsh $f0, $f0, $f2               \r\n"
        "pshufh $f2, $f0, $f28              \r\n"
        "paddsh $f0, $f0, $f2               \r\n"
        "pshufh $f2, $f0, $f30              \r\n"
        "paddsh $f12, $f0, $f2              \r\n"
        "dmfc1 $2, $f10                     \r\n"
        "dsll $2, 48                        \r\n"
        "dsra $2, 48                        \r\n"
        "dmfc1 $3, $f12                     \r\n"
        "dsll $3, 48                        \r\n"
        "dsra $3, 48                        \r\n"
515
        "beqz %[svq3], 1f                   \r\n"
516 517 518 519 520 521 522 523 524 525 526 527 528 529
        "dli $4, 4                          \r\n"
        "ddiv $2, $4                        \r\n"
        "ddiv $3, $4                        \r\n"
        "dli $4, 5                          \r\n"
        "dmul $2, $4                        \r\n"
        "dmul $3, $4                        \r\n"
        "dli $4, 16                         \r\n"
        "ddiv $2, $4                        \r\n"
        "ddiv $3, $4                        \r\n"
        "daddu $4, $2, $0                   \r\n"
        "daddu $2, $3, $0                   \r\n"
        "daddu $3, $4, $0                   \r\n"
        "b 2f                               \r\n"
        "1:                                 \r\n"
530
        "beqz %[rv40], 1f                   \r\n"
531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559
        "dsra $4, $2, 2                     \r\n"
        "daddu $2, $4                       \r\n"
        "dsra $4, $3, 2                     \r\n"
        "daddu $3, $4                       \r\n"
        "dsra $2, 4                         \r\n"
        "dsra $3, 4                         \r\n"
        "b 2f                               \r\n"
        "1:                                 \r\n"
        "dli $4, 5                          \r\n"
        "dmul $2, $4                        \r\n"
        "dmul $3, $4                        \r\n"
        "daddiu $2, 32                      \r\n"
        "daddiu $3, 32                      \r\n"
        "dsra $2, 6                         \r\n"
        "dsra $3, 6                         \r\n"
        "2:                                 \r\n"
        "daddu $5, $2, $3                   \r\n"
        "dli $4, 7                          \r\n"
        "dmul $5, $4                        \r\n"
        "dsubu $8, $5                       \r\n"
        "dmtc1 $0, $f8                      \r\n"
        "dmtc1 $2, $f0                      \r\n"
        "pshufh $f0, $f0, $f8               \r\n"
        "dmtc1 $3, $f10                     \r\n"
        "pshufh $f10, $f10, $f8             \r\n"
        "dmtc1 $8, $f12                     \r\n"
        "pshufh $f12, $f12, $f8             \r\n"
        "dli $4, 5                          \r\n"
        "dmtc1 $4, $f14                     \r\n"
560 561 562 563 564
        "pmullh $f2, %[ff_pw_0to3], $f0     \r\n"
        "pmullh $f4, %[ff_pw_4to7], $f0     \r\n"
        "pmullh $f6, %[ff_pw_8tob], $f0     \r\n"
        "pmullh $f8, %[ff_pw_ctof], $f0     \r\n"
        "daddu $3, %[src], $0               \r\n"
565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581
        "dli $2, 16                         \r\n"
        "1:                                 \r\n"
        "paddsh $f16, $f2, $f12             \r\n"
        "psrah $f16, $f16, $f14             \r\n"
        "paddsh $f18, $f4, $f12             \r\n"
        "psrah $f18, $f18, $f14             \r\n"
        "packushb $f20, $f16, $f18          \r\n"
        "gssdlc1 $f20, 7($3)                \r\n"
        "gssdrc1 $f20, 0($3)                \r\n"
        "paddsh $f16, $f6, $f12             \r\n"
        "psrah $f16, $f16, $f14             \r\n"
        "paddsh $f18, $f8, $f12             \r\n"
        "psrah $f18, $f18, $f14             \r\n"
        "packushb $f20, $f16, $f18          \r\n"
        "gssdlc1 $f20, 15($3)               \r\n"
        "gssdrc1 $f20, 8($3)                \r\n"
        "paddsh $f12, $f12, $f10            \r\n"
582
        "daddu $3, %[stride]                \r\n"
583 584
        "daddiu $2, -1                      \r\n"
        "bnez $2, 1b                        \r\n"
585 586 587 588 589 590 591 592
        ::[src]"r"(src),[stride]"r"(stride),[svq3]"r"(svq3),[rv40]"r"(rv40),
          [ff_pw_m8tom5]"r"(ff_pw_m8tom5),[ff_pw_m4tom1]"r"(ff_pw_m4tom1),
          [ff_pw_1to4]"r"(ff_pw_1to4),[ff_pw_5to8]"r"(ff_pw_5to8),
          [ff_pw_0to3]"f"(ff_pw_0to3),[ff_pw_4to7]"f"(ff_pw_4to7),
          [ff_pw_8tob]"f"(ff_pw_8tob),[ff_pw_ctof]"f"(ff_pw_ctof)
        : "$2","$3","$4","$5","$6","$7","$8","$f0","$f2","$f4","$f6","$f8",
          "$f10","$f12","$f14","$f16","$f18","$f20","$f22","$f24","$f26",
          "$f28","$f30"
593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617
    );
}

/* 16x16 plane prediction with SVQ3 slope rounding. */
void ff_pred16x16_plane_svq3_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    ff_pred16x16_plane_compat_8_mmi(src, stride, 1, 0);
}

/* 16x16 plane prediction with RV40 slope rounding. */
void ff_pred16x16_plane_rv40_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    ff_pred16x16_plane_compat_8_mmi(src, stride, 0, 1);
}

/* 16x16 plane prediction with H.264 slope rounding. */
void ff_pred16x16_plane_h264_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    ff_pred16x16_plane_compat_8_mmi(src, stride, 0, 0);
}

/**
 * 8x8 top-DC prediction: average the left 4 and right 4 top-neighbour pixels
 * separately ((sum + 2) >> 2 each), then fill every row with the two 4-byte
 * halves.
 */
void ff_pred8x8_top_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dli $2, 2                          \r\n"
        "xor $f0, $f0, $f0                  \r\n"
        "xor $f2, $f2, $f2                  \r\n"
        "xor $f30, $f30, $f30               \r\n"
        /* load the 8 top-neighbour pixels */
        "negu $3, %[stride]                 \r\n"
        "daddu $3, $3, %[src]               \r\n"
        "gsldlc1 $f4, 7($3)                 \r\n"
        "gsldrc1 $f4, 0($3)                 \r\n"
        /* split into left/right halves, sum each, (sum + 2) >> 2, splat */
        "punpcklbh $f0, $f4, $f30           \r\n"
        "punpckhbh $f2, $f4, $f30           \r\n"
        "biadd $f0, $f0                     \r\n"
        "biadd $f2, $f2                     \r\n"
        "pshufh $f0, $f0, $f30              \r\n"
        "pshufh $f2, $f2, $f30              \r\n"
        "dmtc1 $2, $f4                      \r\n"
        "pshufh $f4, $f4, $f30              \r\n"
        "paddush $f0, $f0, $f4              \r\n"
        "paddush $f2, $f2, $f4              \r\n"
        "dmtc1 $2, $f4                      \r\n"
        "psrlh $f0, $f0, $f4                \r\n"
        "psrlh $f2, $f2, $f4                \r\n"
        "packushb $f4, $f0, $f2             \r\n"
        /* write the packed dc bytes to all 8 rows */
        "dli $2, 8                          \r\n"
        "1:                                 \r\n"
        "gssdlc1 $f4, 7(%[src])             \r\n"
        "gssdrc1 $f4, 0(%[src])             \r\n"
        "daddu %[src], %[src], %[stride]    \r\n"
        "daddiu $2, $2, -1                  \r\n"
        "bnez $2, 1b                        \r\n"
        /* BUGFIX: the asm advances %[src], so src must be a read-write
         * output, not an input-only "r" operand. */
        : [src]"+r"(src)
        : [stride]"r"(stride)
        : "$2","$3","$f0","$f2","$f4","$f30","memory"
    );
}

/**
 * 8x8 DC prediction using both the top row and the left column
 * (H.264 chroma DC semantics): the block is split into four 4x4
 * quadrants, each filled with its own DC value:
 *   top-left:     (sum(top-left 4)  + sum(left-top 4)    + 4) >> 3
 *   top-right:    (sum(top-right 4)                      + 2) >> 2
 *   bottom-left:  (sum(left-bottom 4)                    + 2) >> 2
 *   bottom-right: (sum(top-right 4) + sum(left-bottom 4) + 4) >> 3
 *
 * @param src    pointer to the top-left pixel of the destination block
 * @param stride distance in bytes between vertically adjacent pixels
 */
void ff_pred8x8_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        /* $2 = address of the row above; $5 = its right half */
        "negu $2, %[stride]                 \r\n"
        "daddu $2, $2, %[src]               \r\n"
        "daddiu $5, $2, 4                   \r\n"
        /* $3 = sum of top-left 4 pixels, $4 = sum of top-right 4 */
        "lbu $6, 0($2)                      \r\n"
        "daddu $3, $0, $6                   \r\n"
        "daddiu $2, 1                       \r\n"
        "lbu $6, 0($5)                      \r\n"
        "daddu $4, $0, $6                   \r\n"
        "daddiu $5, 1                       \r\n"
        "lbu $6, 0($2)                      \r\n"
        "daddu $3, $3, $6                   \r\n"
        "daddiu $2, 1                       \r\n"
        "lbu $6, 0($5)                      \r\n"
        "daddu $4, $4, $6                   \r\n"
        "daddiu $5, 1                       \r\n"
        "lbu $6, 0($2)                      \r\n"
        "daddu $3, $3, $6                   \r\n"
        "daddiu $2, 1                       \r\n"
        "lbu $6, 0($5)                      \r\n"
        "daddu $4, $4, $6                   \r\n"
        "daddiu $5, 1                       \r\n"
        "lbu $6, 0($2)                      \r\n"
        "daddu $3, $3, $6                   \r\n"
        "daddiu $2, 1                       \r\n"
        "lbu $6, 0($5)                      \r\n"
        "daddu $4, $4, $6                   \r\n"
        "daddiu $5, 1                       \r\n"
        /* $6 = src - 1: walk down the left column */
        "dli $6, -1                         \r\n"
        "daddu $6, $6, %[src]               \r\n"
        /* $7 = sum of upper 4 left pixels */
        "lbu $5, 0($6)                      \r\n"
        "daddu $7, $0, $5                   \r\n"
        "daddu $6, $6, %[stride]            \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $7, $7, $5                   \r\n"
        "daddu $6, $6, %[stride]            \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $7, $7, $5                   \r\n"
        "daddu $6, $6, %[stride]            \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $7, $7, $5                   \r\n"
        "daddu $6, $6, %[stride]            \r\n"
        /* $8 = sum of lower 4 left pixels */
        "lbu $5, 0($6)                      \r\n"
        "daddu $8, $0, $5                   \r\n"
        "daddu $6, $6, %[stride]            \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $8, $8, $5                   \r\n"
        "daddu $6, $6, %[stride]            \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $8, $8, $5                   \r\n"
        "daddu $6, $6, %[stride]            \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $8, $8, $5                   \r\n"
        /* round and shift the four quadrant DC values */
        "daddu $3, $3, $7                   \r\n"
        "daddiu $3, $3, 4                   \r\n"
        "daddiu $4, $4, 2                   \r\n"
        "daddiu $5, $8, 2                   \r\n"
        "daddu $6, $4, $5                   \r\n"
        "dsrl $3, 3                         \r\n"
        "dsrl $4, 2                         \r\n"
        "dsrl $5, 2                         \r\n"
        "dsrl $6, 3                         \r\n"
        /* splat each DC into a vector, pack into two row patterns:
         * $f0 = top half rows (dc_tl | dc_tr), $f2 = bottom half */
        "xor $f30, $f30, $f30               \r\n"
        "dmtc1 $3, $f0                      \r\n"
        "pshufh $f0, $f0, $f30              \r\n"
        "dmtc1 $4, $f2                      \r\n"
        "pshufh $f2, $f2, $f30              \r\n"
        "dmtc1 $5, $f4                      \r\n"
        "pshufh $f4, $f4, $f30              \r\n"
        "dmtc1 $6, $f6                      \r\n"
        "pshufh $f6, $f6, $f30              \r\n"
        "packushb $f0, $f0, $f2             \r\n"
        "packushb $f2, $f4, $f6             \r\n"
        /* write 4 rows of the top pattern, then 4 of the bottom.
         * NOTE(review): sdc1 assumes 8-byte-aligned rows — TODO confirm
         * callers guarantee alignment (the vertical variants use the
         * unaligned gssdlc1/gssdrc1 pair instead). */
        "daddu $2, $0, %[src]               \r\n"
        "sdc1 $f0, 0($2)                    \r\n"
        "daddu $2, $2, %[stride]            \r\n"
        "sdc1 $f0, 0($2)                    \r\n"
        "daddu $2, $2, %[stride]            \r\n"
        "sdc1 $f0, 0($2)                    \r\n"
        "daddu $2, $2, %[stride]            \r\n"
        "sdc1 $f0, 0($2)                    \r\n"
        "daddu $2, $2, %[stride]            \r\n"
        "sdc1 $f2, 0($2)                    \r\n"
        "daddu $2, $2, %[stride]            \r\n"
        "sdc1 $f2, 0($2)                    \r\n"
        "daddu $2, $2, %[stride]            \r\n"
        "sdc1 $f2, 0($2)                    \r\n"
        "daddu $2, $2, %[stride]            \r\n"
        "sdc1 $f2, 0($2)                    \r\n"
        ::[src]"r"(src),[stride]"r"(stride)
        : "$2","$3","$4","$5","$6","$7","$8","$f0","$f2","$f4","$f6","$f30"
    );
}

/**
 * 8x16 vertical prediction: copy the 8 pixels directly above the block
 * into each of its 16 rows.
 *
 * @param src    pointer to the top-left pixel of the destination block;
 *               advanced internally (declared "+&r")
 * @param stride distance in bytes between vertically adjacent pixels
 */
void ff_pred8x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        /* unaligned-safe load of the 8 pixels above the block */
        "gsldlc1 $f2, 7(%[srcA])            \r\n"
        "gsldrc1 $f2, 0(%[srcA])            \r\n"
        "dli $8, 16                         \r\n"
        "1:                                 \r\n"
        /* replicate them into each of the 16 destination rows */
        "gssdlc1 $f2, 7(%[src])             \r\n"
        "gssdrc1 $f2, 0(%[src])             \r\n"
        "daddu %[src], %[src], %[stride]    \r\n"
        "daddi $8, $8, -1                   \r\n"
        "bnez $8, 1b                        \r\n"
        : [src]"+&r"(src)
        : [stride]"r"(stride),[srcA]"r"(src-stride)
        : "$8","$f2"
    );
}

void ff_pred8x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
765 766
        "daddiu $2, %[src], -1              \r\n"
        "daddu $3, %[src], $0               \r\n"
767 768 769
        "dli $6, 0x10                       \r\n"
        "1:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
770
        "dmul $5, $4, %[ff_pb_1]            \r\n"
771 772
        "sdl $5, 7($3)                      \r\n"
        "sdr $5, 0($3)                      \r\n"
773 774
        "daddu $2, %[stride]                \r\n"
        "daddu $3, %[stride]                \r\n"
775 776
        "daddiu $6, -1                      \r\n"
        "bnez $6, 1b                        \r\n"
777 778
        ::[src]"r"(src),[stride]"r"(stride),[ff_pb_1]"r"(ff_pb_1)
        : "$2","$3","$4","$5","$6"
779 780
    );
}