/*
 * Loongson SIMD optimized h264dsp
 *
 * Copyright (c) 2015 Loongson Technology Corporation Limited
 * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 *                    Zhang Shuangshuang <zhangshuangshuang@ict.ac.cn>
 *                    Heiher <r@hev.cc>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/bit_depth_template.c"
#include "h264dsp_mips.h"

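/* Add a 4x4 block of int16_t residuals to dst, one row per stride step,
 * clipping to [0, 255], then clear the residual block as the dsp
 * contract expects. */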
void ff_h264_add_pixels4_8_mmi(uint8_t *dst, int16_t *src, int stride)
{
    __asm__ volatile (
        "xor $f0, $f0, $f0              \r\n"
        "ldc1 $f2, 0(%[src])            \r\n"
        "ldc1 $f4, 8(%[src])            \r\n"
        "ldc1 $f6, 16(%[src])           \r\n"
        "ldc1 $f8, 24(%[src])           \r\n"
        "lwc1 $f10, 0(%[dst0])          \r\n"
        "lwc1 $f12, 0(%[dst1])          \r\n"
        "lwc1 $f14, 0(%[dst2])          \r\n"
        "lwc1 $f16, 0(%[dst3])          \r\n"
        "punpcklbh $f10, $f10, $f0      \r\n"
        "punpcklbh $f12, $f12, $f0      \r\n"
        "punpcklbh $f14, $f14, $f0      \r\n"
        "punpcklbh $f16, $f16, $f0      \r\n"
        "paddh $f2, $f2, $f10           \r\n"
        "paddh $f4, $f4, $f12           \r\n"
        "paddh $f6, $f6, $f14           \r\n"
        "paddh $f8, $f8, $f16           \r\n"
        "packushb $f2, $f2, $f0         \r\n"
        "packushb $f4, $f4, $f0         \r\n"
        "packushb $f6, $f6, $f0         \r\n"
        "packushb $f8, $f8, $f0         \r\n"
        "swc1 $f2, 0(%[dst0])           \r\n"
        "swc1 $f4, 0(%[dst1])           \r\n"
        "swc1 $f6, 0(%[dst2])           \r\n"
        "swc1 $f8, 0(%[dst3])           \r\n"
        ::[dst0]"r"(dst),[dst1]"r"(dst+stride),[dst2]"r"(dst+2*stride),
          [dst3]"r"(dst+3*stride),[src]"r"(src)
        : "$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16"
    );

    memset(src, 0, 32);
}

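/*
 * 4x4 H.264 inverse transform + add, mirroring the scalar reference in
 * libavcodec/h264idct_template.c. Per row, then per column:
 *
 *     z0 = b0 + b2;          z1 = b0 - b2;
 *     z2 = (b1 >> 1) - b3;   z3 = b1 + (b3 >> 1);
 *     b  = { z0 + z3, z1 + z2, z1 - z2, z0 - z3 }
 *
 * The psrah by 1 ($f16) implements the >>1 halving; ff_pw_32 plus the
 * psrah by 6 ($f18) implement the final (x + 32) >> 6 rounding before
 * the saturating add to dst.
 */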
void ff_h264_idct_add_8_mmi(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile (
        "dli $8, 1                              \r\n"
        "ldc1 $f0, 0(%[block])                  \r\n"
        "dmtc1 $8, $f16                         \r\n"
        "ldc1 $f2, 8(%[block])                  \r\n"
        "dli $8, 6                              \r\n"
        "ldc1 $f4, 16(%[block])                 \r\n"
        "dmtc1 $8, $f18                         \r\n"
        "psrah $f8, $f2, $f16                   \r\n"
        "ldc1 $f6, 24(%[block])                 \r\n"
        "psrah $f10, $f6, $f16                  \r\n"
        "psubh $f8, $f8, $f6                    \r\n"
        "paddh $f10, $f10, $f2                  \r\n"
        "paddh $f20, $f4, $f0                   \r\n"
        "psubh $f0, $f0, $f4                    \r\n"
        "paddh $f22, $f10, $f20                 \r\n"
        "psubh $f4, $f20, $f10                  \r\n"
        "paddh $f20, $f8, $f0                   \r\n"
        "psubh $f0, $f0, $f8                    \r\n"
        "punpckhhw $f2, $f22, $f20              \r\n"
        "punpcklhw $f10, $f22, $f20             \r\n"
        "punpckhhw $f8, $f0, $f4                \r\n"
        "punpcklhw $f0, $f0, $f4                \r\n"
        "punpckhwd $f4, $f10, $f0               \r\n"
        "punpcklwd $f10, $f10, $f0              \r\n"
        "punpcklwd $f20, $f2, $f8               \r\n"
        "punpckhwd $f0, $f2, $f8                \r\n"
        "paddh $f10, $f10, %[ff_pw_32]          \r\n"
        "psrah $f8, $f4, $f16                   \r\n"
        "psrah $f6, $f0, $f16                   \r\n"
        "psubh $f8, $f8, $f0                    \r\n"
        "paddh $f6, $f6, $f4                    \r\n"
        "paddh $f2, $f20, $f10                  \r\n"
        "psubh $f10, $f10, $f20                 \r\n"
        "paddh $f20, $f6, $f2                   \r\n"
        "psubh $f2, $f2, $f6                    \r\n"
        "paddh $f22, $f8, $f10                  \r\n"
        "xor $f14, $f14, $f14                   \r\n"
        "psubh $f10, $f10, $f8                  \r\n"
        "sdc1 $f14, 0(%[block])                 \r\n"
        "sdc1 $f14, 8(%[block])                 \r\n"
        "sdc1 $f14, 16(%[block])                \r\n"
        "sdc1 $f14, 24(%[block])                \r\n"
        "lwc1 $f4, 0(%[dst])                    \r\n"
        "psrah $f6, $f20, $f18                  \r\n"
        "gslwxc1 $f0, 0(%[dst], %[stride])      \r\n"
        "psrah $f8, $f22, $f18                  \r\n"
        "punpcklbh $f4, $f4, $f14               \r\n"
        "punpcklbh $f0, $f0, $f14               \r\n"
        "paddh $f4, $f4, $f6                    \r\n"
        "paddh $f0, $f0, $f8                    \r\n"
        "packushb $f4, $f4, $f14                \r\n"
        "packushb $f0, $f0, $f14                \r\n"
        "swc1 $f4, 0(%[dst])                    \r\n"
        "gsswxc1 $f0, 0(%[dst], %[stride])      \r\n"
        "daddu %[dst], %[dst], %[stride]        \r\n"
        "daddu %[dst], %[dst], %[stride]        \r\n"
        "lwc1 $f4, 0(%[dst])                    \r\n"
        "psrah $f10, $f10, $f18                 \r\n"
        "gslwxc1 $f0, 0(%[dst], %[stride])      \r\n"
        "psrah $f2, $f2, $f18                   \r\n"
        "punpcklbh $f4, $f4, $f14               \r\n"
        "punpcklbh $f0, $f0, $f14               \r\n"
        "paddh $f4, $f4, $f10                   \r\n"
        "paddh $f0, $f0, $f2                    \r\n"
        "packushb $f4, $f4, $f14                \r\n"
        "swc1 $f4, 0(%[dst])                    \r\n"
        "packushb $f0, $f0, $f14                \r\n"
        "gsswxc1 $f0, 0(%[dst], %[stride])      \r\n"
        ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride),
          [ff_pw_32]"f"(ff_pw_32)
        : "$8","$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16",
          "$f18","$f20","$f22"
    );

    memset(block, 0, 32);
}

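/*
 * 8x8 H.264 inverse transform + add. The (x + 32) >> 6 rounding is
 * folded in up front by adding 32 to block[0]; rows are transformed
 * four at a time, with 0x20 bytes of stack (via $29) and GPRs/$f22-$f30
 * used to park intermediate columns between the horizontal and vertical
 * passes.
 */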
void ff_h264_idct8_add_8_mmi(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile (
        "lhu $10, 0x0(%[block])                     \r\n"
        "daddiu $29, $29, -0x20                     \r\n"
        "daddiu $10, $10, 0x20                      \r\n"
        "ldc1 $f2, 0x10(%[block])                   \r\n"
        "sh $10, 0x0(%[block])                      \r\n"
        "ldc1 $f4, 0x20(%[block])                   \r\n"
        "dli $10, 0x1                               \r\n"
        "ldc1 $f6, 0x30(%[block])                   \r\n"
        "dmtc1 $10, $f16                            \r\n"
        "ldc1 $f10, 0x50(%[block])                  \r\n"
        "ldc1 $f12, 0x60(%[block])                  \r\n"
        "ldc1 $f14, 0x70(%[block])                  \r\n"
        "mov.d $f0, $f2                             \r\n"
        "psrah $f2, $f2, $f16                       \r\n"
        "psrah $f8, $f10, $f16                      \r\n"
        "paddh $f2, $f2, $f0                        \r\n"
        "paddh $f8, $f8, $f10                       \r\n"
        "paddh $f2, $f2, $f10                       \r\n"
        "paddh $f8, $f8, $f14                       \r\n"
        "paddh $f2, $f2, $f6                        \r\n"
        "psubh $f8, $f8, $f0                        \r\n"
        "psubh $f0, $f0, $f6                        \r\n"
        "psubh $f10, $f10, $f6                      \r\n"
        "psrah $f6, $f6, $f16                       \r\n"
        "paddh $f0, $f0, $f14                       \r\n"
        "psubh $f10, $f10, $f14                     \r\n"
        "psrah $f14, $f14, $f16                     \r\n"
        "psubh $f0, $f0, $f6                        \r\n"
        "dli $10, 0x2                               \r\n"
        "psubh $f10, $f10, $f14                     \r\n"
        "dmtc1 $10, $f18                            \r\n"
        "mov.d $f14, $f2                            \r\n"
        "psrah $f2, $f2, $f18                       \r\n"
        "psrah $f6, $f8, $f18                       \r\n"
        "paddh $f6, $f6, $f0                        \r\n"
        "psrah $f0, $f0, $f18                       \r\n"
        "paddh $f2, $f2, $f10                       \r\n"
        "psrah $f10, $f10, $f18                     \r\n"
        "psubh $f0, $f0, $f8                        \r\n"
        "psubh $f14, $f14, $f10                     \r\n"
        "mov.d $f10, $f12                           \r\n"
        "psrah $f12, $f12, $f16                     \r\n"
        "psrah $f8, $f4, $f16                       \r\n"
        "paddh $f12, $f12, $f4                      \r\n"
        "psubh $f8, $f8, $f10                       \r\n"
        "ldc1 $f4, 0x0(%[block])                    \r\n"
        "ldc1 $f10, 0x40(%[block])                  \r\n"
        "paddh $f10, $f10, $f4                      \r\n"
        "paddh $f4, $f4, $f4                        \r\n"
        "paddh $f12, $f12, $f10                     \r\n"
        "psubh $f4, $f4, $f10                       \r\n"
        "paddh $f10, $f10, $f10                     \r\n"
        "paddh $f8, $f8, $f4                        \r\n"
        "psubh $f10, $f10, $f12                     \r\n"
        "paddh $f4, $f4, $f4                        \r\n"
        "paddh $f14, $f14, $f12                     \r\n"
        "psubh $f4, $f4, $f8                        \r\n"
        "paddh $f12, $f12, $f12                     \r\n"
        "paddh $f0, $f0, $f8                        \r\n"
        "psubh $f12, $f12, $f14                     \r\n"
        "paddh $f8, $f8, $f8                        \r\n"
        "paddh $f6, $f6, $f4                        \r\n"
        "psubh $f8, $f8, $f0                        \r\n"
        "paddh $f4, $f4, $f4                        \r\n"
        "paddh $f2, $f2, $f10                       \r\n"
        "psubh $f4, $f4, $f6                        \r\n"
        "paddh $f10, $f10, $f10                     \r\n"
        "sdc1 $f12, 0x0(%[block])                   \r\n"
        "psubh $f10, $f10, $f2                      \r\n"
        "punpckhhw $f12, $f14, $f0                  \r\n"
        "punpcklhw $f14, $f14, $f0                  \r\n"
        "punpckhhw $f0, $f6, $f2                    \r\n"
        "punpcklhw $f6, $f6, $f2                    \r\n"
        "punpckhwd $f2, $f14, $f6                   \r\n"
        "punpcklwd $f14, $f14, $f6                  \r\n"
        "punpckhwd $f6, $f12, $f0                   \r\n"
        "punpcklwd $f12, $f12, $f0                  \r\n"
        "ldc1 $f0, 0x0(%[block])                    \r\n"
        "sdc1 $f14, 0x0($29)                        \r\n"
        "sdc1 $f2, 0x10($29)                        \r\n"
        "dmfc1 $8, $f12                             \r\n"
        "dmfc1 $11, $f6                             \r\n"
        "punpckhhw $f6, $f10, $f4                   \r\n"
        "punpcklhw $f10, $f10, $f4                  \r\n"
        "punpckhhw $f4, $f8, $f0                    \r\n"
        "punpcklhw $f8, $f8, $f0                    \r\n"
        "punpckhwd $f0, $f10, $f8                   \r\n"
        "punpcklwd $f10, $f10, $f8                  \r\n"
        "punpckhwd $f8, $f6, $f4                    \r\n"
        "punpcklwd $f6, $f6, $f4                    \r\n"
        "sdc1 $f10, 0x8($29)                        \r\n"
        "sdc1 $f0, 0x18($29)                        \r\n"
        "dmfc1 $9, $f6                              \r\n"
        "dmfc1 $12, $f8                             \r\n"
        "ldc1 $f2, 0x18(%[block])                   \r\n"
        "ldc1 $f12, 0x28(%[block])                  \r\n"
        "ldc1 $f4, 0x38(%[block])                   \r\n"
        "ldc1 $f0, 0x58(%[block])                   \r\n"
        "ldc1 $f6, 0x68(%[block])                   \r\n"
        "ldc1 $f8, 0x78(%[block])                   \r\n"
        "mov.d $f14, $f2                            \r\n"
        "psrah $f10, $f0, $f16                      \r\n"
        "psrah $f2, $f2, $f16                       \r\n"
        "paddh $f10, $f10, $f0                      \r\n"
        "paddh $f2, $f2, $f14                       \r\n"
        "paddh $f10, $f10, $f8                      \r\n"
        "paddh $f2, $f2, $f0                        \r\n"
        "psubh $f10, $f10, $f14                     \r\n"
        "paddh $f2, $f2, $f4                        \r\n"
        "psubh $f14, $f14, $f4                      \r\n"
        "psubh $f0, $f0, $f4                        \r\n"
        "psrah $f4, $f4, $f16                       \r\n"
        "paddh $f14, $f14, $f8                      \r\n"
        "psubh $f0, $f0, $f8                        \r\n"
        "psrah $f8, $f8, $f16                       \r\n"
        "psubh $f14, $f14, $f4                      \r\n"
        "psubh $f0, $f0, $f8                        \r\n"
        "mov.d $f8, $f2                             \r\n"
        "psrah $f4, $f10, $f18                      \r\n"
        "psrah $f2, $f2, $f18                       \r\n"
        "paddh $f4, $f4, $f14                       \r\n"
        "psrah $f14, $f14, $f18                     \r\n"
        "paddh $f2, $f2, $f0                        \r\n"
        "psrah $f0, $f0, $f18                       \r\n"
        "psubh $f14, $f14, $f10                     \r\n"
        "psubh $f8, $f8, $f0                        \r\n"
        "mov.d $f0, $f6                             \r\n"
        "psrah $f6, $f6, $f16                       \r\n"
        "psrah $f10, $f12, $f16                     \r\n"
        "paddh $f6, $f6, $f12                       \r\n"
        "psubh $f10, $f10, $f0                      \r\n"
        "ldc1 $f12, 0x8(%[block])                   \r\n"
        "ldc1 $f0, 0x48(%[block])                   \r\n"
        "paddh $f0, $f0, $f12                       \r\n"
        "paddh $f12, $f12, $f12                     \r\n"
        "paddh $f6, $f6, $f0                        \r\n"
        "psubh $f12, $f12, $f0                      \r\n"
        "paddh $f0, $f0, $f0                        \r\n"
        "paddh $f10, $f10, $f12                     \r\n"
        "psubh $f0, $f0, $f6                        \r\n"
        "paddh $f12, $f12, $f12                     \r\n"
        "paddh $f8, $f8, $f6                        \r\n"
        "psubh $f12, $f12, $f10                     \r\n"
        "paddh $f6, $f6, $f6                        \r\n"
        "paddh $f14, $f14, $f10                     \r\n"
        "psubh $f6, $f6, $f8                        \r\n"
        "paddh $f10, $f10, $f10                     \r\n"
        "paddh $f4, $f4, $f12                       \r\n"
        "psubh $f10, $f10, $f14                     \r\n"
        "paddh $f12, $f12, $f12                     \r\n"
        "paddh $f2, $f2, $f0                        \r\n"
        "psubh $f12, $f12, $f4                      \r\n"
        "paddh $f0, $f0, $f0                        \r\n"
        "sdc1 $f6, 0x8(%[block])                    \r\n"
        "psubh $f0, $f0, $f2                        \r\n"
        "punpckhhw $f6, $f8, $f14                   \r\n"
        "punpcklhw $f8, $f8, $f14                   \r\n"
        "punpckhhw $f14, $f4, $f2                   \r\n"
        "punpcklhw $f4, $f4, $f2                    \r\n"
        "punpckhwd $f2, $f8, $f4                    \r\n"
        "punpcklwd $f8, $f8, $f4                    \r\n"
        "punpckhwd $f4, $f6, $f14                   \r\n"
        "punpcklwd $f6, $f6, $f14                   \r\n"
        "ldc1 $f14, 0x8(%[block])                   \r\n"
        "dmfc1 $13, $f8                             \r\n"
        "dmfc1 $15, $f2                             \r\n"
        "mov.d $f24, $f6                            \r\n"
        "mov.d $f28, $f4                            \r\n"
        "punpckhhw $f4, $f0, $f12                   \r\n"
        "punpcklhw $f0, $f0, $f12                   \r\n"
        "punpckhhw $f12, $f10, $f14                 \r\n"
        "punpcklhw $f10, $f10, $f14                 \r\n"
        "punpckhwd $f14, $f0, $f10                  \r\n"
        "punpcklwd $f0, $f0, $f10                   \r\n"
        "punpckhwd $f10, $f4, $f12                  \r\n"
        "punpcklwd $f4, $f4, $f12                   \r\n"
        "dmfc1 $14, $f0                             \r\n"
        "mov.d $f22, $f14                           \r\n"
        "mov.d $f26, $f4                            \r\n"
        "mov.d $f30, $f10                           \r\n"
        "daddiu $10, %[dst], 0x4                    \r\n"
        "dmtc1 $15, $f14                            \r\n"
        "dmtc1 $11, $f12                            \r\n"
        "ldc1 $f2, 0x10($29)                        \r\n"
        "dmtc1 $8, $f6                              \r\n"
        "mov.d $f8, $f2                             \r\n"
        "psrah $f2, $f2, $f16                       \r\n"
        "psrah $f0, $f14, $f16                      \r\n"
        "paddh $f2, $f2, $f8                        \r\n"
        "paddh $f0, $f0, $f14                       \r\n"
        "paddh $f2, $f2, $f14                       \r\n"
        "paddh $f0, $f0, $f28                       \r\n"
        "paddh $f2, $f2, $f12                       \r\n"
        "psubh $f0, $f0, $f8                        \r\n"
        "psubh $f8, $f8, $f12                       \r\n"
        "psubh $f14, $f14, $f12                     \r\n"
        "psrah $f12, $f12, $f16                     \r\n"
        "paddh $f8, $f8, $f28                       \r\n"
        "psubh $f14, $f14, $f28                     \r\n"
        "psrah $f10, $f28, $f16                     \r\n"
        "psubh $f8, $f8, $f12                       \r\n"
        "psubh $f14, $f14, $f10                     \r\n"
        "mov.d $f10, $f2                            \r\n"
        "psrah $f2, $f2, $f18                       \r\n"
        "psrah $f12, $f0, $f18                      \r\n"
        "paddh $f2, $f2, $f14                       \r\n"
        "paddh $f12, $f12, $f8                      \r\n"
        "psrah $f8, $f8, $f18                       \r\n"
        "psrah $f14, $f14, $f18                     \r\n"
        "psubh $f8, $f8, $f0                        \r\n"
        "psubh $f10, $f10, $f14                     \r\n"
        "mov.d $f14, $f24                           \r\n"
        "psrah $f4, $f24, $f16                      \r\n"
        "psrah $f0, $f6, $f16                       \r\n"
        "paddh $f4, $f4, $f6                        \r\n"
        "psubh $f0, $f0, $f14                       \r\n"
        "ldc1 $f6, 0x0($29)                         \r\n"
        "dmtc1 $13, $f14                            \r\n"
        "paddh $f14, $f14, $f6                      \r\n"
        "paddh $f6, $f6, $f6                        \r\n"
        "paddh $f4, $f4, $f14                       \r\n"
        "psubh $f6, $f6, $f14                       \r\n"
        "paddh $f14, $f14, $f14                     \r\n"
        "paddh $f0, $f0, $f6                        \r\n"
        "psubh $f14, $f14, $f4                      \r\n"
        "paddh $f6, $f6, $f6                        \r\n"
        "paddh $f10, $f10, $f4                      \r\n"
        "psubh $f6, $f6, $f0                        \r\n"
        "paddh $f4, $f4, $f4                        \r\n"
        "paddh $f8, $f8, $f0                        \r\n"
        "psubh $f4, $f4, $f10                       \r\n"
        "paddh $f0, $f0, $f0                        \r\n"
        "paddh $f12, $f12, $f6                      \r\n"
        "psubh $f0, $f0, $f8                        \r\n"
        "paddh $f6, $f6, $f6                        \r\n"
        "paddh $f2, $f2, $f14                       \r\n"
        "psubh $f6, $f6, $f12                       \r\n"
        "paddh $f14, $f14, $f14                     \r\n"
        "sdc1 $f6, 0x0($29)                         \r\n"
        "psubh $f14, $f14, $f2                      \r\n"
        "sdc1 $f0, 0x10($29)                        \r\n"
        "dmfc1 $8, $f4                              \r\n"
        "xor $f4, $f4, $f4                          \r\n"
        "sdc1 $f4, 0x0(%[block])                    \r\n"
        "sdc1 $f4, 0x8(%[block])                    \r\n"
        "sdc1 $f4, 0x10(%[block])                   \r\n"
        "sdc1 $f4, 0x18(%[block])                   \r\n"
        "sdc1 $f4, 0x20(%[block])                   \r\n"
        "sdc1 $f4, 0x28(%[block])                   \r\n"
        "sdc1 $f4, 0x30(%[block])                   \r\n"
        "sdc1 $f4, 0x38(%[block])                   \r\n"
        "sdc1 $f4, 0x40(%[block])                   \r\n"
        "sdc1 $f4, 0x48(%[block])                   \r\n"
        "sdc1 $f4, 0x50(%[block])                   \r\n"
        "sdc1 $f4, 0x58(%[block])                   \r\n"
        "sdc1 $f4, 0x60(%[block])                   \r\n"
        "sdc1 $f4, 0x68(%[block])                   \r\n"
        "sdc1 $f4, 0x70(%[block])                   \r\n"
        "sdc1 $f4, 0x78(%[block])                   \r\n"
        "dli $11, 0x6                               \r\n"
        "lwc1 $f6, 0x0(%[dst])                      \r\n"
        "dmtc1 $11, $f20                            \r\n"
        "gslwxc1 $f0, 0x0(%[dst], %[stride])        \r\n"
        "psrah $f10, $f10, $f20                     \r\n"
        "psrah $f8, $f8, $f20                       \r\n"
        "punpcklbh $f6, $f6, $f4                    \r\n"
        "punpcklbh $f0, $f0, $f4                    \r\n"
        "paddh $f6, $f6, $f10                       \r\n"
        "paddh $f0, $f0, $f8                        \r\n"
        "packushb $f6, $f6, $f4                     \r\n"
        "packushb $f0, $f0, $f4                     \r\n"
        "swc1 $f6, 0x0(%[dst])                      \r\n"
        "gsswxc1 $f0, 0x0(%[dst], %[stride])        \r\n"
        "daddu %[dst], %[dst], %[stride]            \r\n"
        "daddu %[dst], %[dst], %[stride]            \r\n"
        "lwc1 $f6, 0x0(%[dst])                      \r\n"
        "gslwxc1 $f0, 0x0(%[dst], %[stride])        \r\n"
        "psrah $f12, $f12, $f20                     \r\n"
        "psrah $f2, $f2, $f20                       \r\n"
        "punpcklbh $f6, $f6, $f4                    \r\n"
        "punpcklbh $f0, $f0, $f4                    \r\n"
        "paddh $f6, $f6, $f12                       \r\n"
        "paddh $f0, $f0, $f2                        \r\n"
        "packushb $f6, $f6, $f4                     \r\n"
        "packushb $f0, $f0, $f4                     \r\n"
        "swc1 $f6, 0x0(%[dst])                      \r\n"
        "gsswxc1 $f0, 0x0(%[dst], %[stride])        \r\n"
        "ldc1 $f10, 0x0($29)                        \r\n"
        "ldc1 $f8, 0x10($29)                        \r\n"
        "dmtc1 $8, $f12                             \r\n"
        "daddu %[dst], %[dst], %[stride]            \r\n"
        "daddu %[dst], %[dst], %[stride]            \r\n"
        "lwc1 $f6, 0x0(%[dst])                      \r\n"
        "gslwxc1 $f0, 0x0(%[dst], %[stride])        \r\n"
        "psrah $f14, $f14, $f20                     \r\n"
        "psrah $f10, $f10, $f20                     \r\n"
        "punpcklbh $f6, $f6, $f4                    \r\n"
        "punpcklbh $f0, $f0, $f4                    \r\n"
        "paddh $f6, $f6, $f14                       \r\n"
        "paddh $f0, $f0, $f10                       \r\n"
        "packushb $f6, $f6, $f4                     \r\n"
        "packushb $f0, $f0, $f4                     \r\n"
        "swc1 $f6, 0x0(%[dst])                      \r\n"
        "gsswxc1 $f0, 0x0(%[dst], %[stride])        \r\n"
        "daddu %[dst], %[dst], %[stride]            \r\n"
        "daddu %[dst], %[dst], %[stride]            \r\n"
        "lwc1 $f6, 0x0(%[dst])                      \r\n"
        "gslwxc1 $f0, 0x0(%[dst], %[stride])        \r\n"
        "psrah $f8, $f8, $f20                       \r\n"
        "psrah $f12, $f12, $f20                     \r\n"
        "punpcklbh $f6, $f6, $f4                    \r\n"
        "punpcklbh $f0, $f0, $f4                    \r\n"
        "paddh $f6, $f6, $f8                        \r\n"
        "paddh $f0, $f0, $f12                       \r\n"
        "packushb $f6, $f6, $f4                     \r\n"
        "packushb $f0, $f0, $f4                     \r\n"
        "swc1 $f6, 0x0(%[dst])                      \r\n"
        "gsswxc1 $f0, 0x0(%[dst], %[stride])        \r\n"
        "dmtc1 $12, $f2                             \r\n"
        "dmtc1 $9, $f12                             \r\n"
        "ldc1 $f8, 0x18($29)                        \r\n"
        "mov.d $f10, $f8                            \r\n"
        "psrah $f8, $f8, $f16                       \r\n"
        "psrah $f14, $f22, $f16                     \r\n"
        "paddh $f14, $f14, $f22                     \r\n"
        "paddh $f8, $f8, $f10                       \r\n"
        "paddh $f14, $f14, $f30                     \r\n"
        "paddh $f8, $f8, $f22                       \r\n"
        "psubh $f14, $f14, $f10                     \r\n"
        "paddh $f8, $f8, $f2                        \r\n"
        "psubh $f10, $f10, $f2                      \r\n"
        "psubh $f6, $f22, $f2                       \r\n"
        "psrah $f2, $f2, $f16                       \r\n"
        "paddh $f10, $f10, $f30                     \r\n"
        "psubh $f6, $f6, $f30                       \r\n"
        "psrah $f4, $f30, $f16                      \r\n"
        "psubh $f10, $f10, $f2                      \r\n"
        "psubh $f6, $f6, $f4                        \r\n"
        "mov.d $f4, $f8                             \r\n"
        "psrah $f8, $f8, $f18                       \r\n"
        "psrah $f2, $f14, $f18                      \r\n"
        "paddh $f8, $f8, $f6                        \r\n"
        "paddh $f2, $f2, $f10                       \r\n"
        "psrah $f10, $f10, $f18                     \r\n"
        "psrah $f6, $f6, $f18                       \r\n"
        "psubh $f10, $f10, $f14                     \r\n"
        "psubh $f4, $f4, $f6                        \r\n"
        "mov.d $f6, $f26                            \r\n"
        "psrah $f0, $f26, $f16                      \r\n"
        "psrah $f14, $f12, $f16                     \r\n"
        "paddh $f0, $f0, $f12                       \r\n"
        "psubh $f14, $f14, $f6                      \r\n"
        "ldc1 $f12, 0x8($29)                        \r\n"
        "dmtc1 $14, $f6                             \r\n"
        "paddh $f6, $f6, $f12                       \r\n"
        "paddh $f12, $f12, $f12                     \r\n"
        "paddh $f0, $f0, $f6                        \r\n"
        "psubh $f12, $f12, $f6                      \r\n"
        "paddh $f6, $f6, $f6                        \r\n"
        "paddh $f14, $f14, $f12                     \r\n"
        "psubh $f6, $f6, $f0                        \r\n"
        "paddh $f12, $f12, $f12                     \r\n"
        "paddh $f4, $f4, $f0                        \r\n"
        "psubh $f12, $f12, $f14                     \r\n"
        "paddh $f0, $f0, $f0                        \r\n"
        "paddh $f10, $f10, $f14                     \r\n"
        "psubh $f0, $f0, $f4                        \r\n"
        "paddh $f14, $f14, $f14                     \r\n"
        "paddh $f2, $f2, $f12                       \r\n"
        "psubh $f14, $f14, $f10                     \r\n"
        "paddh $f12, $f12, $f12                     \r\n"
        "paddh $f8, $f8, $f6                        \r\n"
        "psubh $f12, $f12, $f2                      \r\n"
        "paddh $f6, $f6, $f6                        \r\n"
        "sdc1 $f12, 0x8($29)                        \r\n"
        "psubh $f6, $f6, $f8                        \r\n"
        "sdc1 $f14, 0x18($29)                       \r\n"
        "dmfc1 $9, $f0                              \r\n"
        "xor $f0, $f0, $f0                          \r\n"
        "lwc1 $f12, 0x0($10)                        \r\n"
        "gslwxc1 $f14, 0x0($10, %[stride])          \r\n"
        "psrah $f4, $f4, $f20                       \r\n"
        "psrah $f10, $f10, $f20                     \r\n"
        "punpcklbh $f12, $f12, $f0                  \r\n"
        "punpcklbh $f14, $f14, $f0                  \r\n"
        "paddh $f12, $f12, $f4                      \r\n"
        "paddh $f14, $f14, $f10                     \r\n"
        "packushb $f12, $f12, $f0                   \r\n"
        "packushb $f14, $f14, $f0                   \r\n"
        "swc1 $f12, 0x0($10)                        \r\n"
        "gsswxc1 $f14, 0x0($10, %[stride])          \r\n"
        "daddu $10, $10, %[stride]                  \r\n"
        "daddu $10, $10, %[stride]                  \r\n"
        "lwc1 $f12, 0x0($10)                        \r\n"
        "gslwxc1 $f14, 0x0($10, %[stride])          \r\n"
        "psrah $f2, $f2, $f20                       \r\n"
        "psrah $f8, $f8, $f20                       \r\n"
        "punpcklbh $f12, $f12, $f0                  \r\n"
        "punpcklbh $f14, $f14, $f0                  \r\n"
        "paddh $f12, $f12, $f2                      \r\n"
        "paddh $f14, $f14, $f8                      \r\n"
        "packushb $f12, $f12, $f0                   \r\n"
        "packushb $f14, $f14, $f0                   \r\n"
        "swc1 $f12, 0x0($10)                        \r\n"
        "gsswxc1 $f14, 0x0($10, %[stride])          \r\n"
        "ldc1 $f4, 0x8($29)                         \r\n"
        "ldc1 $f10, 0x18($29)                       \r\n"
        "daddu $10, $10, %[stride]                  \r\n"
        "dmtc1 $9, $f2                              \r\n"
        "daddu $10, $10, %[stride]                  \r\n"
        "lwc1 $f12, 0x0($10)                        \r\n"
        "gslwxc1 $f14, 0x0($10, %[stride])          \r\n"
        "psrah $f6, $f6, $f20                       \r\n"
        "psrah $f4, $f4, $f20                       \r\n"
        "punpcklbh $f12, $f12, $f0                  \r\n"
        "punpcklbh $f14, $f14, $f0                  \r\n"
        "paddh $f12, $f12, $f6                      \r\n"
        "paddh $f14, $f14, $f4                      \r\n"
        "packushb $f12, $f12, $f0                   \r\n"
        "packushb $f14, $f14, $f0                   \r\n"
        "swc1 $f12, 0x0($10)                        \r\n"
        "gsswxc1 $f14, 0x0($10, %[stride])          \r\n"
        "daddu $10, $10, %[stride]                  \r\n"
        "daddu $10, $10, %[stride]                  \r\n"
        "lwc1 $f12, 0x0($10)                        \r\n"
        "gslwxc1 $f14, 0x0($10, %[stride])          \r\n"
        "psrah $f10, $f10, $f20                     \r\n"
        "psrah $f2, $f2, $f20                       \r\n"
        "punpcklbh $f12, $f12, $f0                  \r\n"
        "punpcklbh $f14, $f14, $f0                  \r\n"
        "paddh $f12, $f12, $f10                     \r\n"
        "paddh $f14, $f14, $f2                      \r\n"
        "packushb $f12, $f12, $f0                   \r\n"
        "packushb $f14, $f14, $f0                   \r\n"
        "swc1 $f12, 0x0($10)                        \r\n"
        "gsswxc1 $f14, 0x0($10, %[stride])          \r\n"
        "daddiu $29, $29, 0x20                      \r\n"
        ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride)
        :"$8","$9","$10","$11","$12","$13","$14","$15","$29","$f0","$f2","$f4",
         "$f8","$f10","$f12","$f14","$f16","$f18","$f20","$f22","$f24","$f26",
         "$f28","$f30"
    );

    memset(block, 0, 128);
}

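/*
 * DC-only 4x4 add: dc = (block[0] + 32) >> 6 is splatted, and its
 * positive and negative parts are applied with the paddusb/psubusb
 * pair so the pixels never need unpacking to 16 bits.
 */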
void ff_h264_idct_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile (
        "lh $8, 0x0(%[block])                       \r\n"
        "sd $0, 0x0(%[block])                       \r\n"
        "daddiu $8, $8, 0x20                        \r\n"
        "daddu $10, %[stride], %[stride]            \r\n"
        "dsra $8, $8, 0x6                           \r\n"
        "xor $f2, $f2, $f2                          \r\n"
        "mtc1 $8, $f0                               \r\n"
        "pshufh $f0, $f0, $f2                       \r\n"
        "daddu $8, $10, %[stride]                   \r\n"
        "psubh $f2, $f2, $f0                        \r\n"
        "packushb $f0, $f0, $f0                     \r\n"
        "packushb $f2, $f2, $f2                     \r\n"
        "lwc1 $f4, 0x0(%[dst])                      \r\n"
        "gslwxc1 $f6, 0x0(%[dst], %[stride])        \r\n"
        "gslwxc1 $f8, 0x0(%[dst], $10)              \r\n"
        "gslwxc1 $f10, 0x0(%[dst], $8)              \r\n"
        "paddusb $f4, $f4, $f0                      \r\n"
        "paddusb $f6, $f6, $f0                      \r\n"
        "paddusb $f8, $f8, $f0                      \r\n"
        "paddusb $f10, $f10, $f0                    \r\n"
        "psubusb $f4, $f4, $f2                      \r\n"
        "psubusb $f6, $f6, $f2                      \r\n"
        "psubusb $f8, $f8, $f2                      \r\n"
        "psubusb $f10, $f10, $f2                    \r\n"
        "swc1 $f4, 0x0(%[dst])                      \r\n"
        "gsswxc1 $f6, 0x0(%[dst], %[stride])        \r\n"
        "gsswxc1 $f8, 0x0(%[dst], $10)              \r\n"
        "gsswxc1 $f10, 0x0(%[dst], $8)              \r\n"
        ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride)
        : "$8","$10","$f0","$f2","$f4","$f6","$f8","$f10"
    );
}

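/* DC-only 8x8 add: the same saturating trick over eight 8-pixel rows. */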
void ff_h264_idct8_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile (
        "lh $8, 0x0(%[block])                       \r\n"
        "sd $0, 0x0(%[block])                       \r\n"
        "daddiu $8, $8, 0x20                        \r\n"
        "daddu $10, %[stride], %[stride]            \r\n"
        "dsra $8, $8, 0x6                           \r\n"
        "xor $f2, $f2, $f2                          \r\n"
        "mtc1 $8, $f0                               \r\n"
        "pshufh $f0, $f0, $f2                       \r\n"
        "daddu $8, $10, %[stride]                   \r\n"
        "psubh $f2, $f2, $f0                        \r\n"
        "packushb $f0, $f0, $f0                     \r\n"
        "packushb $f2, $f2, $f2                     \r\n"
        "ldc1 $f4, 0x0(%[dst])                      \r\n"
        "gsldxc1 $f6, 0x0(%[dst], %[stride])        \r\n"
        "gsldxc1 $f8, 0x0(%[dst], $10)              \r\n"
        "gsldxc1 $f10, 0x0(%[dst], $8)              \r\n"
        "paddusb $f4, $f4, $f0                      \r\n"
        "paddusb $f6, $f6, $f0                      \r\n"
        "paddusb $f8, $f8, $f0                      \r\n"
        "paddusb $f10, $f10, $f0                    \r\n"
        "psubusb $f4, $f4, $f2                      \r\n"
        "psubusb $f6, $f6, $f2                      \r\n"
        "psubusb $f8, $f8, $f2                      \r\n"
        "psubusb $f10, $f10, $f2                    \r\n"
        "sdc1 $f4, 0x0(%[dst])                      \r\n"
        "gssdxc1 $f6, 0x0(%[dst], %[stride])        \r\n"
        "gssdxc1 $f8, 0x0(%[dst], $10)              \r\n"
        "daddu $9, $10, $10                         \r\n"
        "gssdxc1 $f10, 0x0(%[dst], $8)              \r\n"
        "daddu %[dst], %[dst], $9                   \r\n"
        "ldc1 $f4, 0x0(%[dst])                      \r\n"
        "gsldxc1 $f6, 0x0(%[dst], %[stride])        \r\n"
        "gsldxc1 $f8, 0x0(%[dst], $10)              \r\n"
        "gsldxc1 $f10, 0x0(%[dst], $8)              \r\n"
        "paddusb $f4, $f4, $f0                      \r\n"
        "paddusb $f6, $f6, $f0                      \r\n"
        "paddusb $f8, $f8, $f0                      \r\n"
        "paddusb $f10, $f10, $f0                    \r\n"
        "psubusb $f4, $f4, $f2                      \r\n"
        "psubusb $f6, $f6, $f2                      \r\n"
        "psubusb $f8, $f8, $f2                      \r\n"
        "psubusb $f10, $f10, $f2                    \r\n"
        "sdc1 $f4, 0x0(%[dst])                      \r\n"
        "gssdxc1 $f6, 0x0(%[dst], %[stride])        \r\n"
        "gssdxc1 $f8, 0x0(%[dst], $10)              \r\n"
        "gssdxc1 $f10, 0x0(%[dst], $8)              \r\n"
        ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride)
        : "$8","$9","$10","$f0","$f2","$f4","$f6","$f8","$f10"
    );
}

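/*
 * The wrappers below follow libavcodec/h264idct_template.c: scan8[]
 * maps a block index to its nonzero-count entry, a full IDCT is used
 * when there are AC coefficients, and a lone DC coefficient takes the
 * cheaper dc_add path.
 */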
void ff_h264_idct_add16_8_mmi(uint8_t *dst, const int *block_offset,
        int16_t *block, int stride, const uint8_t nnzc[15*8])
{
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && ((int16_t*)block)[i*16])
                ff_h264_idct_dc_add_8_mmi(dst + block_offset[i], block + i*16,
                        stride);
            else
                ff_h264_idct_add_8_mmi(dst + block_offset[i], block + i*16,
                        stride);
        }
    }
}

void ff_h264_idct_add16intra_8_mmi(uint8_t *dst, const int *block_offset,
        int16_t *block, int stride, const uint8_t nnzc[15*8])
{
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_8_mmi(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            ff_h264_idct_dc_add_8_mmi(dst + block_offset[i], block + i*16,
                    stride);
    }
}

void ff_h264_idct8_add4_8_mmi(uint8_t *dst, const int *block_offset,
        int16_t *block, int stride, const uint8_t nnzc[15*8])
{
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && ((int16_t*)block)[i*16])
                ff_h264_idct8_dc_add_8_mmi(dst + block_offset[i],
                        block + i*16, stride);
            else
                ff_h264_idct8_add_8_mmi(dst + block_offset[i], block + i*16,
                        stride);
        }
    }
}

void ff_h264_idct_add8_8_mmi(uint8_t **dest, const int *block_offset,
        int16_t *block, int stride, const uint8_t nnzc[15*8])
{
    int i, j;
    for(j=1; j<3; j++){
        for(i=j*16; i<j*16+4; i++){
            if(nnzc[ scan8[i] ])
                ff_h264_idct_add_8_mmi(dest[j-1] + block_offset[i],
                        block + i*16, stride);
            else if(block[i*16])
                ff_h264_idct_dc_add_8_mmi(dest[j-1] + block_offset[i],
                        block + i*16, stride);
        }
    }
}

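/* 4:2:2 chroma has eight 4x4 blocks per plane; blocks 4-7 keep their
 * nnzc entries at scan8[i + 4]. */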
void ff_h264_idct_add8_422_8_mmi(uint8_t **dest, const int *block_offset,
        int16_t *block, int stride, const uint8_t nnzc[15*8])
{
    int i, j;

    for(j=1; j<3; j++){
        for(i=j*16; i<j*16+4; i++){
            if(nnzc[ scan8[i] ])
                ff_h264_idct_add_8_mmi(dest[j-1] + block_offset[i],
                        block + i*16, stride);
            else if(block[i*16])
                ff_h264_idct_dc_add_8_mmi(dest[j-1] + block_offset[i],
                        block + i*16, stride);
        }
    }

    for(j=1; j<3; j++){
        for(i=j*16+4; i<j*16+8; i++){
            if(nnzc[ scan8[i+4] ])
                ff_h264_idct_add_8_mmi(dest[j-1] + block_offset[i+4],
                        block + i*16, stride);
            else if(block[i*16])
                ff_h264_idct_dc_add_8_mmi(dest[j-1] + block_offset[i+4],
                        block + i*16, stride);
        }
    }
}

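/*
 * Dequantize and inverse-transform the 4x4 luma DC array (a 4-point
 * Hadamard in each direction), scattering the results to the DC slot
 * of each 4x4 block in output (0x20 bytes apart). pmaddhw multiplies
 * 16-bit lanes, so for qmul >= 0x8000 the code branches to label 1,
 * pre-shifts qmul right and compensates with a smaller final psraw;
 * the 0x80 packed above qmul pairs with ff_pw_1 in pmaddhw to add the
 * +128 rounding term before the >> 8.
 */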
void ff_h264_luma_dc_dequant_idct_8_mmi(int16_t *output, int16_t *input,
        int qmul)
{
    /* input is reused as a scratch register and qmul is rescaled inside
     * the asm, so both live in read-write operands; qmul is widened for
     * the 64-bit shifts. */
    uint64_t qmul64 = qmul;

    __asm__ volatile (
        ".set noreorder                                 \r\n"
        "dli $10, 0x8                                   \r\n"
        "ldc1 $f6, 0x18(%[input])                       \r\n"
        "dmtc1 $10, $f16                                \r\n"
        "ldc1 $f4, 0x10(%[input])                       \r\n"
        "dli $10, 0x20                                  \r\n"
        "ldc1 $f2, 0x8(%[input])                        \r\n"
        "dmtc1 $10, $f18                                \r\n"
        "ldc1 $f0, 0x0(%[input])                        \r\n"
        "mov.d $f8, $f6                                 \r\n"
        "paddh $f6, $f6, $f4                            \r\n"
        "psubh $f4, $f4, $f8                            \r\n"
        "mov.d $f8, $f2                                 \r\n"
        "paddh $f2, $f2, $f0                            \r\n"
        "psubh $f0, $f0, $f8                            \r\n"
        "mov.d $f8, $f6                                 \r\n"
        "paddh $f6, $f6, $f2                            \r\n"
        "psubh $f2, $f2, $f8                            \r\n"
        "mov.d $f8, $f4                                 \r\n"
        "paddh $f4, $f4, $f0                            \r\n"
        "psubh $f0, $f0, $f8                            \r\n"
        "mov.d $f8, $f6                                 \r\n"
        "punpcklhw $f6, $f6, $f2                        \r\n"
        "punpckhhw $f8, $f8, $f2                        \r\n"
        "punpckhhw $f2, $f0, $f4                        \r\n"
        "punpcklhw $f0, $f0, $f4                        \r\n"
        "punpckhwd $f4, $f6, $f0                        \r\n"
        "punpcklwd $f6, $f6, $f0                        \r\n"
        "mov.d $f0, $f8                                 \r\n"
        "punpcklwd $f8, $f8, $f2                        \r\n"
        "punpckhwd $f0, $f0, $f2                        \r\n"
        "mov.d $f2, $f0                                 \r\n"
        "paddh $f0, $f0, $f8                            \r\n"
        "psubh $f8, $f8, $f2                            \r\n"
        "mov.d $f2, $f4                                 \r\n"
        "paddh $f4, $f4, $f6                            \r\n"
        "psubh $f6, $f6, $f2                            \r\n"
        "mov.d $f2, $f0                                 \r\n"
        "paddh $f0, $f0, $f4                            \r\n"
        "psubh $f4, $f4, $f2                            \r\n"
        "mov.d $f2, $f8                                 \r\n"
        "daddiu $10, %[qmul], -0x7fff                   \r\n"
        "paddh $f8, $f8, $f6                            \r\n"
        "bgtz $10, 1f                                   \r\n"
        "psubh $f6, $f6, $f2                            \r\n"
        "ori $10, $0, 0x80                              \r\n"
        "dsll $10, $10, 0x10                            \r\n"
        "punpckhhw $f2, $f0, %[ff_pw_1]                 \r\n"
        "daddu %[qmul], %[qmul], $10                    \r\n"
        "punpcklhw $f0, $f0, %[ff_pw_1]                 \r\n"
        "punpckhhw $f10, $f4, %[ff_pw_1]                \r\n"
        "punpcklhw $f4, $f4, %[ff_pw_1]                 \r\n"
        "mtc1 %[qmul], $f14                             \r\n"
        "punpcklwd $f14, $f14, $f14                     \r\n"
        "pmaddhw $f0, $f0, $f14                         \r\n"
        "pmaddhw $f4, $f4, $f14                         \r\n"
        "pmaddhw $f2, $f2, $f14                         \r\n"
        "pmaddhw $f10, $f10, $f14                       \r\n"
        "psraw $f0, $f0, $f16                           \r\n"
        "psraw $f4, $f4, $f16                           \r\n"
        "psraw $f2, $f2, $f16                           \r\n"
        "psraw $f10, $f10, $f16                         \r\n"
        "packsswh $f0, $f0, $f2                         \r\n"
        "packsswh $f4, $f4, $f10                        \r\n"
        "mfc1 $9, $f0                                   \r\n"
        "dsrl $f0, $f0, $f18                            \r\n"
        "mfc1 %[input], $f0                             \r\n"
        "sh $9, 0x0(%[output])                          \r\n"
        "sh %[input], 0x80(%[output])                   \r\n"
        "dsrl $9, $9, 0x10                              \r\n"
        "dsrl %[input], %[input], 0x10                  \r\n"
        "sh $9, 0x20(%[output])                         \r\n"
        "sh %[input], 0xa0(%[output])                   \r\n"
        "mfc1 $9, $f4                                   \r\n"
        "dsrl $f4, $f4, $f18                            \r\n"
        "mfc1 %[input], $f4                             \r\n"
        "sh $9, 0x40(%[output])                         \r\n"
        "sh %[input], 0xc0(%[output])                   \r\n"
        "dsrl $9, $9, 0x10                              \r\n"
        "dsrl %[input], %[input], 0x10                  \r\n"
        "sh $9, 0x60(%[output])                         \r\n"
        "sh %[input], 0xe0(%[output])                   \r\n"
        "punpckhhw $f2, $f6, %[ff_pw_1]                 \r\n"
        "punpcklhw $f6, $f6, %[ff_pw_1]                 \r\n"
        "punpckhhw $f10, $f8, %[ff_pw_1]                \r\n"
        "punpcklhw $f8, $f8, %[ff_pw_1]                 \r\n"
        "mtc1 %[qmul], $f14                             \r\n"
        "punpcklwd $f14, $f14, $f14                     \r\n"
        "pmaddhw $f6, $f6, $f14                         \r\n"
        "pmaddhw $f8, $f8, $f14                         \r\n"
        "pmaddhw $f2, $f2, $f14                         \r\n"
        "pmaddhw $f10, $f10, $f14                       \r\n"
        "psraw $f6, $f6, $f16                           \r\n"
        "psraw $f8, $f8, $f16                           \r\n"
        "psraw $f2, $f2, $f16                           \r\n"
        "psraw $f10, $f10, $f16                         \r\n"
        "packsswh $f6, $f6, $f2                         \r\n"
        "packsswh $f8, $f8, $f10                        \r\n"
        "mfc1 $9, $f6                                   \r\n"
        "dsrl $f6, $f6, $f18                            \r\n"
        "mfc1 %[input], $f6                             \r\n"
        "sh $9, 0x100(%[output])                        \r\n"
        "sh %[input], 0x180(%[output])                  \r\n"
        "dsrl $9, $9, 0x10                              \r\n"
        "dsrl %[input], %[input], 0x10                  \r\n"
        "sh $9, 0x120(%[output])                        \r\n"
        "sh %[input], 0x1a0(%[output])                  \r\n"
        "mfc1 $9, $f8                                   \r\n"
        "dsrl $f8, $f8, $f18                            \r\n"
        "mfc1 %[input], $f8                             \r\n"
        "sh $9, 0x140(%[output])                        \r\n"
        "sh %[input], 0x1c0(%[output])                  \r\n"
        "dsrl $9, $9, 0x10                              \r\n"
        "dsrl %[input], %[input], 0x10                  \r\n"
        "sh $9, 0x160(%[output])                        \r\n"
        "jr $31                                         \r\n"
        "sh %[input], 0x1e0(%[output])                  \r\n"
        "1:                                             \r\n"
        "ori $10, $0, 0x1f                              \r\n"
        "clz $9, %[qmul]                                \r\n"
        "ori %[input], $0, 0x7                          \r\n"
        "dsubu $9, $10, $9                              \r\n"
        "ori $10, $0, 0x80                              \r\n"
        "dsll $10, $10, 0x10                            \r\n"
        "daddu %[qmul], %[qmul], $10                    \r\n"
        "dsubu $10, $9, %[input]                        \r\n"
        "movn $9, %[input], $10                         \r\n"
        "daddiu %[input], %[input], 0x1                 \r\n"
        "andi $10, $9, 0xff                             \r\n"
        "dsrlv %[qmul], %[qmul], $10                    \r\n"
        "dsubu %[input], %[input], $9                   \r\n"
        "mtc1 %[input], $f12                            \r\n"
        "punpckhhw $f2, $f0, %[ff_pw_1]                 \r\n"
        "punpcklhw $f0, $f0, %[ff_pw_1]                 \r\n"
        "punpckhhw $f10, $f4, %[ff_pw_1]                \r\n"
        "punpcklhw $f4, $f4, %[ff_pw_1]                 \r\n"
        "mtc1 %[qmul], $f14                             \r\n"
        "punpcklwd $f14, $f14, $f14                     \r\n"
        "pmaddhw $f0, $f0, $f14                         \r\n"
        "pmaddhw $f4, $f4, $f14                         \r\n"
        "pmaddhw $f2, $f2, $f14                         \r\n"
        "pmaddhw $f10, $f10, $f14                       \r\n"
        "psraw $f0, $f0, $f12                           \r\n"
        "psraw $f4, $f4, $f12                           \r\n"
        "psraw $f2, $f2, $f12                           \r\n"
        "psraw $f10, $f10, $f12                         \r\n"
        "packsswh $f0, $f0, $f2                         \r\n"
        "packsswh $f4, $f4, $f10                        \r\n"
        "mfc1 $9, $f0                                   \r\n"
        "dsrl $f0, $f0, $f18                            \r\n"
        "sh $9, 0x0(%[output])                          \r\n"
        "mfc1 %[input], $f0                             \r\n"
        "dsrl $9, $9, 0x10                              \r\n"
        "sh %[input], 0x80(%[output])                   \r\n"
        "sh $9, 0x20(%[output])                         \r\n"
        "dsrl %[input], %[input], 0x10                  \r\n"
        "mfc1 $9, $f4                                   \r\n"
        "sh %[input], 0xa0(%[output])                   \r\n"
        "dsrl $f4, $f4, $f18                            \r\n"
        "sh $9, 0x40(%[output])                         \r\n"
        "mfc1 %[input], $f4                             \r\n"
        "dsrl $9, $9, 0x10                              \r\n"
        "sh %[input], 0xc0(%[output])                   \r\n"
        "sh $9, 0x60(%[output])                         \r\n"
        "dsrl %[input], %[input], 0x10                  \r\n"
        "sh %[input], 0xe0(%[output])                   \r\n"
        "punpckhhw $f2, $f6, %[ff_pw_1]                 \r\n"
        "punpcklhw $f6, $f6, %[ff_pw_1]                 \r\n"
        "punpckhhw $f10, $f8, %[ff_pw_1]                \r\n"
        "punpcklhw $f8, $f8, %[ff_pw_1]                 \r\n"
        "mtc1 %[qmul], $f14                             \r\n"
        "punpcklwd $f14, $f14, $f14                     \r\n"
        "pmaddhw $f6, $f6, $f14                         \r\n"
        "pmaddhw $f8, $f8, $f14                         \r\n"
        "pmaddhw $f2, $f2, $f14                         \r\n"
        "pmaddhw $f10, $f10, $f14                       \r\n"
        "psraw $f6, $f6, $f12                           \r\n"
        "psraw $f8, $f8, $f12                           \r\n"
        "psraw $f2, $f2, $f12                           \r\n"
        "psraw $f10, $f10, $f12                         \r\n"
        "packsswh $f6, $f6, $f2                         \r\n"
        "packsswh $f8, $f8, $f10                        \r\n"
        "mfc1 $9, $f6                                   \r\n"
        "dsrl $f6, $f6, $f18                            \r\n"
        "mfc1 %[input], $f6                             \r\n"
        "sh $9, 0x100(%[output])                        \r\n"
        "sh %[input], 0x180(%[output])                  \r\n"
        "dsrl $9, $9, 0x10                              \r\n"
        "dsrl %[input], %[input], 0x10                  \r\n"
        "sh $9, 0x120(%[output])                        \r\n"
        "sh %[input], 0x1a0(%[output])                  \r\n"
        "mfc1 $9, $f8                                   \r\n"
        "dsrl $f8, $f8, $f18                            \r\n"
        "mfc1 %[input], $f8                             \r\n"
        "sh $9, 0x140(%[output])                        \r\n"
        "sh %[input], 0x1c0(%[output])                  \r\n"
        "dsrl $9, $9, 0x10                              \r\n"
        "dsrl %[input], %[input], 0x10                  \r\n"
        "sh $9, 0x160(%[output])                        \r\n"
        "sh %[input], 0x1e0(%[output])                  \r\n"
        ".set reorder                                   \r\n"
        ::[output]"r"(output),[input]"r"(input),[qmul]"r"((uint64_t)qmul),
          [ff_pw_1]"f"(ff_pw_1)
        : "$9","$10","$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16",
          "$f18"
    );
}

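/* Dequant + transform of the eight 4:2:2 chroma DC coefficients (plain
 * C): two butterfly stages followed by (t * qmul + 128) >> 8, written
 * back in place at the DC positions. */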
void ff_h264_chroma422_dc_dequant_idct_8_mmi(int16_t *block, int qmul)
{
    int temp[8];
    int t[8];

    temp[0] = block[0] + block[16];
    temp[1] = block[0] - block[16];
    temp[2] = block[32] + block[48];
    temp[3] = block[32] - block[48];
    temp[4] = block[64] + block[80];
    temp[5] = block[64] - block[80];
    temp[6] = block[96] + block[112];
    temp[7] = block[96] - block[112];

    t[0] = temp[0] + temp[4] + temp[2] + temp[6];
    t[1] = temp[0] - temp[4] + temp[2] - temp[6];
    t[2] = temp[0] - temp[4] - temp[2] + temp[6];
    t[3] = temp[0] + temp[4] - temp[2] - temp[6];
    t[4] = temp[1] + temp[5] + temp[3] + temp[7];
    t[5] = temp[1] - temp[5] + temp[3] - temp[7];
    t[6] = temp[1] - temp[5] - temp[3] + temp[7];
    t[7] = temp[1] + temp[5] - temp[3] - temp[7];

    block[  0]= (t[0]*qmul + 128) >> 8;
    block[ 32]= (t[1]*qmul + 128) >> 8;
    block[ 64]= (t[2]*qmul + 128) >> 8;
    block[ 96]= (t[3]*qmul + 128) >> 8;
    block[ 16]= (t[4]*qmul + 128) >> 8;
    block[ 48]= (t[5]*qmul + 128) >> 8;
    block[ 80]= (t[6]*qmul + 128) >> 8;
    block[112]= (t[7]*qmul + 128) >> 8;
}

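/* 2x2 chroma DC transform: one Hadamard butterfly, scaled by qmul and
 * shifted right by 7 with no rounding term, as in the template code. */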
void ff_h264_chroma_dc_dequant_idct_8_mmi(int16_t *block, int qmul)
{
    int a,b,c,d;

    d = block[0] - block[16];
    a = block[0] + block[16];
    b = block[32] - block[48];
    c = block[32] + block[48];
    block[0] = ((a+c)*qmul) >> 7;
    block[16]= ((d+b)*qmul) >> 7;
    block[32]= ((a-c)*qmul) >> 7;
    block[48]= ((d-b)*qmul) >> 7;
}

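/*
 * Explicit weighted prediction: pix = clip((pix * weight + offset) >>
 * log2_denom), with the rounding term 1 << (log2_denom - 1) pre-folded
 * into offset. Each iteration handles one 16-pixel row as two 8-byte
 * halves.
 */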
void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride,
        int height, int log2_denom, int weight, int offset)
{
    int y;

    offset <<= log2_denom;

    if (log2_denom)
        offset += 1 << (log2_denom - 1);

    for (y=0; y<height; y++, block+=stride) {
        __asm__ volatile (
            "ldc1 $f2, %0                   \r\n"
            "ldc1 $f4, %1                   \r\n"
            "dmtc1 $0, $f20                 \r\n"
            "mtc1 %2, $f6                   \r\n"
            "mtc1 %3, $f8                   \r\n"
            "mtc1 %4, $f10                  \r\n"
            "pshufh $f6, $f6, $f20          \r\n"
            "pshufh $f8, $f8, $f20          \r\n"
            "punpckhbh $f14, $f2, $f20      \r\n"
            "punpckhbh $f16, $f4, $f20      \r\n"
            "punpcklbh $f2, $f2, $f20       \r\n"
            "punpcklbh $f4, $f4, $f20       \r\n"
            "pmullh $f14, $f14, $f6         \r\n"
            "pmullh $f16, $f16, $f6         \r\n"
            "pmullh $f2, $f2, $f6           \r\n"
            "pmullh $f4, $f4, $f6           \r\n"
            "paddsh $f14, $f14, $f8         \r\n"
            "paddsh $f16, $f16, $f8         \r\n"
            "paddsh $f2, $f2, $f8           \r\n"
            "paddsh $f4, $f4, $f8           \r\n"
            "psrah $f14, $f14, $f10         \r\n"
            "psrah $f16, $f16, $f10         \r\n"
            "psrah $f2, $f2, $f10           \r\n"
            "psrah $f4, $f4, $f10           \r\n"
            "packushb $f2, $f2, $f14        \r\n"
            "packushb $f4, $f4, $f16        \r\n"
            "sdc1 $f2, %0                   \r\n"
            "sdc1 $f4, %1                   \r\n"
            : "=m"(*block),"=m"(*(block + 8))
            : "r"(weight),"r"(offset),"r"(log2_denom)
        );
    }
}

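/*
 * Bidirectional weighted prediction: dst = clip((src * weights +
 * dst * weightd + offset) >> (log2_denom + 1)); the ((offset + 1) | 1)
 * adjustment folds the spec's rounding into the pre-shifted offset.
 */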
void ff_h264_biweight_pixels16_8_mmi(uint8_t *dst, uint8_t *src,
        int stride, int height, int log2_denom, int weightd, int weights,
        int offset)
{
    int y;

    offset = ((offset + 1) | 1) << log2_denom;

    for (y=0; y<height; y++, dst+=stride, src+=stride) {
        __asm__ volatile (
            "ldc1 $f2, %2                   \r\n"
            "ldc1 $f4, %3                   \r\n"
            "dmtc1 $0, $f20                 \r\n"
            "mtc1 %6, $f6                   \r\n"
            "mtc1 %7, $f8                   \r\n"
            "mtc1 %8, $f10                  \r\n"
            "mtc1 %9, $f12                  \r\n"
            "pshufh $f6, $f6, $f20          \r\n"
            "pshufh $f8, $f8, $f20          \r\n"
            "pshufh $f10, $f10, $f20        \r\n"
            "punpckhbh $f14, $f2, $f20      \r\n"
            "punpckhbh $f16, $f4, $f20      \r\n"
            "punpcklbh $f2, $f2, $f20       \r\n"
            "punpcklbh $f4, $f4, $f20       \r\n"
            "pmullh $f14, $f14, $f6         \r\n"
            "pmullh $f16, $f16, $f8         \r\n"
            "pmullh $f2, $f2, $f6           \r\n"
            "pmullh $f4, $f4, $f8           \r\n"
            "paddsh $f14, $f14, $f10        \r\n"
            "paddsh $f2, $f2, $f10          \r\n"
            "paddsh $f14, $f14, $f16        \r\n"
            "paddsh $f2, $f2, $f4           \r\n"
            "psrah $f14, $f14, $f12         \r\n"
            "psrah $f2, $f2, $f12           \r\n"
            "packushb $f2, $f2, $f14        \r\n"
            "sdc1 $f2, %0                   \r\n"
            "ldc1 $f2, %4                   \r\n"
            "ldc1 $f4, %5                   \r\n"
            "punpckhbh $f14, $f2, $f20      \r\n"
            "punpckhbh $f16, $f4, $f20      \r\n"
            "punpcklbh $f2, $f2, $f20       \r\n"
            "punpcklbh $f4, $f4, $f20       \r\n"
            "pmullh $f14, $f14, $f6         \r\n"
            "pmullh $f16, $f16, $f8         \r\n"
            "pmullh $f2, $f2, $f6           \r\n"
            "pmullh $f4, $f4, $f8           \r\n"
            "paddsh $f14, $f14, $f10        \r\n"
            "paddsh $f2, $f2, $f10          \r\n"
            "paddsh $f14, $f14, $f16        \r\n"
            "paddsh $f2, $f2, $f4           \r\n"
            "psrah $f14, $f14, $f12         \r\n"
            "psrah $f2, $f2, $f12           \r\n"
            "packushb $f2, $f2, $f14        \r\n"
            "sdc1 $f2, %1                   \r\n"
            : "=m"(*dst),"=m"(*(dst+8))
            : "m"(*src),"m"(*dst),"m"(*(src+8)),"m"(*(dst+8)),
              "r"(weights),"r"(weightd),"r"(offset),"r"(log2_denom+1)
        );
    }
}

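/* 8 pixel wide variant of the unidirectional weighting above. */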
void ff_h264_weight_pixels8_8_mmi(uint8_t *block, int stride, int height,
        int log2_denom, int weight, int offset)
{
    int y;

    offset <<= log2_denom;

    if (log2_denom)
        offset += 1 << (log2_denom - 1);

    for (y=0; y<height; y++, block+=stride) {
        __asm__ volatile (
            "ldc1 $f2, %0                   \r\n"
            "mtc1 %1, $f6                   \r\n"
            "mtc1 %2, $f8                   \r\n"
            "mtc1 %3, $f10                  \r\n"
            "dmtc1 $0, $f20                 \r\n"
            "pshufh $f6, $f6, $f20          \r\n"
            "pshufh $f8, $f8, $f20          \r\n"
            "punpckhbh $f14, $f2, $f20      \r\n"
            "punpcklbh $f2, $f2, $f20       \r\n"
            "pmullh $f14, $f14, $f6         \r\n"
            "pmullh $f2, $f2, $f6           \r\n"
            "paddsh $f14, $f14, $f8         \r\n"
            "paddsh $f2, $f2, $f8           \r\n"
            "psrah $f14, $f14, $f10         \r\n"
            "psrah $f2, $f2, $f10           \r\n"
            "packushb $f2, $f2, $f14        \r\n"
            "sdc1 $f2, %0                   \r\n"
            : "=m"(*block)
            : "r"(weight),"r"(offset),"r"(log2_denom)
        );
    }
}

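/* 8 pixel wide variant of the bidirectional weighting above. */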
void ff_h264_biweight_pixels8_8_mmi(uint8_t *dst, uint8_t *src,
        int stride, int height, int log2_denom, int weightd, int weights,
        int offset)
{
    int y;

    offset = ((offset + 1) | 1) << log2_denom;

    for (y=0; y<height; y++, dst+=stride, src+=stride) {
        __asm__ volatile (
            "ldc1 $f2, %1                   \r\n"
            "ldc1 $f4, %2                   \r\n"
            "dmtc1 $0, $f20                 \r\n"
            "mtc1 %3, $f6                   \r\n"
            "mtc1 %4, $f8                   \r\n"
            "mtc1 %5, $f10                  \r\n"
            "mtc1 %6, $f12                  \r\n"
            "pshufh $f6, $f6, $f20          \r\n"
            "pshufh $f8, $f8, $f20          \r\n"
            "pshufh $f10, $f10, $f20        \r\n"
            "punpckhbh $f14, $f2, $f20      \r\n"
            "punpckhbh $f16, $f4, $f20      \r\n"
            "punpcklbh $f2, $f2, $f20       \r\n"
            "punpcklbh $f4, $f4, $f20       \r\n"
            "pmullh $f14, $f14, $f6         \r\n"
            "pmullh $f16, $f16, $f8         \r\n"
            "pmullh $f2, $f2, $f6           \r\n"
            "pmullh $f4, $f4, $f8           \r\n"
            "paddsh $f14, $f14, $f10        \r\n"
            "paddsh $f2, $f2, $f10          \r\n"
            "paddsh $f14, $f14, $f16        \r\n"
            "paddsh $f2, $f2, $f4           \r\n"
            "psrah $f14, $f14, $f12         \r\n"
            "psrah $f2, $f2, $f12           \r\n"
            "packushb $f2, $f2, $f14        \r\n"
            "sdc1 $f2, %0                   \r\n"
            : "=m"(*dst)
            : "m"(*src),"m"(*dst),"r"(weights),
              "r"(weightd),"r"(offset),"r"(log2_denom+1)
        );
    }
}

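/* 4 pixel wide variant; rows are moved with lwc1/swc1 instead of
 * ldc1/sdc1. */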
void ff_h264_weight_pixels4_8_mmi(uint8_t *block, int stride, int height,
        int log2_denom, int weight, int offset)
{
    int y;

    offset <<= log2_denom;

    if (log2_denom)
        offset += 1 << (log2_denom - 1);

    for (y=0; y<height; y++, block+=stride) {
        __asm__ volatile (
            "lwc1 $f2, %0                   \r\n"
            "mtc1 %1, $f6                   \r\n"
            "mtc1 %2, $f8                   \r\n"
            "mtc1 %3, $f10                  \r\n"
            "dmtc1 $0, $f20                 \r\n"
            "pshufh $f6, $f6, $f20          \r\n"
            "pshufh $f8, $f8, $f20          \r\n"
            "punpcklbh $f2, $f2, $f20       \r\n"
            "pmullh $f2, $f2, $f6           \r\n"
            "paddsh $f2, $f2, $f8           \r\n"
            "psrah $f2, $f2, $f10           \r\n"
            "packushb $f2, $f2, $f20        \r\n"
            "swc1 $f2, %0                   \r\n"
            : "=m"(*block)
            : "r"(weight),"r"(offset),"r"(log2_denom)
        );
    }
}

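/* 4 pixel wide variant of the bidirectional weighting. */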
void ff_h264_biweight_pixels4_8_mmi(uint8_t *dst, uint8_t *src,
        int stride, int height, int log2_denom, int weightd, int weights,
        int offset)
{
    int y;

    offset = ((offset + 1) | 1) << log2_denom;

    for (y=0; y<height; y++, dst+=stride, src+=stride) {
        __asm__ volatile (
            "lwc1 $f2, %1                   \r\n"
            "lwc1 $f4, %2                   \r\n"
            "dmtc1 $0, $f20                 \r\n"
            "mtc1 %3, $f6                   \r\n"
            "mtc1 %4, $f8                   \r\n"
            "mtc1 %5, $f10                  \r\n"
            "mtc1 %6, $f12                  \r\n"
            "pshufh $f6, $f6, $f20          \r\n"
            "pshufh $f8, $f8, $f20          \r\n"
            "pshufh $f10, $f10, $f20        \r\n"
            "punpcklbh $f2, $f2, $f20       \r\n"
            "punpcklbh $f4, $f4, $f20       \r\n"
            "pmullh $f2, $f2, $f6           \r\n"
            "pmullh $f4, $f4, $f8           \r\n"
            "paddsh $f2, $f2, $f10          \r\n"
            "paddsh $f2, $f2, $f4           \r\n"
            "psrah $f2, $f2, $f12           \r\n"
            "packushb $f2, $f2, $f20        \r\n"
            "swc1 $f2, %0                   \r\n"
            : "=m"(*dst)
            : "m"(*src),"m"(*dst),"r"(weights),
              "r"(weightd),"r"(offset),"r"(log2_denom+1)
        );
    }
}

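/*
 * Core of the normal (tc0-based) chroma deblock filter. The caller is
 * expected to have preloaded the rows p1/p0/q0/q1 into $f0/$f2/$f4/$f6 and
 * to store the filtered $f2/$f4 afterwards; this implicit register-passing
 * scheme only works while the helper stays inlined and the compiler leaves
 * those FPU registers untouched in between.
 */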
static inline void chroma_inter_body_mmi(uint8_t *pix, int stride,
        int alpha, int beta, int8_t *tc0)
{
    __asm__ volatile (
        "xor $f16, $f16, $f16                           \r\n"
        "mtc1 %[alpha], $f8                             \r\n"
        "mtc1 %[beta], $f10                             \r\n"
        "pshufh $f8, $f8, $f16                          \r\n"
        "pshufh $f10, $f10, $f16                        \r\n"
        "packushb $f8, $f8, $f8                         \r\n"
        "packushb $f10, $f10, $f10                      \r\n"
        "psubusb $f12, $f4, $f2                         \r\n"
        "psubusb $f14, $f2, $f4                         \r\n"
        "or $f14, $f14, $f12                            \r\n"
        "psubusb $f14, $f14, $f8                        \r\n"
        "psubusb $f12, $f2, $f0                         \r\n"
        "psubusb $f8, $f0, $f2                          \r\n"
        "or $f8, $f8, $f12                              \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "or $f14, $f14, $f8                             \r\n"
        "psubusb $f12, $f4, $f6                         \r\n"
        "psubusb $f8, $f6, $f4                          \r\n"
        "or $f8, $f8, $f12                              \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "or $f14, $f14, $f8                             \r\n"
        "xor $f12, $f12, $f12                           \r\n"
        "pcmpeqb $f14, $f14, $f12                       \r\n"
        "lwc1 $f12, 0x0(%[tc0])                         \r\n"
        "punpcklbh $f12, $f12, $f12                     \r\n"
        "and $f14, $f14, $f12                           \r\n"
        "pcmpeqb $f8, $f8, $f8                          \r\n"
        "xor $f10, $f2, $f4                             \r\n"
        "xor $f6, $f6, $f8                              \r\n"
        "and $f10, $f10, %[ff_pb_1]                     \r\n"
        "pavgb $f6, $f6, $f0                            \r\n"
        "xor $f8, $f8, $f2                              \r\n"
        "pavgb $f6, $f6, %[ff_pb_3]                     \r\n"
        "pavgb $f8, $f8, $f4                            \r\n"
        "pavgb $f6, $f6, $f10                           \r\n"
        "paddusb $f6, $f6, $f8                          \r\n"
        "psubusb $f12, %[ff_pb_A1], $f6                 \r\n"
        "psubusb $f6, $f6, %[ff_pb_A1]                  \r\n"
        "pminub $f12, $f12, $f14                        \r\n"
        "pminub $f6, $f6, $f14                          \r\n"
        "psubusb $f2, $f2, $f12                         \r\n"
        "psubusb $f4, $f4, $f6                          \r\n"
        "paddusb $f2, $f2, $f6                          \r\n"
        "paddusb $f4, $f4, $f12                         \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride),
          [alpha]"r"((int64_t)alpha),[beta]"r"((int64_t)beta),[tc0]"r"(tc0),
          [ff_pb_1]"f"(ff_pb_1),[ff_pb_3]"f"(ff_pb_3),[ff_pb_A1]"f"(ff_pb_A1)
        : "$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16"
    );
}

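/* Strong (intra) chroma filter core; same implicit register-passing scheme
 * as chroma_inter_body_mmi ($f0/$f2/$f4/$f6 in, $f2/$f4 out). */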
static inline void chroma_intra_body_mmi(uint8_t *pix, int stride,
        int alpha, int beta)
{
    __asm__ volatile (
        "xor $f16, $f16, $f16                           \r\n"
        "mtc1 %[alpha], $f8                             \r\n"
        "mtc1 %[beta], $f10                             \r\n"
        "pshufh $f8, $f8, $f16                          \r\n"
        "pshufh $f10, $f10, $f16                        \r\n"
        "packushb $f8, $f8, $f8                         \r\n"
        "packushb $f10, $f10, $f10                      \r\n"
        "psubusb $f12, $f4, $f2                         \r\n"
        "psubusb $f14, $f2, $f4                         \r\n"
        "or $f14, $f14, $f12                            \r\n"
        "psubusb $f14, $f14, $f8                        \r\n"
        "psubusb $f12, $f2, $f0                         \r\n"
        "psubusb $f8, $f0, $f2                          \r\n"
        "or $f8, $f8, $f12                              \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "or $f14, $f14, $f8                             \r\n"
        "psubusb $f12, $f4, $f6                         \r\n"
        "psubusb $f8, $f6, $f4                          \r\n"
        "or $f8, $f8, $f12                              \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "or $f14, $f14, $f8                             \r\n"
        "xor $f12, $f12, $f12                           \r\n"
        "pcmpeqb $f14, $f14, $f12                       \r\n"
        "mov.d $f10, $f2                                \r\n"
        "mov.d $f12, $f4                                \r\n"
        "xor $f8, $f2, $f6                              \r\n"
        "and $f8, $f8, %[ff_pb_1]                       \r\n"
        "pavgb $f2, $f2, $f6                            \r\n"
        "psubusb $f2, $f2, $f8                          \r\n"
        "pavgb $f2, $f2, $f0                            \r\n"
        "xor $f8, $f4, $f0                              \r\n"
        "and $f8, $f8, %[ff_pb_1]                       \r\n"
        "pavgb $f4, $f4, $f0                            \r\n"
        "psubusb $f4, $f4, $f8                          \r\n"
        "pavgb $f4, $f4, $f6                            \r\n"
        "psubb $f2, $f2, $f10                           \r\n"
        "psubb $f4, $f4, $f12                           \r\n"
        "and $f2, $f2, $f14                             \r\n"
        "and $f4, $f4, $f14                             \r\n"
        "paddb $f2, $f2, $f10                           \r\n"
        "paddb $f4, $f4, $f12                           \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride),
          [alpha]"r"((int64_t)alpha),[beta]"r"((int64_t)beta),
          [ff_pb_1]"f"(ff_pb_1)
        : "$f0","$f2","$f4","$f8","$f10","$f12","$f14","$f16"
    );
}

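/* Normal (tc0-based) luma deblocking across a horizontal edge, 8 pixels
 * wide; a full 16 pixel edge is handled as two calls, see
 * ff_deblock_v_luma_8_mmi() below. */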
void ff_deblock_v8_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
        int8_t *tc0)
{
    __asm__ volatile (
        "daddu $8, %[stride], %[stride]                 \r\n"
        "xor $f16, $f16, $f16                           \r\n"
        "daddu $9, %[stride], $8                        \r\n"
        "daddiu %[alpha], %[alpha], -0x1                \r\n"
        "dsubu $9, $0, $9                               \r\n"
        "daddiu %[beta], %[beta], -0x1                  \r\n"
        "daddu $9, $9, %[pix]                           \r\n"
        "ldc1 $f4, 0x0(%[pix])                          \r\n"
        "gsldxc1 $f0, 0x0($9, %[stride])                \r\n"
        "gsldxc1 $f2, 0x0($9, $8)                       \r\n"
        "gsldxc1 $f6, 0x0(%[pix], %[stride])            \r\n"
        "mtc1 %[alpha], $f8                             \r\n"
        "mtc1 %[beta], $f10                             \r\n"
        "pshufh $f8, $f8, $f16                          \r\n"
        "pshufh $f10, $f10, $f16                        \r\n"
        "packushb $f8, $f8, $f8                         \r\n"
        "packushb $f10, $f10, $f10                      \r\n"
        "psubusb $f12, $f4, $f2                         \r\n"
        "psubusb $f14, $f2, $f4                         \r\n"
        "or $f14, $f14, $f12                            \r\n"
        "psubusb $f12, $f2, $f0                         \r\n"
        "psubusb $f14, $f14, $f8                        \r\n"
        "psubusb $f8, $f0, $f2                          \r\n"
        "or $f8, $f8, $f12                              \r\n"
        "psubusb $f12, $f4, $f6                         \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "or $f14, $f14, $f8                             \r\n"
        "psubusb $f8, $f6, $f4                          \r\n"
        "or $f8, $f8, $f12                              \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "or $f14, $f14, $f8                             \r\n"
        "pcmpeqb $f14, $f14, $f16                       \r\n"
        "pcmpeqb $f6, $f6, $f6                          \r\n"
        "gslwlc1 $f8, 0x3(%[tc0])                       \r\n"
        "gslwrc1 $f8, 0x0(%[tc0])                       \r\n"
        "punpcklbh $f8, $f8, $f8                        \r\n"
        "punpcklbh $f18, $f8, $f8                       \r\n"
        "pcmpgtb $f8, $f18, $f6                         \r\n"
        "ldc1 $f6, 0x0($9)                              \r\n"
        "and $f20, $f8, $f14                            \r\n"
        "psubusb $f14, $f6, $f2                         \r\n"
        "psubusb $f12, $f2, $f6                         \r\n"
        "psubusb $f14, $f14, $f10                       \r\n"
        "psubusb $f12, $f12, $f10                       \r\n"
        "pcmpeqb $f12, $f12, $f14                       \r\n"
        "and $f12, $f12, $f20                           \r\n"
        "and $f8, $f20, $f18                            \r\n"
        "psubb $f14, $f8, $f12                          \r\n"
        "and $f12, $f12, $f8                            \r\n"
        "pavgb $f8, $f2, $f4                            \r\n"
        "ldc1 $f22, 0x0($9)                             \r\n"
        "pavgb $f6, $f6, $f8                            \r\n"
        "xor $f8, $f8, $f22                             \r\n"
        "and $f8, $f8, %[ff_pb_1]                       \r\n"
        "psubusb $f6, $f6, $f8                          \r\n"
        "psubusb $f8, $f0, $f12                         \r\n"
        "paddusb $f12, $f12, $f0                        \r\n"
        "pmaxub $f6, $f6, $f8                           \r\n"
        "pminub $f6, $f6, $f12                          \r\n"
        "gssdxc1 $f6, 0x0($9, %[stride])                \r\n"
        "gsldxc1 $f8, 0x0(%[pix], $8)                   \r\n"
        "psubusb $f6, $f8, $f4                          \r\n"
        "psubusb $f12, $f4, $f8                         \r\n"
        "psubusb $f6, $f6, $f10                         \r\n"
        "psubusb $f12, $f12, $f10                       \r\n"
        "pcmpeqb $f12, $f12, $f6                        \r\n"
        "and $f12, $f12, $f20                           \r\n"
        "psubb $f14, $f14, $f12                         \r\n"
        "and $f10, $f18, $f12                           \r\n"
        "gsldxc1 $f6, 0x0(%[pix], %[stride])            \r\n"
        "pavgb $f12, $f2, $f4                           \r\n"
        "gsldxc1 $f22, 0x0(%[pix], $8)                  \r\n"
        "pavgb $f8, $f8, $f12                           \r\n"
        "xor $f12, $f12, $f22                           \r\n"
        "and $f12, $f12, %[ff_pb_1]                     \r\n"
        "psubusb $f8, $f8, $f12                         \r\n"
        "psubusb $f12, $f6, $f10                        \r\n"
        "paddusb $f10, $f10, $f6                        \r\n"
        "pmaxub $f8, $f8, $f12                          \r\n"
        "pminub $f8, $f8, $f10                          \r\n"
        "gssdxc1 $f8, 0x0(%[pix], %[stride])            \r\n"
        "xor $f10, $f2, $f4                             \r\n"
        "pcmpeqb $f8, $f8, $f8                          \r\n"
        "and $f10, $f10, %[ff_pb_1]                     \r\n"
        "xor $f6, $f6, $f8                              \r\n"
        "xor $f8, $f8, $f2                              \r\n"
        "pavgb $f6, $f6, $f0                            \r\n"
        "pavgb $f6, $f6, %[ff_pb_3]                     \r\n"
        "pavgb $f8, $f8, $f4                            \r\n"
        "pavgb $f6, $f6, $f10                           \r\n"
        "paddusb $f6, $f6, $f8                          \r\n"
        "psubusb $f12, %[ff_pb_A1], $f6                 \r\n"
        "psubusb $f6, $f6, %[ff_pb_A1]                  \r\n"
        "pminub $f12, $f12, $f14                        \r\n"
        "pminub $f6, $f6, $f14                          \r\n"
        "psubusb $f2, $f2, $f12                         \r\n"
        "psubusb $f4, $f4, $f6                          \r\n"
        "paddusb $f2, $f2, $f6                          \r\n"
        "paddusb $f4, $f4, $f12                         \r\n"
        "gssdxc1 $f2, 0x0($9, $8)                       \r\n"
        "sdc1 $f4, 0x0(%[pix])                          \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride),
          [alpha]"r"((int64_t)alpha),[beta]"r"((int64_t)beta),[tc0]"r"(tc0),
          [ff_pb_1]"f"(ff_pb_1),[ff_pb_3]"f"(ff_pb_3),[ff_pb_A1]"f"(ff_pb_A1)
        : "$8","$9","$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16",
          "$f18","$f20","$f22"
    );
}

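/* Strong (intra) variant of the 8 pixel wide filter above; the bltz
 * branches to label 1 skip all work when alpha or beta is zero. */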
void ff_deblock_v8_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
        int beta)
{
    uint64_t stack[0xa];

    __asm__ volatile (
        "ori $8, $0, 0x1                                \r\n"
        "xor $f30, $f30, $f30                           \r\n"
        "dmtc1 $8, $f16                                 \r\n"
        "dsll $8, %[stride], 2                          \r\n"
        "daddu $10, %[stride], %[stride]                \r\n"
        "daddiu %[alpha], %[alpha], -0x1                \r\n"
        "dsll $f20, $f16, $f16                          \r\n"
        "bltz %[alpha], 1f                              \r\n"
        "daddu $9, $10, %[stride]                       \r\n"
        "daddiu %[beta], %[beta], -0x1                  \r\n"
        "bltz %[beta], 1f                               \r\n"
        "dsubu $8, $0, $8                               \r\n"
        "daddu $8, $8, %[pix]                           \r\n"
        "ldc1 $f4, 0x0(%[pix])                          \r\n"
        "gsldxc1 $f0, 0x0($8, $10)                      \r\n"
        "gsldxc1 $f2, 0x0($8, $9)                       \r\n"
        "gsldxc1 $f6, 0x0(%[pix], %[stride])            \r\n"
        "mtc1 %[alpha], $f8                             \r\n"
        "mtc1 %[beta], $f10                             \r\n"
        "pshufh $f8, $f8, $f30                          \r\n"
        "pshufh $f10, $f10, $f30                        \r\n"
        "packushb $f8, $f8, $f8                         \r\n"
        "psubusb $f12, $f4, $f2                         \r\n"
        "psubusb $f14, $f2, $f4                         \r\n"
        "packushb $f10, $f10, $f10                      \r\n"
        "or $f14, $f14, $f12                            \r\n"
        "sdc1 $f8, 0x10+%[stack]                        \r\n"
        "psubusb $f14, $f14, $f8                        \r\n"
        "psubusb $f12, $f2, $f0                         \r\n"
        "psubusb $f8, $f0, $f2                          \r\n"
        "or $f8, $f8, $f12                              \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "or $f14, $f14, $f8                             \r\n"
        "psubusb $f12, $f4, $f6                         \r\n"
        "psubusb $f8, $f6, $f4                          \r\n"
        "or $f8, $f8, $f12                              \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "or $f14, $f14, $f8                             \r\n"
        "xor $f12, $f12, $f12                           \r\n"
        "ldc1 $f8, 0x10+%[stack]                        \r\n"
        "pcmpeqb $f14, $f14, $f12                       \r\n"
        "sdc1 $f14, 0x20+%[stack]                       \r\n"
        "pavgb $f8, $f8, $f30                           \r\n"
        "psubusb $f14, $f4, $f2                         \r\n"
        "pavgb $f8, $f8, %[ff_pb_1]                     \r\n"
        "psubusb $f12, $f2, $f4                         \r\n"
        "psubusb $f14, $f14, $f8                        \r\n"
        "psubusb $f12, $f12, $f8                        \r\n"
        "ldc1 $f28, 0x20+%[stack]                       \r\n"
        "pcmpeqb $f12, $f12, $f14                       \r\n"
        "and $f12, $f12, $f28                           \r\n"
        "gsldxc1 $f28, 0x0($8, %[stride])               \r\n"
        "psubusb $f14, $f28, $f2                        \r\n"
        "psubusb $f8, $f2, $f28                         \r\n"
        "psubusb $f14, $f14, $f10                       \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "pcmpeqb $f8, $f8, $f14                         \r\n"
        "and $f8, $f8, $f12                             \r\n"
        "gsldxc1 $f26, 0x0(%[pix], $10)                 \r\n"
        "sdc1 $f8, 0x30+%[stack]                        \r\n"
        "psubusb $f14, $f26, $f4                        \r\n"
        "psubusb $f8, $f4, $f26                         \r\n"
        "psubusb $f14, $f14, $f10                       \r\n"
        "psubusb $f8, $f8, $f10                         \r\n"
        "pcmpeqb $f8, $f8, $f14                         \r\n"
        "and $f8, $f8, $f12                             \r\n"
        "sdc1 $f8, 0x40+%[stack]                        \r\n"
        "pavgb $f8, $f28, $f0                           \r\n"
        "pavgb $f10, $f2, $f4                           \r\n"
        "pavgb $f8, $f8, $f10                           \r\n"
        "sdc1 $f10, 0x10+%[stack]                       \r\n"
        "paddb $f12, $f28, $f0                          \r\n"
        "paddb $f14, $f2, $f4                           \r\n"
        "paddb $f12, $f12, $f14                         \r\n"
        "mov.d $f14, $f12                               \r\n"
        "sdc1 $f12, 0x0+%[stack]                        \r\n"
        "psrlh $f12, $f12, $f16                         \r\n"
        "pavgb $f12, $f12, $f30                         \r\n"
        "xor $f12, $f12, $f8                            \r\n"
        "and $f12, $f12, %[ff_pb_1]                     \r\n"
        "psubb $f8, $f8, $f12                           \r\n"
        "pavgb $f10, $f28, $f6                          \r\n"
        "psubb $f12, $f28, $f6                          \r\n"
        "paddb $f14, $f14, $f14                         \r\n"
        "psubb $f14, $f14, $f12                         \r\n"
        "and $f12, $f12, %[ff_pb_1]                     \r\n"
        "psubb $f10, $f10, $f12                         \r\n"
        "ldc1 $f24, 0x10+%[stack]                       \r\n"
        "pavgb $f10, $f10, $f0                          \r\n"
        "psrlh $f14, $f14, $f20                         \r\n"
        "pavgb $f10, $f10, $f24                         \r\n"
        "pavgb $f14, $f14, $f30                         \r\n"
        "xor $f14, $f14, $f10                           \r\n"
        "and $f14, $f14, %[ff_pb_1]                     \r\n"
        "psubb $f10, $f10, $f14                         \r\n"
        "xor $f14, $f2, $f6                             \r\n"
        "pavgb $f12, $f2, $f6                           \r\n"
        "and $f14, $f14, %[ff_pb_1]                     \r\n"
        "psubb $f12, $f12, $f14                         \r\n"
        "ldc1 $f24, 0x30+%[stack]                       \r\n"
        "pavgb $f12, $f12, $f0                          \r\n"
        "ldc1 $f22, 0x20+%[stack]                       \r\n"
        "xor $f10, $f10, $f12                           \r\n"
        "xor $f12, $f12, $f2                            \r\n"
        "and $f10, $f10, $f24                           \r\n"
        "and $f12, $f12, $f22                           \r\n"
        "xor $f10, $f10, $f12                           \r\n"
        "xor $f10, $f10, $f2                            \r\n"
        "gssdxc1 $f10, 0x0($8, $9)                      \r\n"
        "ldc1 $f10, 0x0($8)                             \r\n"
        "paddb $f12, $f28, $f10                         \r\n"
        "pavgb $f10, $f10, $f28                         \r\n"
        "ldc1 $f22, 0x0+%[stack]                        \r\n"
        "pavgb $f10, $f10, $f8                          \r\n"
        "paddb $f12, $f12, $f12                         \r\n"
        "paddb $f12, $f12, $f22                         \r\n"
        "psrlh $f12, $f12, $f20                         \r\n"
        "pavgb $f12, $f12, $f30                         \r\n"
        "xor $f12, $f12, $f10                           \r\n"
        "and $f12, $f12, %[ff_pb_1]                     \r\n"
        "ldc1 $f22, 0x30+%[stack]                       \r\n"
        "psubb $f10, $f10, $f12                         \r\n"
        "xor $f8, $f8, $f0                              \r\n"
        "xor $f10, $f10, $f28                           \r\n"
        "and $f8, $f8, $f22                             \r\n"
        "and $f10, $f10, $f22                           \r\n"
        "xor $f8, $f8, $f0                              \r\n"
        "xor $f10, $f10, $f28                           \r\n"
        "gssdxc1 $f8, 0x0($8, $10)                      \r\n"
        "gssdxc1 $f10, 0x0($8, %[stride])               \r\n"
        "pavgb $f8, $f26, $f6                           \r\n"
        "pavgb $f10, $f4, $f2                           \r\n"
        "pavgb $f8, $f8, $f10                           \r\n"
        "sdc1 $f10, 0x10+%[stack]                       \r\n"
        "paddb $f12, $f26, $f6                          \r\n"
        "paddb $f14, $f4, $f2                           \r\n"
        "paddb $f12, $f12, $f14                         \r\n"
        "mov.d $f14, $f12                               \r\n"
        "sdc1 $f12, 0x0+%[stack]                        \r\n"
        "psrlh $f12, $f12, $f16                         \r\n"
        "pavgb $f12, $f12, $f30                         \r\n"
        "xor $f12, $f12, $f8                            \r\n"
        "and $f12, $f12, %[ff_pb_1]                     \r\n"
        "psubb $f8, $f8, $f12                           \r\n"
        "pavgb $f10, $f26, $f0                          \r\n"
        "paddb $f14, $f14, $f14                         \r\n"
        "psubb $f12, $f26, $f0                          \r\n"
        "psubb $f14, $f14, $f12                         \r\n"
        "and $f12, $f12, %[ff_pb_1]                     \r\n"
        "psubb $f10, $f10, $f12                         \r\n"
        "ldc1 $f22, 0x10+%[stack]                       \r\n"
        "pavgb $f10, $f10, $f6                          \r\n"
        "pavgb $f10, $f10, $f22                         \r\n"
        "psrlh $f14, $f14, $f20                         \r\n"
        "pavgb $f14, $f14, $f30                         \r\n"
        "xor $f14, $f14, $f10                           \r\n"
        "and $f14, $f14, %[ff_pb_1]                     \r\n"
        "psubb $f10, $f10, $f14                         \r\n"
        "xor $f14, $f4, $f0                             \r\n"
        "pavgb $f12, $f4, $f0                           \r\n"
        "and $f14, $f14, %[ff_pb_1]                     \r\n"
        "ldc1 $f22, 0x40+%[stack]                       \r\n"
        "psubb $f12, $f12, $f14                         \r\n"
        "ldc1 $f24, 0x20+%[stack]                       \r\n"
        "pavgb $f12, $f12, $f6                          \r\n"
        "xor $f10, $f10, $f12                           \r\n"
        "xor $f12, $f12, $f4                            \r\n"
        "and $f10, $f10, $f22                           \r\n"
        "and $f12, $f12, $f24                           \r\n"
        "xor $f10, $f10, $f12                           \r\n"
        "xor $f10, $f10, $f4                            \r\n"
        "sdc1 $f10, 0x0(%[pix])                         \r\n"
        "gsldxc1 $f10, 0x0(%[pix], $9)                  \r\n"
        "paddb $f12, $f26, $f10                         \r\n"
        "pavgb $f10, $f10, $f26                         \r\n"
        "ldc1 $f22, 0x0+%[stack]                        \r\n"
        "pavgb $f10, $f10, $f8                          \r\n"
        "paddb $f12, $f12, $f12                         \r\n"
        "paddb $f12, $f12, $f22                         \r\n"
        "psrlh $f12, $f12, $f20                         \r\n"
        "pavgb $f12, $f12, $f30                         \r\n"
        "xor $f12, $f12, $f10                           \r\n"
        "and $f12, $f12, %[ff_pb_1]                     \r\n"
        "ldc1 $f22, 0x40+%[stack]                       \r\n"
        "psubb $f10, $f10, $f12                         \r\n"
        "xor $f8, $f8, $f6                              \r\n"
        "xor $f10, $f10, $f26                           \r\n"
        "and $f8, $f8, $f22                             \r\n"
        "and $f10, $f10, $f22                           \r\n"
        "xor $f8, $f8, $f6                              \r\n"
        "xor $f10, $f10, $f26                           \r\n"
        "gssdxc1 $f8, 0x0(%[pix], %[stride])            \r\n"
        "gssdxc1 $f10, 0x0(%[pix], $10)                 \r\n"
        "1:                                             \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride),
          [alpha]"r"((int64_t)alpha),[beta]"r"((int64_t)beta),
          [stack]"m"(stack[0]),[ff_pb_1]"f"(ff_pb_1)
        : "$8","$9","$10","$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14",
          "$f16","$f18","$f20","$f22","$f24","$f26","$f28","$f30"
    );
}

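/* Chroma deblocking across a horizontal edge: load the two rows on each
 * side of the edge, run the shared filter core, store the two middle rows
 * back. */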
void ff_deblock_v_chroma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
        int8_t *tc0)
{
    __asm__ volatile (
        "daddiu %[alpha], %[alpha], -0x1                \r\n"
        "daddiu %[beta], %[beta], -0x1                  \r\n"
        "or $16, $0, %[pix]                             \r\n"
        "dsubu $16, $16, %[stride]                      \r\n"
        "dsubu $16, $16, %[stride]                      \r\n"
        "ldc1 $f0, 0x0($16)                             \r\n"
        "gsldxc1 $f2, 0x0($16, %[stride])               \r\n"
        "ldc1 $f4, 0x0(%[pix])                          \r\n"
        "gsldxc1 $f6, 0x0(%[pix], %[stride])            \r\n"
        : [pix]"+r"(pix),[stride]"+r"(stride),[alpha]"+r"(alpha),
          [beta]"+r"(beta)
        : [tc0]"r"(tc0)
        : "$16","$f2","$f4"
    );

    chroma_inter_body_mmi(pix, stride, alpha, beta, tc0);

    __asm__ volatile (
        "gssdxc1 $f2, 0x0($16, %[stride])               \r\n"
        "sdc1 $f4, 0x0(%[pix])                          \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride)
        : "$16","$f2","$f4"
    );
}

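/* Strong (intra) variant of the horizontal chroma edge filter. */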
void ff_deblock_v_chroma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
        int beta)
{
    __asm__ volatile (
        "daddiu %[alpha], %[alpha], -0x1                \r\n"
        "daddiu %[beta], %[beta], -0x1                  \r\n"
        "or $16, $0, %[pix]                             \r\n"
        "dsubu $16, $16, %[stride]                      \r\n"
        "dsubu $16, $16, %[stride]                      \r\n"
        "ldc1 $f0, 0x0($16)                             \r\n"
        "gsldxc1 $f2, 0x0($16, %[stride])               \r\n"
        "ldc1 $f4, 0x0(%[pix])                          \r\n"
        "gsldxc1 $f6, 0x0(%[pix], %[stride])            \r\n"
        : [pix]"+r"(pix),[stride]"+r"(stride),[alpha]"+r"(alpha),
          [beta]"+r"(beta)
        ::"$16","$f0","$f2","$f4","$f6"
    );

    chroma_intra_body_mmi(pix, stride, alpha, beta);

    __asm__ volatile (
        "gssdxc1 $f2, 0x0($16, %[stride])               \r\n"
        "sdc1 $f4, 0x0(%[pix])                          \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride)
        : "$16","$f2","$f4"
    );
}

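/* Chroma deblocking across a vertical edge: gather 4x8 pixels around the
 * edge with unaligned loads, transpose them so the shared filter core can
 * be reused, then transpose back and scatter the filtered bytes. */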
void ff_deblock_h_chroma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
        int8_t *tc0)
{
    __asm__ volatile (
        "daddiu %[alpha], %[alpha], -0x1                \r\n"
        "daddiu %[beta], %[beta], -0x1                  \r\n"
        "daddu $16, %[stride], %[stride]                \r\n"
        "daddiu %[pix], %[pix], -0x2                    \r\n"
        "daddu $17, $16, %[stride]                      \r\n"
        "daddu $19, $16, $16                            \r\n"
        "or $18, $0, %[pix]                             \r\n"
        "daddu %[pix], %[pix], $17                      \r\n"
        "gslwlc1 $f0, 0x3($18)                          \r\n"
        "daddu $12, $18, %[stride]                      \r\n"
        "gslwrc1 $f0, 0x0($18)                          \r\n"
        "gslwlc1 $f4, 0x3($12)                          \r\n"
        "daddu $13, $18, $16                            \r\n"
        "gslwrc1 $f4, 0x0($12)                          \r\n"
        "gslwlc1 $f2, 0x3($13)                          \r\n"
        "gslwrc1 $f2, 0x0($13)                          \r\n"
        "gslwlc1 $f6, 0x3(%[pix])                       \r\n"
        "gslwrc1 $f6, 0x0(%[pix])                       \r\n"
        "punpcklbh $f0, $f0, $f4                        \r\n"
        "punpcklbh $f2, $f2, $f6                        \r\n"
        "daddu $12, %[pix], %[stride]                   \r\n"
        "punpckhhw $f4, $f0, $f2                        \r\n"
        "punpcklhw $f0, $f0, $f2                        \r\n"
        "gslwlc1 $f8, 0x3($12)                          \r\n"
        "daddu $13, %[pix], $16                         \r\n"
        "gslwrc1 $f8, 0x0($12)                          \r\n"
        "gslwlc1 $f12, 0x3($13)                         \r\n"
        "daddu $12, %[pix], $17                         \r\n"
        "gslwrc1 $f12, 0x0($13)                         \r\n"
        "gslwlc1 $f10, 0x3($12)                         \r\n"
        "daddu $13, %[pix], $19                         \r\n"
        "gslwrc1 $f10, 0x0($12)                         \r\n"
        "gslwlc1 $f14, 0x3($13)                         \r\n"
        "gslwrc1 $f14, 0x0($13)                         \r\n"
        "punpcklbh $f8, $f8, $f12                       \r\n"
        "punpcklbh $f10, $f10, $f14                     \r\n"
        "mov.d $f12, $f8                                \r\n"
        "punpcklhw $f8, $f8, $f10                       \r\n"
        "punpckhhw $f12, $f12, $f10                     \r\n"
        "punpckhwd $f2, $f0, $f8                        \r\n"
        "punpckhwd $f6, $f4, $f12                       \r\n"
        "punpcklwd $f0, $f0, $f8                        \r\n"
        "punpcklwd $f4, $f4, $f12                       \r\n"
        "mov.d $f20, $f0                                \r\n"
        "mov.d $f22, $f6                                \r\n"
        : [pix]"+r"(pix),[stride]"+r"(stride),[alpha]"+r"(alpha),
          [beta]"+r"(beta)
        ::"$12","$13","$16","$17","$18","$19","$f0","$f2","$f4","$f6","$f8",
          "$f10","$f12","$f14","$f20","$f22"
    );

    chroma_inter_body_mmi(pix, stride, alpha, beta, tc0);

    __asm__ volatile (
        "punpckhwd $f8, $f20, $f20                      \r\n"
        "punpckhwd $f10, $f2, $f2                       \r\n"
        "punpckhwd $f12, $f4, $f4                       \r\n"
        "punpcklbh $f0, $f20, $f2                       \r\n"
        "punpcklbh $f4, $f4, $f22                       \r\n"
        "punpcklhw $f2, $f0, $f4                        \r\n"
        "punpckhhw $f0, $f0, $f4                        \r\n"
        "gsswlc1 $f2, 0x3($18)                          \r\n"
        "gsswrc1 $f2, 0x0($18)                          \r\n"
        "daddu $12, $18, %[stride]                      \r\n"
        "punpckhwd $f2, $f2, $f2                        \r\n"
        "gsswlc1 $f2, 0x3($12)                          \r\n"
        "daddu $13, $18, $16                            \r\n"
        "gsswrc1 $f2, 0x0($12)                          \r\n"
        "gsswlc1 $f0, 0x3($13)                          \r\n"
        "gsswrc1 $f0, 0x0($13)                          \r\n"
        "punpckhwd $f0, $f0, $f0                        \r\n"
        "punpckhwd $f6, $f22, $f22                      \r\n"
        "gsswlc1 $f0, 0x3(%[pix])                       \r\n"
        "gsswrc1 $f0, 0x0(%[pix])                       \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "punpcklbh $f12, $f12, $f6                      \r\n"
        "daddu $12, %[pix], %[stride]                   \r\n"
        "punpcklhw $f10, $f8, $f12                      \r\n"
        "punpckhhw $f8, $f8, $f12                       \r\n"
        "gsswlc1 $f10, 0x3($12)                         \r\n"
        "gsswrc1 $f10, 0x0($12)                         \r\n"
        "punpckhwd $f10, $f10, $f10                     \r\n"
        "daddu $12, %[pix], $16                         \r\n"
        "daddu $13, %[pix], $17                         \r\n"
        "gsswlc1 $f10, 0x3($12)                         \r\n"
        "gsswrc1 $f10, 0x0($12)                         \r\n"
        "gsswlc1 $f8, 0x3($13)                          \r\n"
        "daddu $12, %[pix], $19                         \r\n"
        "punpckhwd $f20, $f8, $f8                       \r\n"
        "gsswrc1 $f8, 0x0($13)                          \r\n"
        "gsswlc1 $f20, 0x3($12)                         \r\n"
        "gsswrc1 $f20, 0x0($12)                         \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride)
        : "$12","$13","$16","$17","$18","$19","$f0","$f2","$f4","$f6","$f8",
          "$f10","$f12","$f20"
    );
}

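/* Strong (intra) variant of the vertical chroma edge filter, using the
 * same transpose/filter/transpose scheme as above. */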
void ff_deblock_h_chroma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
        int beta)
{
    __asm__ volatile (
        "daddiu %[alpha], %[alpha], -0x1                \r\n"
        "daddiu %[beta], %[beta], -0x1                  \r\n"
        "daddu $16, %[stride], %[stride]                \r\n"
        "daddiu %[pix], %[pix], -0x2                    \r\n"
        "daddu $17, $16, %[stride]                      \r\n"
        "daddu $19, $16, $16                            \r\n"
        "or $18, $0, %[pix]                             \r\n"
        "daddu %[pix], %[pix], $17                      \r\n"
        "gslwlc1 $f0, 0x3($18)                          \r\n"
        "daddu $12, $18, %[stride]                      \r\n"
        "gslwrc1 $f0, 0x0($18)                          \r\n"
        "gslwlc1 $f4, 0x3($12)                          \r\n"
        "daddu $13, $18, $16                            \r\n"
        "gslwrc1 $f4, 0x0($12)                          \r\n"
        "gslwlc1 $f2, 0x3($13)                          \r\n"
        "gslwrc1 $f2, 0x0($13)                          \r\n"
        "gslwlc1 $f6, 0x3(%[pix])                       \r\n"
        "gslwrc1 $f6, 0x0(%[pix])                       \r\n"
        "punpcklbh $f0, $f0, $f4                        \r\n"
        "punpcklbh $f2, $f2, $f6                        \r\n"
        "daddu $12, %[pix], %[stride]                   \r\n"
        "punpckhhw $f4, $f0, $f2                        \r\n"
        "punpcklhw $f0, $f0, $f2                        \r\n"
        "gslwlc1 $f8, 0x3($12)                          \r\n"
        "daddu $13, %[pix], $16                         \r\n"
        "gslwrc1 $f8, 0x0($12)                          \r\n"
        "gslwlc1 $f12, 0x3($13)                         \r\n"
        "daddu $12, %[pix], $17                         \r\n"
        "gslwrc1 $f12, 0x0($13)                         \r\n"
        "gslwlc1 $f10, 0x3($12)                         \r\n"
        "daddu $13, %[pix], $19                         \r\n"
        "gslwrc1 $f10, 0x0($12)                         \r\n"
        "gslwlc1 $f14, 0x3($13)                         \r\n"
        "gslwrc1 $f14, 0x0($13)                         \r\n"
        "punpcklbh $f8, $f8, $f12                       \r\n"
        "punpcklbh $f10, $f10, $f14                     \r\n"
        "mov.d $f12, $f8                                \r\n"
        "punpcklhw $f8, $f8, $f10                       \r\n"
        "punpckhhw $f12, $f12, $f10                     \r\n"
        "punpckhwd $f2, $f0, $f8                        \r\n"
        "punpckhwd $f6, $f4, $f12                       \r\n"
        "punpcklwd $f0, $f0, $f8                        \r\n"
        "punpcklwd $f4, $f4, $f12                       \r\n"
        : [pix]"+r"(pix),[stride]"+r"(stride),[alpha]"+r"(alpha),
          [beta]"+r"(beta)
        ::"$12","$13","$16","$17","$18","$19","$f0","$f2","$f4","$f6","$f8",
          "$f10","$f12","$f14","$f20","$f22"
    );

    chroma_intra_body_mmi(pix, stride, alpha, beta);

    __asm__ volatile (
        "punpckhwd $f8, $f0, $f0                        \r\n"
        "punpckhwd $f10, $f2, $f2                       \r\n"
        "punpckhwd $f12, $f4, $f4                       \r\n"
        "punpcklbh $f0, $f0, $f2                        \r\n"
        "punpcklbh $f4, $f4, $f6                        \r\n"
        "punpcklhw $f2, $f0, $f4                        \r\n"
        "punpckhhw $f0, $f0, $f4                        \r\n"
        "gsswlc1 $f2, 0x3($18)                          \r\n"
        "gsswrc1 $f2, 0x0($18)                          \r\n"
        "daddu $12, $18, %[stride]                      \r\n"
        "punpckhwd $f2, $f2, $f2                        \r\n"
        "gsswlc1 $f2, 0x3($12)                          \r\n"
        "daddu $13, $18, $16                            \r\n"
        "gsswrc1 $f2, 0x0($12)                          \r\n"
        "gsswlc1 $f0, 0x3($13)                          \r\n"
        "gsswrc1 $f0, 0x0($13)                          \r\n"
        "punpckhwd $f0, $f0, $f0                        \r\n"
        "punpckhwd $f6, $f6, $f6                        \r\n"
        "gsswlc1 $f0, 0x3(%[pix])                       \r\n"
        "gsswrc1 $f0, 0x0(%[pix])                       \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "punpcklbh $f12, $f12, $f6                      \r\n"
        "daddu $12, %[pix], %[stride]                   \r\n"
        "punpcklhw $f10, $f8, $f12                      \r\n"
        "punpckhhw $f8, $f8, $f12                       \r\n"
        "gsswlc1 $f10, 0x3($12)                         \r\n"
        "gsswrc1 $f10, 0x0($12)                         \r\n"
        "punpckhwd $f10, $f10, $f10                     \r\n"
        "daddu $12, %[pix], $16                         \r\n"
        "daddu $13, %[pix], $17                         \r\n"
        "gsswlc1 $f10, 0x3($12)                         \r\n"
        "gsswrc1 $f10, 0x0($12)                         \r\n"
        "gsswlc1 $f8, 0x3($13)                          \r\n"
        "daddu $12, %[pix], $19                         \r\n"
        "punpckhwd $f20, $f8, $f8                       \r\n"
        "gsswrc1 $f8, 0x0($13)                          \r\n"
        "gsswlc1 $f20, 0x3($12)                         \r\n"
        "gsswrc1 $f20, 0x0($12)                         \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride)
        : "$12","$13","$16","$17","$18","$19","$f0","$f2","$f4","$f6","$f8",
          "$f10","$f12","$f20"
    );
}

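/* A 16 pixel wide luma edge is filtered as two 8 pixel halves. tc0[i] < 0
 * disables a 4 pixel segment, and the sign bit survives the AND only if
 * both values are negative, so each half is skipped only when both of its
 * segments are disabled. */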
void ff_deblock_v_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
        int8_t *tc0)
{
    if ((tc0[0] & tc0[1]) >= 0)
        ff_deblock_v8_luma_8_mmi(pix + 0, stride, alpha, beta, tc0);
    if ((tc0[2] & tc0[3]) >= 0)
        ff_deblock_v8_luma_8_mmi(pix + 8, stride, alpha, beta, tc0 + 2);
}

void ff_deblock_v_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
        int beta)
{
    ff_deblock_v8_luma_intra_8_mmi(pix + 0, stride, alpha, beta);
    ff_deblock_v8_luma_intra_8_mmi(pix + 8, stride, alpha, beta);
}

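/* Luma deblocking across a vertical edge: transpose the 8x16 pixel
 * neighbourhood of the edge into the stack buffer, filter it there with the
 * horizontal-edge routine and a 0x10 byte stride, then transpose the
 * modified columns back. */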
void ff_deblock_h_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
        int8_t *tc0)
{
    uint64_t stack[0xd];

    __asm__ volatile (
        "daddu $15, %[stride], %[stride]                \r\n"
        "daddiu $8, %[pix], -0x4                        \r\n"
        "daddu $9, %[stride], $15                       \r\n"
        "gsldlc1 $f0, 0x7($8)                           \r\n"
        "gsldrc1 $f0, 0x0($8)                           \r\n"
        "daddu $12, $8, %[stride]                       \r\n"
        "daddu $10, $8, $9                              \r\n"
        "gsldlc1 $f2, 0x7($12)                          \r\n"
        "daddu $11, $8, $15                             \r\n"
        "gsldrc1 $f2, 0x0($12)                          \r\n"
        "gsldlc1 $f4, 0x7($11)                          \r\n"
        "gsldrc1 $f4, 0x0($11)                          \r\n"
        "gsldlc1 $f6, 0x7($10)                          \r\n"
        "daddu $12, $10, %[stride]                      \r\n"
        "gsldrc1 $f6, 0x0($10)                          \r\n"
        "gsldlc1 $f8, 0x7($12)                          \r\n"
        "daddu $11, $10, $15                            \r\n"
        "gsldrc1 $f8, 0x0($12)                          \r\n"
        "gsldlc1 $f10, 0x7($11)                         \r\n"
        "daddu $12, $10, $9                             \r\n"
        "gsldrc1 $f10, 0x0($11)                         \r\n"
        "gsldlc1 $f12, 0x7($12)                         \r\n"
        "gsldrc1 $f12, 0x0($12)                         \r\n"
        "daddu $14, $15, $15                            \r\n"
        "punpckhbh $f14, $f0, $f2                       \r\n"
        "punpcklbh $f0, $f0, $f2                        \r\n"
        "punpckhbh $f2, $f4, $f6                        \r\n"
        "punpcklbh $f4, $f4, $f6                        \r\n"
        "punpckhbh $f6, $f8, $f10                       \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "daddu $12, $10, $14                            \r\n"
        "sdc1 $f2, 0x10+%[stack]                        \r\n"
        "gsldlc1 $f16, 0x7($12)                         \r\n"
        "gsldrc1 $f16, 0x0($12)                         \r\n"
        "daddu $13, $14, $14                            \r\n"
        "punpckhbh $f10, $f12, $f16                     \r\n"
        "punpcklbh $f12, $f12, $f16                     \r\n"
        "punpckhhw $f2, $f0, $f4                        \r\n"
        "punpcklhw $f0, $f0, $f4                        \r\n"
        "punpckhhw $f4, $f8, $f12                       \r\n"
        "punpcklhw $f8, $f8, $f12                       \r\n"
        "ldc1 $f16, 0x10+%[stack]                       \r\n"
        "punpckhwd $f0, $f0, $f8                        \r\n"
        "sdc1 $f0, 0x0+%[stack]                         \r\n"
        "punpckhhw $f12, $f14, $f16                     \r\n"
        "punpcklhw $f14, $f14, $f16                     \r\n"
        "punpckhhw $f0, $f6, $f10                       \r\n"
        "punpcklhw $f6, $f6, $f10                       \r\n"
        "punpcklwd $f12, $f12, $f0                      \r\n"
        "punpckhwd $f10, $f14, $f6                      \r\n"
        "punpcklwd $f14, $f14, $f6                      \r\n"
        "punpckhwd $f6, $f2, $f4                        \r\n"
        "punpcklwd $f2, $f2, $f4                        \r\n"
        "sdc1 $f2, 0x10+%[stack]                        \r\n"
        "sdc1 $f6, 0x20+%[stack]                        \r\n"
        "sdc1 $f14, 0x30+%[stack]                       \r\n"
        "sdc1 $f10, 0x40+%[stack]                       \r\n"
        "sdc1 $f12, 0x50+%[stack]                       \r\n"
        "daddu $8, $8, $13                              \r\n"
        "daddu $10, $10, $13                            \r\n"
        "gsldlc1 $f0, 0x7($8)                           \r\n"
        "daddu $12, $8, %[stride]                       \r\n"
        "gsldrc1 $f0, 0x0($8)                           \r\n"
        "gsldlc1 $f2, 0x7($12)                          \r\n"
        "daddu $11, $8, $15                             \r\n"
        "gsldrc1 $f2, 0x0($12)                          \r\n"
        "gsldlc1 $f4, 0x7($11)                          \r\n"
        "gsldrc1 $f4, 0x0($11)                          \r\n"
        "gsldlc1 $f6, 0x7($10)                          \r\n"
        "daddu $12, $10, %[stride]                      \r\n"
        "gsldrc1 $f6, 0x0($10)                          \r\n"
        "gsldlc1 $f8, 0x7($12)                          \r\n"
        "daddu $11, $10, $15                            \r\n"
        "gsldrc1 $f8, 0x0($12)                          \r\n"
        "gsldlc1 $f10, 0x7($11)                         \r\n"
        "daddu $12, $10, $9                             \r\n"
        "gsldrc1 $f10, 0x0($11)                         \r\n"
        "gsldlc1 $f12, 0x7($12)                         \r\n"
        "gsldrc1 $f12, 0x0($12)                         \r\n"
        "punpckhbh $f14, $f0, $f2                       \r\n"
        "punpcklbh $f0, $f0, $f2                        \r\n"
        "punpckhbh $f2, $f4, $f6                        \r\n"
        "punpcklbh $f4, $f4, $f6                        \r\n"
        "punpckhbh $f6, $f8, $f10                       \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "daddu $12, $10, $14                            \r\n"
        "sdc1 $f2, 0x18+%[stack]                        \r\n"
        "gsldlc1 $f16, 0x7($12)                         \r\n"
        "gsldrc1 $f16, 0x0($12)                         \r\n"
        "punpckhhw $f2, $f0, $f4                        \r\n"
        "punpckhbh $f10, $f12, $f16                     \r\n"
        "punpcklbh $f12, $f12, $f16                     \r\n"
        "punpcklhw $f0, $f0, $f4                        \r\n"
        "punpckhhw $f4, $f8, $f12                       \r\n"
        "punpcklhw $f8, $f8, $f12                       \r\n"
        "punpckhwd $f0, $f0, $f8                        \r\n"
        "ldc1 $f16, 0x18+%[stack]                       \r\n"
        "sdc1 $f0, 0x8+%[stack]                         \r\n"
        "punpckhhw $f12, $f14, $f16                     \r\n"
        "punpcklhw $f14, $f14, $f16                     \r\n"
        "punpckhhw $f0, $f6, $f10                       \r\n"
        "punpcklhw $f6, $f6, $f10                       \r\n"
        "punpckhwd $f10, $f14, $f6                      \r\n"
        "punpcklwd $f14, $f14, $f6                      \r\n"
        "punpckhwd $f6, $f2, $f4                        \r\n"
        "punpcklwd $f2, $f2, $f4                        \r\n"
        "punpcklwd $f12, $f12, $f0                      \r\n"
        "sdc1 $f2, 0x18+%[stack]                        \r\n"
        "sdc1 $f6, 0x28+%[stack]                        \r\n"
        "sdc1 $f14, 0x38+%[stack]                       \r\n"
        "sdc1 $f10, 0x48+%[stack]                       \r\n"
        "sdc1 $f12, 0x58+%[stack]                       \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride),[stack]"m"(stack[0])
        : "$8","$9","$10","$11","$12","$13","$14","$15","$f0","$f2","$f4",
          "$f6","$f8","$f10","$f12","$f14","$f16"
    );

    ff_deblock_v_luma_8_mmi((uint8_t *) &stack[6], 0x10, alpha, beta, tc0);

    __asm__ volatile (
        "daddu $15, %[stride], %[stride]                \r\n"
        "daddiu $8, %[pix], -0x2                        \r\n"
        "daddu $14, $15, $15                            \r\n"
        "daddu $9, $15, %[stride]                       \r\n"
        "daddu $13, $14, $14                            \r\n"
        "daddu $10, $8, $9                              \r\n"
        "ldc1 $f0, 0x10+%[stack]                        \r\n"
        "ldc1 $f2, 0x20+%[stack]                        \r\n"
        "ldc1 $f4, 0x30+%[stack]                        \r\n"
        "ldc1 $f6, 0x40+%[stack]                        \r\n"
        "punpckhwd $f8, $f0, $f0                        \r\n"
        "punpckhwd $f10, $f2, $f2                       \r\n"
        "punpckhwd $f12, $f4, $f4                       \r\n"
        "punpcklbh $f0, $f0, $f2                        \r\n"
        "punpcklbh $f4, $f4, $f6                        \r\n"
        "punpcklhw $f2, $f0, $f4                        \r\n"
        "punpckhhw $f0, $f0, $f4                        \r\n"
        "gsswlc1 $f2, 0x3($8)                           \r\n"
        "gsswrc1 $f2, 0x0($8)                           \r\n"
        "daddu $12, $8, %[stride]                       \r\n"
        "punpckhwd $f2, $f2, $f2                        \r\n"
        "daddu $11, $8, $15                             \r\n"
        "gsswlc1 $f2, 0x3($12)                          \r\n"
        "gsswrc1 $f2, 0x0($12)                          \r\n"
        "gsswlc1 $f0, 0x3($11)                          \r\n"
        "gsswrc1 $f0, 0x0($11)                          \r\n"
        "punpckhwd $f0, $f0, $f0                        \r\n"
        "punpckhwd $f6, $f6, $f6                        \r\n"
        "gsswlc1 $f0, 0x3($10)                          \r\n"
        "gsswrc1 $f0, 0x0($10)                          \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "punpcklbh $f12, $f12, $f6                      \r\n"
        "punpcklhw $f10, $f8, $f12                      \r\n"
        "daddu $12, $10, %[stride]                      \r\n"
        "punpckhhw $f8, $f8, $f12                       \r\n"
        "gsswlc1 $f10, 0x3($12)                         \r\n"
        "gsswrc1 $f10, 0x0($12)                         \r\n"
        "daddu $12, $10, $15                            \r\n"
        "punpckhwd $f10, $f10, $f10                     \r\n"
        "daddu $11, $10, $9                             \r\n"
        "gsswlc1 $f10, 0x3($12)                         \r\n"
        "gsswrc1 $f10, 0x0($12)                         \r\n"
        "gsswlc1 $f8, 0x3($11)                          \r\n"
        "gsswrc1 $f8, 0x0($11)                          \r\n"
        "daddu $12, $10, $14                            \r\n"
        "punpckhwd $f8, $f8, $f8                        \r\n"
        "daddu $8, $8, $13                              \r\n"
        "gsswlc1 $f8, 0x3($12)                          \r\n"
        "gsswrc1 $f8, 0x0($12)                          \r\n"
        "daddu $10, $10, $13                            \r\n"
        "ldc1 $f0, 0x18+%[stack]                        \r\n"
        "ldc1 $f2, 0x28+%[stack]                        \r\n"
        "ldc1 $f4, 0x38+%[stack]                        \r\n"
        "ldc1 $f6, 0x48+%[stack]                        \r\n"
        "daddu $15, %[stride], %[stride]                \r\n"
        "punpckhwd $f8, $f0, $f0                        \r\n"
        "daddu $14, $15, $15                            \r\n"
        "punpckhwd $f10, $f2, $f2                       \r\n"
        "punpckhwd $f12, $f4, $f4                       \r\n"
        "punpcklbh $f0, $f0, $f2                        \r\n"
        "punpcklbh $f4, $f4, $f6                        \r\n"
        "daddu $12, $8, %[stride]                       \r\n"
        "punpcklhw $f2, $f0, $f4                        \r\n"
        "punpckhhw $f0, $f0, $f4                        \r\n"
        "gsswlc1 $f2, 0x3($8)                           \r\n"
        "gsswrc1 $f2, 0x0($8)                           \r\n"
        "punpckhwd $f2, $f2, $f2                        \r\n"
        "daddu $11, $8, $15                             \r\n"
        "gsswlc1 $f2, 0x3($12)                          \r\n"
        "gsswrc1 $f2, 0x0($12)                          \r\n"
        "gsswlc1 $f0, 0x3($11)                          \r\n"
        "gsswrc1 $f0, 0x0($11)                          \r\n"
        "punpckhwd $f0, $f0, $f0                        \r\n"
        "punpckhwd $f6, $f6, $f6                        \r\n"
        "gsswlc1 $f0, 0x3($10)                          \r\n"
        "gsswrc1 $f0, 0x0($10)                          \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "punpcklbh $f12, $f12, $f6                      \r\n"
        "daddu $12, $10, %[stride]                      \r\n"
        "punpcklhw $f10, $f8, $f12                      \r\n"
        "punpckhhw $f8, $f8, $f12                       \r\n"
        "gsswlc1 $f10, 0x3($12)                         \r\n"
        "gsswrc1 $f10, 0x0($12)                         \r\n"
        "daddu $12, $10, $15                            \r\n"
        "punpckhwd $f10, $f10, $f10                     \r\n"
        "daddu $11, $10, $9                             \r\n"
        "gsswlc1 $f10, 0x3($12)                         \r\n"
        "gsswrc1 $f10, 0x0($12)                         \r\n"
        "gsswlc1 $f8, 0x3($11)                          \r\n"
        "gsswrc1 $f8, 0x0($11)                          \r\n"
        "daddu $12, $10, $14                            \r\n"
        "punpckhwd $f8, $f8, $f8                        \r\n"
        "gsswlc1 $f8, 0x3($12)                          \r\n"
        "gsswrc1 $f8, 0x0($12)                          \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride),[stack]"m"(stack[0])
        : "$8","$9","$10","$11","$12","$13","$14","$15","$f0","$f2","$f4",
          "$f6","$f8","$f10","$f12","$f14","$f16"
    );
}

void ff_deblock_h_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
        int beta)
{
    uint64_t ptmp[0x11]; /* transposed pixel tile for the vertical filter */
    uint64_t pdat[4];    /* row pointer and stride multiples saved between passes */

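    /* Pass 1: load 16 rows of 8 pixels starting 4 bytes left of the
     * edge and transpose them into ptmp, so each pixel column becomes
     * a row that the vertical intra filter can process. */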
    __asm__ volatile (
        "daddu $12, %[stride], %[stride]                \r\n"
        "daddiu $10, %[pix], -0x4                       \r\n"
        "daddu $11, $12, %[stride]                      \r\n"
        "daddu $13, $12, $12                            \r\n"
        "daddu $9, $10, $11                             \r\n"
        "daddu $8, $10, %[stride]                       \r\n"
        "gsldlc1 $f0, 0x7($10)                          \r\n"
        "gsldrc1 $f0, 0x0($10)                          \r\n"
        "daddu $14, $10, $12                            \r\n"
        "gsldlc1 $f2, 0x7($8)                           \r\n"
        "gsldrc1 $f2, 0x0($8)                           \r\n"
        "gsldlc1 $f4, 0x7($14)                          \r\n"
        "gsldrc1 $f4, 0x0($14)                          \r\n"
        "daddu $8, $9, %[stride]                        \r\n"
        "gsldlc1 $f6, 0x7($9)                           \r\n"
        "gsldrc1 $f6, 0x0($9)                           \r\n"
        "daddu $14, $9, $12                             \r\n"
        "gsldlc1 $f8, 0x7($8)                           \r\n"
        "gsldrc1 $f8, 0x0($8)                           \r\n"
        "daddu $8, $9, $11                              \r\n"
        "gsldlc1 $f10, 0x7($14)                         \r\n"
        "gsldrc1 $f10, 0x0($14)                         \r\n"
        "gsldlc1 $f12, 0x7($8)                          \r\n"
        "gsldrc1 $f12, 0x0($8)                          \r\n"
        "daddu $8, $9, $13                              \r\n"
        "punpckhbh $f14, $f0, $f2                       \r\n"
        "punpcklbh $f0, $f0, $f2                        \r\n"
        "punpckhbh $f2, $f4, $f6                        \r\n"
        "punpcklbh $f4, $f4, $f6                        \r\n"
        "punpckhbh $f6, $f8, $f10                       \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "gsldlc1 $f16, 0x7($8)                          \r\n"
        "gsldrc1 $f16, 0x0($8)                          \r\n"
        "punpckhbh $f10, $f12, $f16                     \r\n"
        "punpcklbh $f12, $f12, $f16                     \r\n"
        "sdc1 $f6, 0x0+%[ptmp]                          \r\n"
        "punpckhhw $f6, $f0, $f4                        \r\n"
        "punpcklhw $f0, $f0, $f4                        \r\n"
        "punpckhhw $f4, $f8, $f12                       \r\n"
        "punpcklhw $f8, $f8, $f12                       \r\n"
        "punpckhhw $f12, $f14, $f2                      \r\n"
        "punpcklhw $f14, $f14, $f2                      \r\n"
        "sdc1 $f4, 0x20+%[ptmp]                         \r\n"
        "ldc1 $f4, 0x0+%[ptmp]                          \r\n"
        "punpckhhw $f2, $f4, $f10                       \r\n"
        "punpcklhw $f4, $f4, $f10                       \r\n"
        "punpckhwd $f10, $f0, $f8                       \r\n"
        "punpcklwd $f0, $f0, $f8                        \r\n"
        "punpckhwd $f8, $f14, $f4                       \r\n"
        "punpcklwd $f14, $f14, $f4                      \r\n"
        "sdc1 $f0, 0x0+%[ptmp]                          \r\n"
        "sdc1 $f10, 0x10+%[ptmp]                        \r\n"
        "sdc1 $f14, 0x40+%[ptmp]                        \r\n"
        "sdc1 $f8, 0x50+%[ptmp]                         \r\n"
        "ldc1 $f16, 0x20+%[ptmp]                        \r\n"
        "punpckhwd $f0, $f6, $f16                       \r\n"
        "punpcklwd $f6, $f6, $f16                       \r\n"
        "punpckhwd $f10, $f12, $f2                      \r\n"
        "punpcklwd $f12, $f12, $f2                      \r\n"
        "daddu $8, $13, $13                             \r\n"
        "sdc1 $f6, 0x20+%[ptmp]                         \r\n"
        "sdc1 $f0, 0x30+%[ptmp]                         \r\n"
        "sdc1 $f12, 0x60+%[ptmp]                        \r\n"
        "sdc1 $f10, 0x70+%[ptmp]                        \r\n"
        "daddu $10, $10, $8                             \r\n"
        "daddu $9, $9, $8                               \r\n"
        "daddu $8, $10, %[stride]                       \r\n"
        "gsldlc1 $f0, 0x7($10)                          \r\n"
        "gsldrc1 $f0, 0x0($10)                          \r\n"
        "daddu $14, $10, $12                            \r\n"
        "gsldlc1 $f2, 0x7($8)                           \r\n"
        "gsldrc1 $f2, 0x0($8)                           \r\n"
        "gsldlc1 $f4, 0x7($14)                          \r\n"
        "gsldrc1 $f4, 0x0($14)                          \r\n"
        "daddu $8, $9, %[stride]                        \r\n"
        "gsldlc1 $f6, 0x7($9)                           \r\n"
        "gsldrc1 $f6, 0x0($9)                           \r\n"
        "daddu $14, $9, $12                             \r\n"
        "gsldlc1 $f8, 0x7($8)                           \r\n"
        "gsldrc1 $f8, 0x0($8)                           \r\n"
        "daddu $8, $9, $11                              \r\n"
        "gsldlc1 $f10, 0x7($14)                         \r\n"
        "gsldrc1 $f10, 0x0($14)                         \r\n"
        "gsldlc1 $f12, 0x7($8)                          \r\n"
        "gsldrc1 $f12, 0x0($8)                          \r\n"
        "daddu $8, $9, $13                              \r\n"
        "punpckhbh $f14, $f0, $f2                       \r\n"
        "punpcklbh $f0, $f0, $f2                        \r\n"
        "punpckhbh $f2, $f4, $f6                        \r\n"
        "punpcklbh $f4, $f4, $f6                        \r\n"
        "punpckhbh $f6, $f8, $f10                       \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "gsldlc1 $f16, 0x7($8)                          \r\n"
        "gsldrc1 $f16, 0x0($8)                          \r\n"
        "punpckhbh $f10, $f12, $f16                     \r\n"
        "punpcklbh $f12, $f12, $f16                     \r\n"
        "sdc1 $f6, 0x8+%[ptmp]                          \r\n"
        "punpckhhw $f6, $f0, $f4                        \r\n"
        "punpcklhw $f0, $f0, $f4                        \r\n"
        "punpckhhw $f4, $f8, $f12                       \r\n"
        "punpcklhw $f8, $f8, $f12                       \r\n"
        "punpckhhw $f12, $f14, $f2                      \r\n"
        "punpcklhw $f14, $f14, $f2                      \r\n"
        "sdc1 $f4, 0x28+%[ptmp]                         \r\n"
        "ldc1 $f4, 0x8+%[ptmp]                          \r\n"
        "punpckhhw $f2, $f4, $f10                       \r\n"
        "punpcklhw $f4, $f4, $f10                       \r\n"
        "punpckhwd $f10, $f0, $f8                       \r\n"
        "punpcklwd $f0, $f0, $f8                        \r\n"
        "punpckhwd $f8, $f14, $f4                       \r\n"
        "punpcklwd $f14, $f14, $f4                      \r\n"
        "sdc1 $f0, 0x8+%[ptmp]                          \r\n"
        "sdc1 $f10, 0x18+%[ptmp]                        \r\n"
        "sdc1 $f14, 0x48+%[ptmp]                        \r\n"
        "sdc1 $f8, 0x58+%[ptmp]                         \r\n"
        "ldc1 $f16, 0x28+%[ptmp]                        \r\n"
        "punpckhwd $f0, $f6, $f16                       \r\n"
        "punpcklwd $f6, $f6, $f16                       \r\n"
        "punpckhwd $f10, $f12, $f2                      \r\n"
        "punpcklwd $f12, $f12, $f2                      \r\n"
        "sdc1 $f6, 0x28+%[ptmp]                         \r\n"
        "sdc1 $f0, 0x38+%[ptmp]                         \r\n"
        "sdc1 $f12, 0x68+%[ptmp]                        \r\n"
        "sdc1 $f10, 0x78+%[ptmp]                        \r\n"
        "sd $10, 0x00+%[pdat]                           \r\n"
        "sd $11, 0x08+%[pdat]                           \r\n"
        "sd $12, 0x10+%[pdat]                           \r\n"
        "sd $13, 0x18+%[pdat]                           \r\n"
        ::[pix]"r"(pix),[stride]"r"((uint64_t)stride),[ptmp]"m"(ptmp[0]),
          [pdat]"m"(pdat[0])
        : "$8","$9","$10","$11","$12","$13","$14","$f0","$f2","$f4","$f6",
          "$f8","$f10","$f12","$f14","$f16"
    );

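    /* Filter the transposed block (stride 0x10); &ptmp[8] points at
     * the q0 row. */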
    ff_deblock_v_luma_intra_8_mmi((uint8_t *) &ptmp[8], 0x10, alpha, beta);

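    /* Pass 2: transpose the filtered block back into the picture,
     * using the addresses saved in pdat. */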
    __asm__ volatile (
        "ld $10, 0x00+%[pdat]                           \r\n"
        "ld $11, 0x08+%[pdat]                           \r\n"
        "ld $12, 0x10+%[pdat]                           \r\n"
        "ld $13, 0x18+%[pdat]                           \r\n"
        "daddu $9, $10, $11                             \r\n"
        "ldc1 $f0, 0x8+%[ptmp]                          \r\n"
        "ldc1 $f2, 0x18+%[ptmp]                         \r\n"
        "ldc1 $f4, 0x28+%[ptmp]                         \r\n"
        "ldc1 $f6, 0x38+%[ptmp]                         \r\n"
        "ldc1 $f8, 0x48+%[ptmp]                         \r\n"
        "ldc1 $f10, 0x58+%[ptmp]                        \r\n"
        "ldc1 $f12, 0x68+%[ptmp]                        \r\n"
        "punpckhbh $f14, $f0, $f2                       \r\n"
        "punpcklbh $f0, $f0, $f2                        \r\n"
        "punpckhbh $f2, $f4, $f6                        \r\n"
        "punpcklbh $f4, $f4, $f6                        \r\n"
        "punpckhbh $f6, $f8, $f10                       \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "ldc1 $f16, 0x78+%[ptmp]                        \r\n"
        "punpckhbh $f10, $f12, $f16                     \r\n"
        "punpcklbh $f12, $f12, $f16                     \r\n"
        "gssdlc1 $f6, 0x7($10)                          \r\n"
        "gssdrc1 $f6, 0x0($10)                          \r\n"
        "daddu $8, $10, $12                             \r\n"
        "punpckhhw $f6, $f0, $f4                        \r\n"
        "punpcklhw $f0, $f0, $f4                        \r\n"
        "punpckhhw $f4, $f8, $f12                       \r\n"
        "punpcklhw $f8, $f8, $f12                       \r\n"
        "punpckhhw $f12, $f14, $f2                      \r\n"
        "punpcklhw $f14, $f14, $f2                      \r\n"
        "gssdlc1 $f4, 0x7($8)                           \r\n"
        "gssdrc1 $f4, 0x0($8)                           \r\n"
        "gsldlc1 $f4, 0x7($10)                          \r\n"
        "gsldrc1 $f4, 0x0($10)                          \r\n"
        "punpckhhw $f2, $f4, $f10                       \r\n"
        "punpcklhw $f4, $f4, $f10                       \r\n"
        "punpckhwd $f10, $f0, $f8                       \r\n"
        "punpcklwd $f0, $f0, $f8                        \r\n"
        "punpckhwd $f8, $f14, $f4                       \r\n"
        "punpcklwd $f14, $f14, $f4                      \r\n"
        "daddu $8, $10, %[stride]                       \r\n"
        "gssdlc1 $f0, 0x7($10)                          \r\n"
        "gssdrc1 $f0, 0x0($10)                          \r\n"
        "daddu $14, $9, %[stride]                       \r\n"
        "gssdlc1 $f10, 0x7($8)                          \r\n"
        "gssdrc1 $f10, 0x0($8)                          \r\n"
        "daddu $8, $9, $12                              \r\n"
        "gssdlc1 $f14, 0x7($14)                         \r\n"
        "gssdrc1 $f14, 0x0($14)                         \r\n"
        "daddu $14, $10, $12                            \r\n"
        "gssdlc1 $f8, 0x7($8)                           \r\n"
        "gssdrc1 $f8, 0x0($8)                           \r\n"
        "gsldlc1 $f16, 0x7($14)                         \r\n"
        "gsldrc1 $f16, 0x0($14)                         \r\n"
        "daddu $8, $10, $12                             \r\n"
        "punpckhwd $f0, $f6, $f16                       \r\n"
        "punpcklwd $f6, $f6, $f16                       \r\n"
        "punpckhwd $f10, $f12, $f2                      \r\n"
        "punpcklwd $f12, $f12, $f2                      \r\n"
        "gssdlc1 $f6, 0x7($8)                           \r\n"
        "gssdrc1 $f6, 0x0($8)                           \r\n"
        "daddu $8, $9, $11                              \r\n"
        "gssdlc1 $f0, 0x7($9)                           \r\n"
        "gssdrc1 $f0, 0x0($9)                           \r\n"
        "daddu $14, $9, $13                             \r\n"
        "gssdlc1 $f12, 0x7($8)                          \r\n"
        "gssdrc1 $f12, 0x0($8)                          \r\n"
        "daddu $8, $13, $13                             \r\n"
        "gssdlc1 $f10, 0x7($14)                         \r\n"
        "gssdrc1 $f10, 0x0($14)                         \r\n"
        "dsubu $10, $10, $8                             \r\n"
        "dsubu $9, $9, $8                               \r\n"
        "ldc1 $f0, 0x0+%[ptmp]                          \r\n"
        "ldc1 $f2, 0x10+%[ptmp]                         \r\n"
        "ldc1 $f4, 0x20+%[ptmp]                         \r\n"
        "ldc1 $f6, 0x30+%[ptmp]                         \r\n"
        "ldc1 $f8, 0x40+%[ptmp]                         \r\n"
        "ldc1 $f10, 0x50+%[ptmp]                        \r\n"
        "ldc1 $f12, 0x60+%[ptmp]                        \r\n"
        "punpckhbh $f14, $f0, $f2                       \r\n"
        "punpcklbh $f0, $f0, $f2                        \r\n"
        "punpckhbh $f2, $f4, $f6                        \r\n"
        "punpcklbh $f4, $f4, $f6                        \r\n"
        "punpckhbh $f6, $f8, $f10                       \r\n"
        "punpcklbh $f8, $f8, $f10                       \r\n"
        "ldc1 $f16, 0x70+%[ptmp]                        \r\n"
        "punpckhbh $f10, $f12, $f16                     \r\n"
        "punpcklbh $f12, $f12, $f16                     \r\n"
        "gssdlc1 $f6, 0x7($10)                          \r\n"
        "gssdrc1 $f6, 0x0($10)                          \r\n"
        "daddu $8, $10, $12                             \r\n"
        "punpckhhw $f6, $f0, $f4                        \r\n"
        "punpcklhw $f0, $f0, $f4                        \r\n"
        "punpckhhw $f4, $f8, $f12                       \r\n"
        "punpcklhw $f8, $f8, $f12                       \r\n"
        "punpckhhw $f12, $f14, $f2                      \r\n"
        "punpcklhw $f14, $f14, $f2                      \r\n"
        "gssdlc1 $f4, 0x7($8)                           \r\n"
        "gssdrc1 $f4, 0x0($8)                           \r\n"
        "gsldlc1 $f4, 0x7($10)                          \r\n"
        "gsldrc1 $f4, 0x0($10)                          \r\n"
        "punpckhhw $f2, $f4, $f10                       \r\n"
        "punpcklhw $f4, $f4, $f10                       \r\n"
        "punpckhwd $f10, $f0, $f8                       \r\n"
        "punpcklwd $f0, $f0, $f8                        \r\n"
        "punpckhwd $f8, $f14, $f4                       \r\n"
        "punpcklwd $f14, $f14, $f4                      \r\n"
        "daddu $8, $10, %[stride]                       \r\n"
        "gssdlc1 $f0, 0x7($10)                          \r\n"
        "gssdrc1 $f0, 0x0($10)                          \r\n"
        "daddu $14, $9, %[stride]                       \r\n"
        "gssdlc1 $f10, 0x7($8)                          \r\n"
        "gssdrc1 $f10, 0x0($8)                          \r\n"
        "daddu $8, $9, $12                              \r\n"
        "gssdlc1 $f14, 0x7($14)                         \r\n"
        "gssdrc1 $f14, 0x0($14)                         \r\n"
        "daddu $14, $10, $12                            \r\n"
        "gssdlc1 $f8, 0x7($8)                           \r\n"
        "gssdrc1 $f8, 0x0($8)                           \r\n"
        "gsldlc1 $f16, 0x7($14)                         \r\n"
        "gsldrc1 $f16, 0x0($14)                         \r\n"
        "daddu $8, $10, $12                             \r\n"
        "punpckhwd $f0, $f6, $f16                       \r\n"
        "punpcklwd $f6, $f6, $f16                       \r\n"
        "punpckhwd $f10, $f12, $f2                      \r\n"
        "punpcklwd $f12, $f12, $f2                      \r\n"
        "gssdlc1 $f6, 0x7($8)                           \r\n"
        "gssdrc1 $f6, 0x0($8)                           \r\n"
        "daddu $8, $9, $11                              \r\n"
        "gssdlc1 $f0, 0x7($9)                           \r\n"
        "gssdrc1 $f0, 0x0($9)                           \r\n"
        "daddu $14, $9, $13                             \r\n"
        "gssdlc1 $f12, 0x7($8)                          \r\n"
        "gssdrc1 $f12, 0x0($8)                          \r\n"
        "gssdlc1 $f10, 0x7($14)                         \r\n"
        "gssdrc1 $f10, 0x0($14)                         \r\n"
        ::[pix]"r"(pix),[stride]"r"((uint64_t)stride),[ptmp]"m"(ptmp[0]),
          [pdat]"m"(pdat[0])
        : "$8","$9","$10","$11","$12","$13","$14","$f0","$f2","$f4","$f6",
          "$f8","$f10","$f12","$f14","$f16"
    );
}