/*
 * SIMD-optimized halfpel functions are compiled twice for rnd/no_rnd
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 * mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
 * and improved by Zdenek Kabelac <kabi@users.sf.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <stdint.h>

#include "inline_asm.h"

// put_pixels
av_unused STATIC void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels,
                                            ptrdiff_t line_size, int h)
{
    MOVQ_ZERO(mm7);
    SET_RND(mm6); // =2 for rnd  and  =1 for no_rnd version
    __asm__ volatile(
        "movq   (%1), %%mm0             \n\t"
        "movq   1(%1), %%mm4            \n\t"
        "movq   %%mm0, %%mm1            \n\t"
        "movq   %%mm4, %%mm5            \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddusw %%mm0, %%mm4           \n\t"
        "paddusw %%mm1, %%mm5           \n\t"
        "xor    %%"FF_REG_a", %%"FF_REG_a" \n\t"
        "add    %3, %1                  \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq   (%1, %%"FF_REG_a"), %%mm0  \n\t"
        "movq   1(%1, %%"FF_REG_a"), %%mm2 \n\t"
        "movq   %%mm0, %%mm1            \n\t"
        "movq   %%mm2, %%mm3            \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddusw %%mm2, %%mm0           \n\t"
        "paddusw %%mm3, %%mm1           \n\t"
        "paddusw %%mm6, %%mm4           \n\t"
        "paddusw %%mm6, %%mm5           \n\t"
        "paddusw %%mm0, %%mm4           \n\t"
        "paddusw %%mm1, %%mm5           \n\t"
        "psrlw  $2, %%mm4               \n\t"
        "psrlw  $2, %%mm5               \n\t"
        "packuswb  %%mm5, %%mm4         \n\t"
        "movq   %%mm4, (%2, %%"FF_REG_a")  \n\t"
        "add    %3, %%"FF_REG_a"           \n\t"

        "movq   (%1, %%"FF_REG_a"), %%mm2  \n\t" // 0 <-> 2   1 <-> 3
        "movq   1(%1, %%"FF_REG_a"), %%mm4 \n\t"
        "movq   %%mm2, %%mm3            \n\t"
        "movq   %%mm4, %%mm5            \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddusw %%mm2, %%mm4           \n\t"
        "paddusw %%mm3, %%mm5           \n\t"
        "paddusw %%mm6, %%mm0           \n\t"
        "paddusw %%mm6, %%mm1           \n\t"
        "paddusw %%mm4, %%mm0           \n\t"
        "paddusw %%mm5, %%mm1           \n\t"
        "psrlw  $2, %%mm0               \n\t"
        "psrlw  $2, %%mm1               \n\t"
        "packuswb  %%mm1, %%mm0         \n\t"
        "movq   %%mm0, (%2, %%"FF_REG_a")  \n\t"
        "add    %3, %%"FF_REG_a"           \n\t"

        "subl   $2, %0                  \n\t"
        "jnz    1b                      \n\t"
        :"+g"(h), "+S"(pixels)
        :"D"(block), "r"((x86_reg)line_size)
        :FF_REG_a, "memory");
}
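
/*
 * For reference, the routine above is plain 2x2 halfpel interpolation; a
 * scalar sketch of the same operation, with RND standing for the value
 * SET_RND() loads (2 in the rnd build, 1 in no_rnd), would be:
 *
 *     for (int i = 0; i < h; i++) {
 *         for (int x = 0; x < 8; x++)
 *             block[x] = (pixels[x]             + pixels[x + 1] +
 *                         pixels[x + line_size] + pixels[x + line_size + 1] +
 *                         RND) >> 2;
 *         pixels += line_size;
 *         block  += line_size;
 *     }
 *
 * The saturating word adds (paddusw) never actually saturate here: four
 * 8-bit samples plus the rounder stay well below 0xFFFF.
 */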

// avg_pixels
// this routine is 'slightly' suboptimal but mostly unused
av_unused STATIC void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels,
                                            ptrdiff_t line_size, int h)
{
    MOVQ_ZERO(mm7);
    SET_RND(mm6); // =2 for rnd  and  =1 for no_rnd version
    __asm__ volatile(
        "movq   (%1), %%mm0             \n\t"
        "movq   1(%1), %%mm4            \n\t"
        "movq   %%mm0, %%mm1            \n\t"
        "movq   %%mm4, %%mm5            \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddusw %%mm0, %%mm4           \n\t"
        "paddusw %%mm1, %%mm5           \n\t"
        "xor    %%"FF_REG_a", %%"FF_REG_a" \n\t"
        "add    %3, %1                  \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq   (%1, %%"FF_REG_a"), %%mm0  \n\t"
        "movq   1(%1, %%"FF_REG_a"), %%mm2 \n\t"
        "movq   %%mm0, %%mm1            \n\t"
        "movq   %%mm2, %%mm3            \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddusw %%mm2, %%mm0           \n\t"
        "paddusw %%mm3, %%mm1           \n\t"
        "paddusw %%mm6, %%mm4           \n\t"
        "paddusw %%mm6, %%mm5           \n\t"
        "paddusw %%mm0, %%mm4           \n\t"
        "paddusw %%mm1, %%mm5           \n\t"
        "psrlw  $2, %%mm4               \n\t"
        "psrlw  $2, %%mm5               \n\t"
        "movq   (%2, %%"FF_REG_a"), %%mm3  \n\t" // load the existing dst row
        "packuswb  %%mm5, %%mm4         \n\t"
        "pcmpeqd %%mm2, %%mm2           \n\t" // all ones, then doubled bytewise ...
        "paddb %%mm2, %%mm2             \n\t" // ... to get the 0xFE mask PAVGB_MMX uses
        PAVGB_MMX(%%mm3, %%mm4, %%mm5, %%mm2) // mm5 = bytewise avg of dst and interpolation
        "movq   %%mm5, (%2, %%"FF_REG_a")  \n\t"
        "add    %3, %%"FF_REG_a"           \n\t"

        "movq   (%1, %%"FF_REG_a"), %%mm2  \n\t" // 0 <-> 2   1 <-> 3
        "movq   1(%1, %%"FF_REG_a"), %%mm4 \n\t"
        "movq   %%mm2, %%mm3            \n\t"
        "movq   %%mm4, %%mm5            \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddusw %%mm2, %%mm4           \n\t"
        "paddusw %%mm3, %%mm5           \n\t"
        "paddusw %%mm6, %%mm0           \n\t"
        "paddusw %%mm6, %%mm1           \n\t"
        "paddusw %%mm4, %%mm0           \n\t"
        "paddusw %%mm5, %%mm1           \n\t"
        "psrlw  $2, %%mm0               \n\t"
        "psrlw  $2, %%mm1               \n\t"
        "movq   (%2, %%"FF_REG_a"), %%mm3  \n\t" // load the existing dst row
        "packuswb  %%mm1, %%mm0         \n\t"
        "pcmpeqd %%mm2, %%mm2           \n\t" // rebuild the 0xFE mask
        "paddb %%mm2, %%mm2             \n\t"
        PAVGB_MMX(%%mm3, %%mm0, %%mm1, %%mm2) // mm1 = bytewise avg of dst and interpolation
        "movq   %%mm1, (%2, %%"FF_REG_a")  \n\t"
        "add    %3, %%"FF_REG_a"           \n\t"

        "subl   $2, %0                  \n\t"
        "jnz    1b                      \n\t"
        :"+g"(h), "+S"(pixels)
        :"D"(block), "r"((x86_reg)line_size)
        :FF_REG_a, "memory");
}
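
/*
 * The avg variant computes the same interpolation as above and then folds it
 * into the existing destination with a bytewise average (PAVGB_MMX).  A rough
 * scalar sketch, where avg() is a hypothetical stand-in for whatever rounding
 * PAVGB_MMX provides in the rnd/no_rnd builds:
 *
 *     for (int i = 0; i < h; i++) {
 *         for (int x = 0; x < 8; x++) {
 *             int t = (pixels[x]             + pixels[x + 1] +
 *                      pixels[x + line_size] + pixels[x + line_size + 1] +
 *                      RND) >> 2;
 *             block[x] = avg(block[x], t);
 *         }
 *         pixels += line_size;
 *         block  += line_size;
 *     }
 */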