/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"

/* this code assumes that stride % 16 == 0 */
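/*
 * CHROMA_MC8_ALTIVEC_CORE emits one row of the 2-D bilinear chroma filter:
 *   out[i] = (A*s[i] + B*s[i+1] + C*s[i+stride] + D*s[i+stride+1] + bias) >> 6
 * BIAS1 seeds the vec_mladd chain (32 for H.264, 0 for VC-1) and BIAS2 is
 * applied to the accumulated sum (noop for H.264, add28 for the VC-1
 * no-rounding variant).  The 16-bit results are packed back to bytes and
 * merged into the aligned destination vector through fperm before the store.
 */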

#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
        vsrc2ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc2uc);\
        vsrc3ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc3uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
        psum = vec_mladd(vB, vsrc1ssH, psum);\
        psum = vec_mladd(vC, vsrc2ssH, psum);\
        psum = vec_mladd(vD, vsrc3ssH, psum);\
        psum = BIAS2(psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        vsrc0ssH = vsrc2ssH;\
        vsrc1ssH = vsrc3ssH;\
\
        dst += stride;\
        src += stride;
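/*
 * CHROMA_MC8_ALTIVEC_CORE_SIMPLE: 1-D variant used when x == 0 or y == 0.
 * Only two taps remain, with weights vA and vE = vB + vC, so each row needs
 * a single pair of source vectors.
 */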

#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
        vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);\
        vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);\
\
        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
        psum = vec_mladd(vE, vsrc1ssH, psum);\
        psum = vec_sr(psum, v6us);\
\
        vdst = vec_ld(0, dst);\
        ppsum = (vec_u8)vec_pack(psum, psum);\
        vfdst = vec_perm(vdst, ppsum, fperm);\
\
        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
\
        vec_st(fsum, 0, dst);\
\
        dst += stride;\
        src += stride;
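/* BIAS2 hooks for CHROMA_MC8_ALTIVEC_CORE: H.264 already carries its +32
 * rounding term in BIAS1, while the VC-1 no-rounding variant adds 28 to the
 * sum before the shift. */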

#define noop(a) a
#define add28(a) vec_add(v28ss, a)

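/*
 * Source loads.  On big-endian, AltiVec vec_ld ignores the low address bits,
 * so each row is assembled from one or two aligned loads plus a vec_perm with
 * permute vectors from vec_lvsl; loadSecond and reallyBadAlign (computed by
 * the callers from src % 16) decide whether the second load is needed and
 * whether vs1 can be taken from it directly.  On little-endian, VSX
 * vec_vsx_ld handles unaligned addresses, so no permute is required.
 */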
#if HAVE_BIGENDIAN
#define GET_VSRC1(vs0, off, b, perm0, s){    \
    vec_u8 vsrcCuc, vsrcDuc;                 \
    vsrcCuc = vec_ld(off, s);                \
    if (loadSecond){                         \
        vsrcDuc = vec_ld(off + b, s);        \
    } else                                   \
        vsrcDuc = vsrcCuc;                   \
                                             \
    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0); \
}
#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
    vec_u8 vsrcCuc, vsrcDuc;                         \
    vsrcCuc = vec_ld(off, s);                        \
    if (loadSecond){                                 \
        vsrcDuc = vec_ld(off + b, s);                \
    } else                                           \
        vsrcDuc = vsrcCuc;                           \
                                                     \
    vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0);         \
    if (reallyBadAlign){                             \
        vs1 = vsrcDuc;                               \
    } else                                           \
        vs1 = vec_perm(vsrcCuc, vsrcDuc, perm1);     \
 }

#else

#define GET_VSRC1(vs0, off, b, perm0, s){            \
    vs0 = vec_vsx_ld(off, s);                        \
 }
#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
    vs0 = vec_vsx_ld(off, s);                        \
    vs1 = vec_vsx_ld(off + 1, s);                    \
 }
#endif /* HAVE_BIGENDIAN */

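/*
 * H.264 chroma motion compensation for an 8-wide block.  x and y are the
 * fractional sample offsets (0..7), the bilinear weights are
 *   A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y
 * and each output sample is
 *   (A*src[0] + B*src[1] + C*src[stride] + D*src[stride+1] + 32) >> 6.
 */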
#ifdef PREFIX_h264_chroma_mc8_altivec
static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
                                    int stride, int h, int x, int y) {
    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    LOAD_ZERO;
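    /* Splat each weight (at most 8*8 = 64, so it fits in 16 bits) into its
     * own vector of 16-bit lanes for vec_mladd. */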
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
    const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
    const vec_u16 v6us = vec_splat_u16(6);

    vec_u8 vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;
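    /* Alignment bookkeeping for GET_VSRC/GET_VSRC1: loadSecond is set when
     * the 9 source bytes needed per row straddle two aligned 16-byte blocks,
     * reallyBadAlign when src points at the last byte of a block. */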
#if HAVE_BIGENDIAN
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
#endif
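    /* fperm selects whether the 8 packed result bytes replace the low or the
     * high half of the aligned 16-byte destination vector (dst itself is
     * expected to be at least 8-byte aligned). */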

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);

    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);
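    /* ABCD[3] = x*y is nonzero only when both offsets are nonzero (full 2-D
     * filter); otherwise one direction drops out and the remaining taps are
     * vA and vE = vB + vC. */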

    if (ABCD[3]) {
        for (i = 0 ; i < h ; i++) {
            GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
            CHROMA_MC8_ALTIVEC_CORE(v32ss, noop);
        }
    } else {
        const vec_s16 vE = vec_add(vB, vC);
        if (ABCD[2]) { // x == 0 B == 0
            for (i = 0 ; i < h ; i++) {
                GET_VSRC1(vsrc1uc, stride, 15, vsrcperm0, src);
                CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
                vsrc0uc = vsrc1uc;
            }
        } else { // y == 0 C == 0
            for (i = 0 ; i < h ; i++) {
                GET_VSRC(vsrc0uc, vsrc1uc, 0, 15, vsrcperm0, vsrcperm1, src);
                CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
            }
        }
    }
}
#endif

/* this code assumes that stride % 16 == 0 */
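/* VC-1 "no rounding" variant: same bilinear filter, but the bias added before
 * the final >> 6 is 28 (v28ss) instead of 32. */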
#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    LOAD_ZERO;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
    const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
    const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
    const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
    const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16 v6us  = vec_splat_u16(6);

    vec_u8 vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, vfdst, fsum;
#if HAVE_BIGENDIAN
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);
#endif

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

    GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);

    vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc1uc);

    for (i = 0 ; i < h ; i++) {
        GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
        CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28);
    }
}
#endif

#undef noop
#undef add28
#undef CHROMA_MC8_ALTIVEC_CORE