/*
 * GMC (Global Motion Compensation), AltiVec-enabled
 *
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/mpegvideodsp.h"

#if HAVE_ALTIVEC && HAVE_BIGENDIAN
/* AltiVec-enhanced gmc1. At the moment this code assumes stride is a
 * multiple of 8 to preserve proper dst alignment. */
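/* A one-point GMC step is a bilinear interpolation with 1/16-pel weights
 * derived from (x16, y16):
 *     dst[x] = (A * src[x]          + B * src[x + 1] +
 *               C * src[x + stride] + D * src[x + stride + 1] +
 *               rounder) >> 8
 * with the A/B/C/D weights defined in the ABCD table below. */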
static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
                         int stride, int h, int x16, int y16, int rounder)
{
    int i;
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16 - x16) * (16 - y16), /* A */
             (x16) * (16 - y16), /* B */
        (16 - x16) * (y16),      /* C */
             (x16) * (y16),      /* D */
        0, 0, 0, 0               /* padding */
    };
    register const vector unsigned char vczero =
        (const vector unsigned char) vec_splat_u8(0);
    register const vector unsigned short vcsr8 =
        (const vector unsigned short) vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, srcvB, srcvC, srcvD;
    register vector unsigned short tempB, tempC, tempD;
    unsigned long dst_odd        = (unsigned long) dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;
    register vector unsigned short tempA =
        vec_ld(0, (const unsigned short *) ABCD);
    register vector unsigned short Av = vec_splat(tempA, 0);
    register vector unsigned short Bv = vec_splat(tempA, 1);
    register vector unsigned short Cv = vec_splat(tempA, 2);
    register vector unsigned short Dv = vec_splat(tempA, 3);
    register vector unsigned short rounderV =
        vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);

    /* We will be able to pick up our 9 char elements at src from those
     * 32 bytes. We load the first batch here; inside the loop we can reuse
     * 'src + stride' from one iteration as the 'src' of the next. */
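    /* vec_ld() can only load from 16-byte-aligned addresses, so an unaligned
     * row is fetched as two aligned loads that are then realigned with
     * vec_perm() using the shift pattern produced by vec_lvsl(). */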
    register vector unsigned char src_0 = vec_ld(0, src);
    register vector unsigned char src_1 = vec_ld(16, src);
    register vector unsigned char srcvA = vec_perm(src_0, src_1,
                                                   vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
        /* If src & 0xF == 0xF, then (src + 1) is properly aligned
         * on the second vector. */
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    else
        srcvB = src_1;
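    /* Interleaving with zero bytes widens the low 8 source bytes of each
     * vector into unsigned 16-bit lanes, ready for the multiplies below. */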
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for (i = 0; i < h; i++) {
        dst_odd        =   (unsigned long) dst            & 0x0000000F;
        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        /* We will be able to pick up our 9 char elements at src + stride from
         * those 32 bytes, then reuse the resulting two vectors srcvC and srcvD
         * as the next srcvA and srcvB. */
        src_0 = vec_ld(stride +  0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F)
            /* If src & 0xF == 0xF, then (src + 1) is properly aligned
             * on the second vector. */
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        else
            srcvD = src_1;

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        /* OK, now we (finally) do the math :-)
         * Those four instructions replace 32 int muls & 32 int adds.
         * Isn't AltiVec nice? */
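        /* Each vec_mladd() multiplies eight 16-bit lanes and adds the previous
         * partial sum, so tempD ends up holding
         * A * srcvA + B * srcvB + C * srcvC + D * srcvD + rounder per lane. */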
        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;

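        /* A + B + C + D == 16 * 16 == 256, so shifting right by 8 renormalizes
         * the weighted sum back to the 0..255 pixel range. */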
        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);

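        /* Only 8 result bytes are produced per row; depending on whether dst
         * sits at offset 8 or 0 within its 16-byte block, merge them into the
         * upper or lower half of the previously loaded destination vector. */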
        if (dst_odd)
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        else
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */

av_cold void ff_mpegvideodsp_init_ppc(MpegVideoDSPContext *c)
{
#if HAVE_ALTIVEC && HAVE_BIGENDIAN
    c->gmc1 = gmc1_altivec;
#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
}