/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * NOTE: This code is based on GPL code from the libmpeg2 project.  The
 * author, Michel Lespinasse, has given explicit permission to release
 * under LGPL as part of Libav.
 */

/*
 * Libav integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project.  I've deleted all of the libmpeg2-specific code, renamed the
 * functions and reordered the function parameters.  The only change to the
 * IDCT function itself was to factor out the partial transposition, and to
 * perform a full transpose at the end of the function.
 */


#include <stdlib.h>                                      /* malloc(), free() */
#include <string.h>
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavcodec/dsputil.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"

#define IDCT_HALF                                       \
    /* 1st stage */                                     \
    t1 = vec_mradds (a1, vx7, vx1 );                    \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));    \
    t7 = vec_mradds (a2, vx5, vx3);                     \
    t3 = vec_mradds (ma2, vx3, vx5);                    \
                                                        \
    /* 2nd stage */                                     \
    t5 = vec_adds (vx0, vx4);                           \
    t0 = vec_subs (vx0, vx4);                           \
    t2 = vec_mradds (a0, vx6, vx2);                     \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));    \
    t6 = vec_adds (t8, t3);                             \
    t3 = vec_subs (t8, t3);                             \
    t8 = vec_subs (t1, t7);                             \
    t1 = vec_adds (t1, t7);                             \
                                                        \
    /* 3rd stage */                                     \
    t7 = vec_adds (t5, t2);                             \
    t2 = vec_subs (t5, t2);                             \
    t5 = vec_adds (t0, t4);                             \
    t0 = vec_subs (t0, t4);                             \
    t4 = vec_subs (t8, t3);                             \
    t3 = vec_adds (t8, t3);                             \
                                                        \
    /* 4th stage */                                     \
    vy0 = vec_adds (t7, t1);                            \
    vy7 = vec_subs (t7, t1);                            \
    vy1 = vec_mradds (c4, t3, t5);                      \
    vy6 = vec_mradds (mc4, t3, t5);                     \
    vy2 = vec_mradds (c4, t4, t0);                      \
    vy5 = vec_mradds (mc4, t4, t0);                     \
    vy3 = vec_adds (t2, t6);                            \
    vy4 = vec_subs (t2, t6);
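
/* Every multiply above goes through vec_mradds, which for each signed 16-bit
 * element computes, with saturation, roughly:
 *
 *     result[i] = ((a[i] * b[i] + 0x4000) >> 15) + c[i];
 *
 * i.e. a rounded Q15 multiply-accumulate.  This is why the coefficient table
 * below holds values scaled by 2^15. */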

#define IDCT                                                            \
    vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                \
    vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias;                  \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                    \
    vec_u16 shift;                                                 \
                                                                        \
    c4 = vec_splat (constants[0], 0);                                   \
    a0 = vec_splat (constants[0], 1);                                   \
    a1 = vec_splat (constants[0], 2);                                   \
    a2 = vec_splat (constants[0], 3);                                   \
    mc4 = vec_splat (constants[0], 4);                                  \
    ma2 = vec_splat (constants[0], 5);                                  \
    bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3);     \
                                                                        \
    zero = vec_splat_s16 (0);                                           \
    shift = vec_splat_u16 (4);                                          \
                                                                        \
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    vx0 = vec_mergeh (vy0, vy4);                                        \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    vy0 = vec_mergeh (vx0, vx4);                                        \
    vy1 = vec_mergel (vx0, vx4);                                        \
    vy2 = vec_mergeh (vx1, vx5);                                        \
    vy3 = vec_mergel (vx1, vx5);                                        \
    vy4 = vec_mergeh (vx2, vx6);                                        \
    vy5 = vec_mergel (vx2, vx6);                                        \
    vy6 = vec_mergeh (vx3, vx7);                                        \
    vy7 = vec_mergel (vx3, vx7);                                        \
                                                                        \
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);                       \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    shift = vec_splat_u16 (6);                                          \
    vx0 = vec_sra (vy0, shift);                                         \
    vx1 = vec_sra (vy1, shift);                                         \
    vx2 = vec_sra (vy2, shift);                                         \
    vx3 = vec_sra (vy3, shift);                                         \
    vx4 = vec_sra (vy4, shift);                                         \
    vx5 = vec_sra (vy5, shift);                                         \
    vx6 = vec_sra (vy6, shift);                                         \
    vx7 = vec_sra (vy7, shift);
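
/* The first row of constants appears to hold the butterfly coefficients as
 * Q15 values: 23170 ~ cos(pi/4) * 2^15, 13573 ~ tan(pi/8) * 2^15,
 * 6518 ~ tan(pi/16) * 2^15 and 21895 ~ tan(3*pi/16) * 2^15, followed by the
 * two negated terms and the {32, 31} halfword pair from which the rounding
 * bias is splatted.  constants[1..4] are the per-row prescale vectors applied
 * to the input before the first pass. */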

static const vec_s16 constants[5] = {
    {23170, 13573,  6518, 21895, -23170, -21895,    32,    31},
    {16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725},
    {22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521},
    {21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692},
    {19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722}
};

void idct_put_altivec(uint8_t* dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;

    IDCT
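
/* COPY clamps one row of eight 16-bit results to unsigned bytes with
 * vec_packsu and writes them with two 32-bit element stores.  Since the
 * packed row is duplicated in both halves of tmp, the vec_ste pair stores
 * the right bytes for any 8-byte-aligned destination (which the block
 * pointers passed in here are assumed to be). */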

#define COPY(dest,src)                                          \
    tmp = vec_packsu (src, src);                                \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);       \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)
}

void idct_add_altivec(uint8_t* dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

    IDCT

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
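
    /* perm0/perm1 interleave 0xFF selector bytes with the alignment offsets
     * returned by vec_lvsl.  vec_perm only uses the low five bits of each
     * selector byte, so the 0xFF entries fetch zeros from the second operand;
     * the permute in ADD therefore yields the eight destination pixels
     * zero-extended to 16-bit values, whether dest lies in the first or the
     * second half of its aligned 16-byte block. */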

#define ADD(dest,src,perm)                                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm);       \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);               \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    ADD (dest, vx0, perm0)      dest += stride;
    ADD (dest, vx1, perm1)      dest += stride;
    ADD (dest, vx2, perm0)      dest += stride;
    ADD (dest, vx3, perm1)      dest += stride;
    ADD (dest, vx4, perm0)      dest += stride;
    ADD (dest, vx5, perm1)      dest += stride;
    ADD (dest, vx6, perm0)      dest += stride;
    ADD (dest, vx7, perm1)
}