/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/hpeldsp.h"
#include "hpeldsp_altivec.h"

#if HAVE_ALTIVEC

/* next one assumes that ((line_size % 16) == 0) */
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    /* Copy a 16 x h pixel area from pixels to block.  Assumes
     * (line_size % 16) == 0, so every destination row can be written with
     * a single 16-byte vector store. */
    register vector unsigned char row0, row1, row2, row3;
    register ptrdiff_t stride2 = line_size << 1;
    register ptrdiff_t stride3 = stride2 + line_size;
    register ptrdiff_t stride4 = line_size << 2;
    int line;

    /* Hand-unrolling by 4 gains about 15% here: minimum execution time
     * drops from 74 to 60 cycles on a 7450.  Combining this with
     * -funroll-loops is counter-productive (back to 74 cycles). */
    for (line = 0; line < h; line += 4) {
        row0 = unaligned_load(0,          pixels);
        row1 = unaligned_load(line_size,  pixels);
        row2 = unaligned_load(stride2,    pixels);
        row3 = unaligned_load(stride3,    pixels);
        VEC_ST(row0, 0,         (unsigned char *) block);
        VEC_ST(row1, line_size, (unsigned char *) block);
        VEC_ST(row2, stride2,   (unsigned char *) block);
        VEC_ST(row3, stride3,   (unsigned char *) block);
        pixels += stride4;
        block  += stride4;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
74
    register vector unsigned char pixelsv, blockv;
75

76
    int i;
77 78
    for (i = 0; i < h; i++) {
        blockv = vec_ld(0, block);
79
        pixelsv = VEC_LD( 0, pixels);
80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98
        blockv = vec_avg(blockv,pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels+=line_size;
        block +=line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

   for (i = 0; i < h; i++) {
       /* block is 8 bytes-aligned, so we're either in the
          left block (16 bytes-aligned) or in the right block (not) */
       int rightside = ((unsigned long)block & 0x0000000F);

       blockv = vec_ld(0, block);
99
       pixelsv = VEC_LD( 0, pixels);
100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120

       if (rightside) {
           pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
       } else {
           pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
       }

       blockv = vec_avg(blockv, pixelsv);

       vec_st(blockv, 0, block);

       pixels += line_size;
       block += line_size;
   }
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    /* 8 x h half-pel interpolation in both x and y, with rounding:
     * dst = (p[x,y] + p[x+1,y] + p[x,y+1] + p[x+1,y+1] + 2) >> 2.
     * Assumes (line_size % 8) == 0. */
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    /* Horizontal sums for the row above the first output row: bytes are
     * widened to 16 bits by merging with zero, then p[x] + p[x+1]. */
    pixelsv1 = VEC_LD(0, pixels);
    pixelsv2 = VEC_LD(1, pixels);
    pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
    pixelsv2 = VEC_MERGEH(vczero, pixelsv2);

    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);   /* +2 is the rounding bias */

    for (i = 0; i < h ; i++) {
        /* block is 8-byte aligned: either the left (16-byte aligned) or
         * the right half of a 16-byte vector. */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        /* Horizontal sums for the next input row. */
        pixelsv1 = unaligned_load(line_size, pixels);
        pixelsv2 = unaligned_load(line_size+1, pixels);
        pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
        pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);          /* (sums + bias) >> 2 */
        /* Carry this row's sums (re-biased) into the next iteration so each
         * input row is loaded only once. */
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        /* Merge the 8 result bytes into the correct half of the block row. */
        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    /* 8 x h half-pel interpolation in both x and y, no-rounding variant:
     * dst = (p[x,y] + p[x+1,y] + p[x,y+1] + p[x+1,y+1] + 1) >> 2.
     * Identical in structure to put_pixels8_xy2_altivec, except the bias
     * added to the carried row sums is 1 (vcone) instead of 2.
     * Assumes (line_size % 8) == 0. */
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    /* Widened horizontal sums for the row above the first output row. */
    pixelsv1 = VEC_LD(0, pixels);
    pixelsv2 = VEC_LD(1, pixels);
    pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
    pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);   /* +1: no-rounding bias */

    for (i = 0; i < h ; i++) {
        /* block is 8-byte aligned: left or right half of a 16-byte vector. */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        /* Widened horizontal sums for the next input row. */
        pixelsv1 = unaligned_load(line_size, pixels);
        pixelsv2 = unaligned_load(line_size+1, pixels);
        pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
        pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);          /* >> 2 (vctwo = shift count) */
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        /* Merge the 8 result bytes into the correct half of the block row. */
        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
215
    register vector unsigned char blockv;
216 217 218 219 220
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

221 222 223 224 225 226
    pixelsv1 = VEC_LD(0, pixels);
    pixelsv2 = VEC_LD(1, pixels);
    pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
    pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
    pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
    pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
227 228 229 230 231 232 233 234 235 236
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

237 238
        pixelsv1 = unaligned_load(line_size, pixels);
        pixelsv2 = unaligned_load(line_size+1, pixels);
239

240 241 242 243
        pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
        pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
        pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
        pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    /* 16 x h half-pel interpolation in both x and y, no-rounding variant:
     * dst = (p[x,y] + p[x+1,y] + p[x,y+1] + p[x+1,y+1] + 1) >> 2.
     * Same structure as put_pixels16_xy2_altivec, but the bias added to
     * the carried row sums is 1 (vcone) instead of 2.
     * Assumes (line_size % 16) == 0 so destination rows are aligned. */
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    /* Widened horizontal sums for the row above the first output row,
     * high half in pixelssum1, low half in pixelssum3. */
    pixelsv1 = VEC_LD(0, pixels);
    pixelsv2 = VEC_LD(1, pixels);
    pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
    pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
    pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
    pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);   /* +1: no-rounding bias */
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        /* Widened horizontal sums for the next input row. */
        pixelsv1 = unaligned_load(line_size, pixels);
        pixelsv2 = unaligned_load(line_size+1, pixels);

        pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
        pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
        pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
        pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);          /* >> 2 (vctwo = shift count) */
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        /* Carry this row's sums (re-biased) into the next iteration. */
        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        VEC_ST(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    /* 8 x h averaging half-pel interpolation in both x and y: the rounded
     * xy2 interpolation
     *   (p[x,y] + p[x+1,y] + p[x,y+1] + p[x+1,y+1] + 2) >> 2
     * is averaged (vec_avg, which rounds up) with the current contents of
     * block.  Assumes (line_size % 8) == 0. */
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                        vec_splat_u16(2);

    /* Widened horizontal sums for the row above the first output row. */
    pixelsv1 = VEC_LD(0, pixels);
    pixelsv2 = VEC_LD(1, pixels);
    pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
    pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);   /* +2 rounding bias */

    for (i = 0; i < h ; i++) {
        /* block is 8-byte aligned: left or right half of a 16-byte vector. */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        /* Widened horizontal sums for the next input row. */
        pixelsv1 = unaligned_load(line_size, pixels);
        pixelsv2 = unaligned_load(line_size+1, pixels);

        pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
        pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);          /* >> 2 */
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        /* Merge the interpolated 8 bytes into the half being written; the
         * other half keeps blockv's bytes so vec_avg leaves it unchanged. */
        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
#endif /* HAVE_ALTIVEC */

av_cold void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags)
{
#if HAVE_ALTIVEC
    /* Install the AltiVec half-pel primitives, but only when the running
     * CPU actually reports AltiVec support. */
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    c->put_pixels_tab[0][0]        = ff_put_pixels16_altivec;
    c->put_pixels_tab[0][3]        = put_pixels16_xy2_altivec;
    c->put_pixels_tab[1][3]        = put_pixels8_xy2_altivec;

    c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;

    c->avg_pixels_tab[0][0]        = ff_avg_pixels16_altivec;
    c->avg_pixels_tab[1][0]        = avg_pixels8_altivec;
    c->avg_pixels_tab[1][3]        = avg_pixels8_xy2_altivec;
#endif /* HAVE_ALTIVEC */
}