/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"

#include "libavcodec/dsputil.h"

#include "dsputil_altivec.h"

static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char perm1 = vec_lvsl(0, pix2);
    vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16] */
        pix1v  = vec_ld( 0, pix1);
        pix2l  = vec_ld( 0, pix2);
        pix2r  = vec_ld(16, pix2);
        pix2v  = vec_perm(pix2l, pix2r, perm1);
        pix2iv = vec_perm(pix2l, pix2r, perm2);
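        /* vec_lvsl(0, p) returns the permute vector { n, n+1, ..., n+15 }
           with n = (uintptr_t)p & 15, so permuting the two aligned loads
           extracts the 16 bytes that start at the unaligned address p;
           perm2 = perm1 + 1 selects the same window shifted by one byte. */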

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
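
/* For reference, a scalar sketch of what the function above computes
   (half-pel interpolation in x, with vec_avg rounding up); kept out of
   the build on purpose: */
#if 0
static int sad16_x2_scalar(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += FFABS(pix1[j] - ((pix2[j] + pix2[j + 1] + 1) >> 1));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif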

static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read each
       time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15] */
    pix2l = vec_ld( 0, pix2);
    pix2r = vec_ld(15, pix2);
    pix2v = vec_perm(pix2l, pix2r, perm);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15] */
        pix1v = vec_ld(0, pix1);

        pix2l = vec_ld( 0, pix3);
        pix2r = vec_ld(15, pix3);
        pix3v = vec_perm(pix2l, pix2r, perm);

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char avgv, t5;
    vector unsigned char perm1 = vec_lvsl(0, pix2);
    vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, as well
       as some splitting and vector addition, each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts */
    pix2l  = vec_ld( 0, pix2);
    pix2r  = vec_ld(16, pix2);
    pix2v  = vec_perm(pix2l, pix2r, perm1);
    pix2iv = vec_perm(pix2l, pix2r, perm2);

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16] */
        pix1v = vec_ld(0, pix1);

        pix2l  = vec_ld( 0, pix3);
        pix2r  = vec_ld(16, pix3);
        pix3v  = vec_perm(pix2l, pix2r, perm1);
        pix3iv = vec_perm(pix2l, pix2r, perm2);

        /* Note that AltiVec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a, b), avg(c, d)), but the
           rounding would mean that, for example, avg(3, 0, 0, 1) = 2, when
           it should be 1. Instead, we have to split the pixel vectors into
           vectors of shorts and do the averaging by hand. */
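        /* E.g. avg(avg(3, 0), avg(0, 1)) = avg(2, 1) = 2 with round-up,
           while the desired value is (3 + 0 + 0 + 1 + 2) >> 2 = 1. */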

        /* Split the pixel vectors into shorts */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
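
/* For reference, a scalar sketch of the function above, using the same
   (A + B + C + D + 2) >> 2 half-pel rounding; kept out of the build: */
#if 0
static int sad16_xy2_scalar(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++) {
            int avg = (pix2[j] + pix2[j + 1] + pix2[j + line_size] +
                       pix2[j + line_size + 1] + 2) >> 2;
            s += FFABS(pix1[j] - avg);
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif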

static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_ld(0, pix1);
        t2 = vec_perm(pix2l, pix2r, perm);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    const vector unsigned char permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
    vector unsigned char perm1 = vec_lvsl(0, pix1);
    vector unsigned char perm2 = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        vector unsigned char pix1l = vec_ld( 0, pix1);
        vector unsigned char pix1r = vec_ld(15, pix1);
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_and(vec_perm(pix1l, pix1r, perm1), permclear);
        t2 = vec_and(vec_perm(pix2l, pix2r, perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix);
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        vector unsigned char pixl = vec_ld( 0, pix);
        vector unsigned char pixr = vec_ld(15, pix);
        pixv = vec_perm(pixl, pixr, perm);

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);
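        /* vec_msum multiplies the 16 byte pairs and accumulates each group
           of four products into one of the four 32-bit lanes of sv, so no
           explicit widening to shorts is needed. */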

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for an 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    const vector unsigned char permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
    vector unsigned char perm1 = vec_lvsl(0, pix1);
    vector unsigned char perm2 = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        vector unsigned char pix1l = vec_ld( 0, pix1);
        vector unsigned char pix1r = vec_ld(15, pix1);
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_and(vec_perm(pix1l, pix1r, perm1), permclear);
        t2 = vec_and(vec_perm(pix2l, pix2r, perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_ld(0, pix1);
        t2 = vec_perm(pix2l, pix2r, perm);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

static int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix);
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        vector unsigned char pixl = vec_ld( 0, pix);
        vector unsigned char pixr = vec_ld(15, pix);
        t1 = vec_perm(pixl, pixr, perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm = vec_lvsl(0, pixels);
    vector unsigned char bytes;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        vector unsigned char pixl = vec_ld( 0, pixels);
        vector unsigned char pixr = vec_ld(15, pixels);
        bytes = vec_perm(pixl, pixr, perm);

        // convert the bytes into shorts
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // save the data to the block; we assume the block is 16-byte aligned
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}

static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
        const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm1 = vec_lvsl(0, s1);
    vector unsigned char perm2 = vec_lvsl(0, s2);
    vector unsigned char bytes, pixl, pixr;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        pixl = vec_ld( 0, s1);
        pixr = vec_ld(15, s1);
        bytes = vec_perm(pixl, pixr, perm1);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        pixl = vec_ld( 0, s2);
        pixr = vec_ld(15, s2);
        bytes = vec_perm(pixl, pixr, perm2);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block; we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... This is a manual
        // unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        pixl = vec_ld( 0, s1);
        pixr = vec_ld(15, s1);
        bytes = vec_perm(pixl, pixr, perm1);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        pixl = vec_ld( 0, s2);
        pixr = vec_ld(15, s2);
        bytes = vec_perm(pixl, pixr, perm2);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block; we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

static void clear_block_altivec(int16_t *block) {
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}


static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16-byte aligned (guaranteed) */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16, add the remaining bytes by hand */
    for (; i < w; i++) {
        dst[i] += src[i];
    }
}

/* next one assumes that ((line_size % 16) == 0) */
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

// Hand-unrolling the loop by 4 gains about 15%;
// minimum execution time goes from 74 to 60 cycles.
// It's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// All this is on a 7450, tuning for the 7450.
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels += line_size_4;
        block  += line_size_4;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
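/* This is the classic SWAR rounded average: per byte,
   (a | b) - (((a ^ b) & 0xFE) >> 1) == (a + b + 1) >> 1, since
   a + b = (a | b) + (a & b); the 0xFE mask keeps the shift from pulling a
   bit across byte boundaries when four bytes share one 32-bit word.
   E.g. a = 3, b = 0: (3 | 0) - (((3 ^ 0) & 0xFE) >> 1) = 3 - 1 = 2. */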
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        blockv = vec_ld(0, block);
        pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
        blockv = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels += line_size;
        block  += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    for (i = 0; i < h; i++) {
        /* block is 8-byte aligned, so we're either in the
           left block (16-byte aligned) or in the right block (not) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0, 1, s0, s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block += line_size;
    }
}
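
/* Per output byte, avg_pixels8_altivec computes
   block[j] = (block[j] + pixels[j] + 1) >> 1 (the rounded average, as in
   the op_avg macro above); the vcprm merge keeps the other half of the
   16-byte line unchanged. */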

/* next one assumes that ((line_size % 8) == 0) */
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
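
/* For reference, a scalar sketch of the function above (8 pixels per row,
   (A + B + C + D + 2) >> 2 rounding); the vector version additionally
   reuses each row's horizontal sums as the next iteration's top row.
   Kept out of the build: */
#if 0
static void put_pixels8_xy2_scalar(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            block[j] = (pixels[j] + pixels[j + 1] + pixels[j + line_size] +
                        pixels[j + line_size + 1] + 2) >> 2;
        block  += line_size;
        pixels += line_size;
    }
}
#endif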

/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

static int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int sum;
    register const vector unsigned char vzero =
                            (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
    {
    register const vector signed short vprod1 = (const vector signed short)
                                               { 1,-1, 1,-1, 1,-1, 1,-1 };
    register const vector signed short vprod2 = (const vector signed short)
                                               { 1, 1,-1,-1, 1, 1,-1,-1 };
    register const vector signed short vprod3 = (const vector signed short)
                                               { 1, 1, 1, 1,-1,-1,-1,-1 };
    register const vector unsigned char perm1 = (const vector unsigned char)
        {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
         0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
    register const vector unsigned char perm2 = (const vector unsigned char)
        {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
         0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
    register const vector unsigned char perm3 = (const vector unsigned char)
        {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
         0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};
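
    /* Each vprodN/permN pair implements one in-vector butterfly stage:
       permN lines each lane up with its partner, and
       vec_mladd(x, vprodN, perm(x)) forms x[i] * (+/-1) + x[partner],
       i.e. the pairwise sums and differences of the Hadamard transform. */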

#define ONEITERBUTTERFLY(i, res)                                          \
    {                                                                     \
    register vector unsigned char src1, src2, srcO;                   \
    register vector unsigned char dst1, dst2, dstO;                   \
    register vector signed short srcV, dstV;                          \
    register vector signed short but0, but1, but2, op1, op2, op3;     \
    src1 = vec_ld(stride * i, src);                                   \
    src2 = vec_ld((stride * i) + 15, src);                            \
    srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
    dst1 = vec_ld(stride * i, dst);                                   \
    dst2 = vec_ld((stride * i) + 15, dst);                            \
    dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
    /* promote the unsigned chars to signed shorts */                 \
    /* we're in the 8x8 function, we only care for the first 8 */     \
    srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
           (vector signed char)srcO);                                 \
    dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
           (vector signed char)dstO);                                 \
    /* subtractions inside the first butterfly */                     \
    but0 = vec_sub(srcV, dstV);                                       \
    op1  = vec_perm(but0, but0, perm1);                               \
    but1 = vec_mladd(but0, vprod1, op1);                              \
    op2  = vec_perm(but1, but1, perm2);                               \
    but2 = vec_mladd(but1, vprod2, op2);                              \
    op3  = vec_perm(but2, but2, perm3);                               \
    res  = vec_mladd(but2, vprod3, op3);                              \
    }
    ONEITERBUTTERFLY(0, temp0);
    ONEITERBUTTERFLY(1, temp1);
    ONEITERBUTTERFLY(2, temp2);
    ONEITERBUTTERFLY(3, temp3);
    ONEITERBUTTERFLY(4, temp4);
    ONEITERBUTTERFLY(5, temp5);
    ONEITERBUTTERFLY(6, temp6);
    ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
    register vector signed int vsum;
    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
    }
    return sum;
}

/*
16x8 works with 16 elements; it avoids replicating loads and gives the
compiler more room for scheduling. It is only used from inside
hadamard8_diff16_altivec.

Unfortunately, gcc-3.3 seems to be a bit dumb: the compiled code contains a
LOT of spill code, since gcc (unlike xlc) cannot keep everything in registers
by itself. The following code therefore includes hand-made register
allocation. It is not clean, but on a 7450 the resulting code is much faster
(the best case falls from 700+ cycles to 550).

xlc does not add spill code, but it does not know how to schedule for the
7450, and its code is not much faster than gcc-3.3's on the 7450 (though it
uses 25% fewer instructions...).

On the 970, the hand-made register allocation is still a win (around 690
cycles vs. around 780), but xlc gets down to around 660 on the regular C
code...
*/

static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
    int sum;
    register vector signed short
        temp0 __asm__ ("v0"),
        temp1 __asm__ ("v1"),
        temp2 __asm__ ("v2"),
        temp3 __asm__ ("v3"),
        temp4 __asm__ ("v4"),
        temp5 __asm__ ("v5"),
        temp6 __asm__ ("v6"),
        temp7 __asm__ ("v7");
    register vector signed short
        temp0S __asm__ ("v8"),
        temp1S __asm__ ("v9"),
        temp2S __asm__ ("v10"),
        temp3S __asm__ ("v11"),
        temp4S __asm__ ("v12"),
        temp5S __asm__ ("v13"),
        temp6S __asm__ ("v14"),
        temp7S __asm__ ("v15");
    register const vector unsigned char vzero __asm__ ("v31") =
        (const vector unsigned char)vec_splat_u8(0);
    {
    register const vector signed short vprod1 __asm__ ("v16") =
        (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
    register const vector signed short vprod2 __asm__ ("v17") =
        (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
    register const vector signed short vprod3 __asm__ ("v18") =
        (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
    register const vector unsigned char perm1 __asm__ ("v19") =
        (const vector unsigned char)
        {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
         0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
    register const vector unsigned char perm2 __asm__ ("v20") =
        (const vector unsigned char)
        {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
         0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
    register const vector unsigned char perm3 __asm__ ("v21") =
        (const vector unsigned char)
        {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
         0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res1, res2)                               \
    {                                                                 \
    register vector unsigned char src1 __asm__ ("v22"),               \
                                  src2 __asm__ ("v23"),               \
                                  dst1 __asm__ ("v24"),               \
                                  dst2 __asm__ ("v25"),               \
                                  srcO __asm__ ("v22"),               \
                                  dstO __asm__ ("v23");               \
                                                                      \
    register vector signed short  srcV  __asm__ ("v24"),              \
                                  dstV  __asm__ ("v25"),              \
                                  srcW  __asm__ ("v26"),              \
                                  dstW  __asm__ ("v27"),              \
                                  but0  __asm__ ("v28"),              \
                                  but0S __asm__ ("v29"),              \
                                  op1   __asm__ ("v30"),              \
                                  but1  __asm__ ("v22"),              \
                                  op1S  __asm__ ("v23"),              \
                                  but1S __asm__ ("v24"),              \
                                  op2   __asm__ ("v25"),              \
                                  but2  __asm__ ("v26"),              \
                                  op2S  __asm__ ("v27"),              \
                                  but2S __asm__ ("v28"),              \
                                  op3   __asm__ ("v29"),              \
                                  op3S  __asm__ ("v30");              \
                                                                      \
    src1 = vec_ld(stride * i, src);                                   \
    src2 = vec_ld((stride * i) + 16, src);                            \
    srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
    dst1 = vec_ld(stride * i, dst);                                   \
    dst2 = vec_ld((stride * i) + 16, dst);                            \
    dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
    /* promote the unsigned chars to signed shorts */                 \
    srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
           (vector signed char)srcO);                                 \
    dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
           (vector signed char)dstO);                                 \
    srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
           (vector signed char)srcO);                                 \
    dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
           (vector signed char)dstO);                                 \
    /* subtractions inside the first butterfly */                     \
    but0 = vec_sub(srcV, dstV);                                       \
    but0S = vec_sub(srcW, dstW);                                      \
    op1 = vec_perm(but0, but0, perm1);                                \
    but1 = vec_mladd(but0, vprod1, op1);                              \
    op1S = vec_perm(but0S, but0S, perm1);                             \
    but1S = vec_mladd(but0S, vprod1, op1S);                           \
    op2 = vec_perm(but1, but1, perm2);                                \
    but2 = vec_mladd(but1, vprod2, op2);                              \
    op2S = vec_perm(but1S, but1S, perm2);                             \
    but2S = vec_mladd(but1S, vprod2, op2S);                           \
    op3 = vec_perm(but2, but2, perm3);                                \
    res1 = vec_mladd(but2, vprod3, op3);                              \
    op3S = vec_perm(but2S, but2S, perm3);                             \
    res2 = vec_mladd(but2S, vprod3, op3S);                            \
    }
    ONEITERBUTTERFLY(0, temp0, temp0S);
    ONEITERBUTTERFLY(1, temp1, temp1S);
    ONEITERBUTTERFLY(2, temp2, temp2S);
    ONEITERBUTTERFLY(3, temp3, temp3S);
    ONEITERBUTTERFLY(4, temp4, temp4S);
    ONEITERBUTTERFLY(5, temp5, temp5S);
    ONEITERBUTTERFLY(6, temp6, temp6S);
    ONEITERBUTTERFLY(7, temp7, temp7S);
    }
#undef ONEITERBUTTERFLY
    {
    register vector signed int vsum;
    register vector signed short line0S, line1S, line2S, line3S, line4S,
                                 line5S, line6S, line7S, line0BS,line2BS,
                                 line1BS,line3BS,line4BS,line6BS,line5BS,
                                 line7BS,line0CS,line4CS,line1CS,line5CS,
                                 line2CS,line6CS,line3CS,line7CS;

    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);

    line0S = vec_add(temp0S, temp1S);
    line1S = vec_sub(temp0S, temp1S);
    line2S = vec_add(temp2S, temp3S);
    line3S = vec_sub(temp2S, temp3S);
    line4S = vec_add(temp4S, temp5S);
    line5S = vec_sub(temp4S, temp5S);
    line6S = vec_add(temp6S, temp7S);
    line7S = vec_sub(temp6S, temp7S);

    line0BS = vec_add(line0S, line2S);
    line2BS = vec_sub(line0S, line2S);
    line1BS = vec_add(line1S, line3S);
    line3BS = vec_sub(line1S, line3S);
    line4BS = vec_add(line4S, line6S);
    line6BS = vec_sub(line4S, line6S);
    line5BS = vec_add(line5S, line7S);
    line7BS = vec_sub(line5S, line7S);

    line0CS = vec_add(line0BS, line4BS);
    line4CS = vec_sub(line0BS, line4BS);
    line1CS = vec_add(line1BS, line5BS);
    line5CS = vec_sub(line1BS, line5BS);
    line2CS = vec_add(line2BS, line6BS);
    line6CS = vec_sub(line2BS, line6BS);
    line3CS = vec_add(line3BS, line7BS);
    line7CS = vec_sub(line3BS, line7BS);

    vsum = vec_sum4s(vec_abs(line0CS), vsum);
    vsum = vec_sum4s(vec_abs(line1CS), vsum);
    vsum = vec_sum4s(vec_abs(line2CS), vsum);
    vsum = vec_sum4s(vec_abs(line3CS), vsum);
    vsum = vec_sum4s(vec_abs(line4CS), vsum);
    vsum = vec_sum4s(vec_abs(line5CS), vsum);
    vsum = vec_sum4s(vec_abs(line6CS), vsum);
    vsum = vec_sum4s(vec_abs(line7CS), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
    }
    return sum;
}

static int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int score;
    score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h==16) {
        dst += 8*stride;
        src += 8*stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
    return score;
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

void ff_dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;
    c->sad[0] = sad16_altivec;
    c->sad[1] = sad8_altivec;
    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1] = sse8_altivec;
    c->sse[0] = sse16_altivec;
    c->pix_sum = pix_sum_altivec;
    c->diff_pixels = diff_pixels_altivec;
    c->add_bytes = add_bytes_altivec;
    if (!high_bit_depth) {
        c->get_pixels = get_pixels_altivec;
        c->clear_block = clear_block_altivec;
        c->put_pixels_tab[0][0] = ff_put_pixels16_altivec;
        /* the two functions do the same thing, so use the same code */
        c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
        c->avg_pixels_tab[0][0] = ff_avg_pixels16_altivec;
        c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
    }

    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
}