/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avutil.h"
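
/* ALTIVEC_TRANSPOSE_8x8_SHORT transposes an 8x8 block of 16-bit elements
 * held in eight vector registers: three rounds of vec_mergeh/vec_mergel
 * interleavings perform the whole transpose in registers, without any
 * loads or stores. */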

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                          \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1;         \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1;         \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2;         \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2;         \
        tempA1 = vec_mergeh (src_a, src_e);                       \
        tempB1 = vec_mergel (src_a, src_e);                       \
        tempC1 = vec_mergeh (src_b, src_f);                       \
        tempD1 = vec_mergel (src_b, src_f);                       \
        tempE1 = vec_mergeh (src_c, src_g);                       \
        tempF1 = vec_mergel (src_c, src_g);                       \
        tempG1 = vec_mergeh (src_d, src_h);                       \
        tempH1 = vec_mergel (src_d, src_h);                       \
        tempA2 = vec_mergeh (tempA1, tempE1);                     \
        tempB2 = vec_mergel (tempA1, tempE1);                     \
        tempC2 = vec_mergeh (tempB1, tempF1);                     \
        tempD2 = vec_mergel (tempB1, tempF1);                     \
        tempE2 = vec_mergeh (tempC1, tempG1);                     \
        tempF2 = vec_mergel (tempC1, tempG1);                     \
        tempG2 = vec_mergeh (tempD1, tempH1);                     \
        tempH2 = vec_mergel (tempD1, tempH1);                     \
        src_a = vec_mergeh (tempA2, tempE2);                      \
        src_b = vec_mergel (tempA2, tempE2);                      \
        src_c = vec_mergeh (tempB2, tempF2);                      \
        src_d = vec_mergel (tempB2, tempF2);                      \
        src_e = vec_mergeh (tempC2, tempG2);                      \
        src_f = vec_mergel (tempC2, tempG2);                      \
        src_g = vec_mergeh (tempD2, tempH2);                      \
        src_h = vec_mergel (tempD2, tempH2);                      \
    } while (0)


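/* Rough overview: vertClassify_altivec counts how many vertically adjacent
 * line pairs differ by less than the DC threshold.  If enough pairs match
 * (numEq > flatnessThreshold) the block is considered flat and a min/max-style
 * range check against QP decides between "deblockable" (return 1) and "leave
 * alone" (return 0); otherwise 2 is returned so the caller can use the
 * default filter instead of the low-pass. */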
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    This code makes no assumptions about src or stride.
    One could avoid recomputing the perm vector by assuming
    (stride % 16) == 0, but unfortunately this is not always true.
    */
    short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    DECLARE_ALIGNED(16, short, data)[8] =
                    {
                        data_0,
                        data_0 * 2 + 1,
                        c->QP * 2,
                        c->QP * 4
                    };
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);
    vector signed short v_data = vec_ld(0, data);
    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
                        v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
//FIXME avoid this mess if possible
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride;
    vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
                         v_srcA4, v_srcA5, v_srcA6, v_srcA7;

    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;

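/* Classic AltiVec misaligned-load idiom: vec_lvsl derives a permute vector
 * from the address, two aligned vec_ld's cover the straddled bytes (the
 * second one only when actually needed), vec_perm extracts the 16 wanted
 * bytes, and vec_mergeh with zero widens the first 8 pixels to 16 bits. */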
#define LOAD_LINE(i)                                                    \
    {                                                                   \
    vector unsigned char perm##i = vec_lvsl(j##i, src2);                \
    vector unsigned char v_srcA2##i;                                    \
    vector unsigned char v_srcA1##i = vec_ld(j##i, src2);               \
    if (two_vectors)                                                    \
        v_srcA2##i = vec_ld(j##i + 16, src2);                           \
    v_srcA##i =                                                         \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                      \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i); }

#define LOAD_LINE_ALIGNED(i)                                            \
    v_srcA##i = vec_ld(j##i, src2);                                     \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i)

    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

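/* ITER turns each neighbouring-line comparison into a vector of 0/1 flags;
 * vec_sum4s accumulates the flags and the vec_sums/vec_ste sequence further
 * down reduces them to the scalar numEq. */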
#define ITER(i, j)                                                      \
    const vector signed short v_diff##i =                               \
        vec_sub(v_srcAss##i, v_srcAss##j);                              \
    const vector signed short v_sum##i =                                \
        vec_add(v_diff##i, v_dcOffset);                                 \
    const vector signed short v_comp##i =                               \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold);                  \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);

    {
        ITER(0, 1)
        ITER(1, 2)
        ITER(2, 3)
        ITER(3, 4)
        ITER(4, 5)
        ITER(5, 6)
        ITER(6, 7)

        v_numEq = vec_sum4s(v_part0, v_numEq);
        v_numEq = vec_sum4s(v_part1, v_numEq);
        v_numEq = vec_sum4s(v_part2, v_numEq);
        v_numEq = vec_sum4s(v_part3, v_numEq);
        v_numEq = vec_sum4s(v_part4, v_numEq);
        v_numEq = vec_sum4s(v_part5, v_numEq);
        v_numEq = vec_sum4s(v_part6, v_numEq);
    }

#undef ITER

    v_numEq = vec_sums(v_numEq, zero);

    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold){
        const vector unsigned char mmoP1 = (const vector unsigned char)
            {0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
             0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B};
        const vector unsigned char mmoP2 = (const vector unsigned char)
            {0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
             0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f};
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);

        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}

static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
    This code makes no assumptions about src or stride.
    One could avoid recomputing the perm vector by assuming
    (stride % 16) == 0, but unfortunately this is not always true.
    Quite a few loads/stores could also be removed by assuming
    proper alignment of src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    DECLARE_ALIGNED(16, short, qp)[8] = {c->QP};
    vector signed short vqp = vec_ld(0, qp);
    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3), av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6), av_uninit(vbA7), av_uninit(vbA8), vbA9;
    vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3), av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6), av_uninit(vbB7), av_uninit(vbB8), vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
    vector unsigned char perml0, perml1, perml2, perml3, perml4,
                         perml5, perml6, perml7, perml8, perml9;
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride,
                 j8 = 8 * stride,
                 j9 = 9 * stride;

    vqp = vec_splat(vqp, 0);

    src2 += stride*3;

#define LOAD_LINE(i)                                                    \
    perml##i = vec_lvsl(i * stride, src2);                              \
    vbA##i = vec_ld(i * stride, src2);                                  \
    vbB##i = vec_ld(i * stride + 16, src2);                             \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i);                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i)                                            \
    vbT##i = vec_ld(j##i, src2);                                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)vbT##i)

      /* Special-casing the aligned case is worthwhile, as all calls from
       * the (transposed) horizontal deblocks will be aligned, in addition
       * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
          LOAD_LINE_ALIGNED(0);
          LOAD_LINE_ALIGNED(1);
          LOAD_LINE_ALIGNED(2);
          LOAD_LINE_ALIGNED(3);
          LOAD_LINE_ALIGNED(4);
          LOAD_LINE_ALIGNED(5);
          LOAD_LINE_ALIGNED(6);
          LOAD_LINE_ALIGNED(7);
          LOAD_LINE_ALIGNED(8);
          LOAD_LINE_ALIGNED(9);
    } else {
          LOAD_LINE(0);
          LOAD_LINE(1);
          LOAD_LINE(2);
          LOAD_LINE(3);
          LOAD_LINE(4);
          LOAD_LINE(5);
          LOAD_LINE(6);
          LOAD_LINE(7);
          LOAD_LINE(8);
          LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
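    /* The block below is a running-sum formulation of the vertical low-pass:
     * with "first"/"last" being the edge-clamped outer pixels, the partial
     * sums follow roughly
     *     sumsB0 = 4*first + l1 + l2 + l3 + 4
     *     sumsBn = sumsB(n-1) - (pixel leaving the window) + (pixel entering)
     * and each output line j becomes (sumsB(j-1) + 2*l(j) + sumsB(j+1)) >> 4,
     * mirroring the C doVertLowPass filter. */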
    {
        const vector unsigned short v_2 = vec_splat_u16(2);
        const vector unsigned short v_4 = vec_splat_u16(4);

        const vector signed short v_diff01 = vec_sub(vb0, vb1);
        const vector unsigned short v_cmp01 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
        const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
        const vector signed short v_diff89 = vec_sub(vb8, vb9);
        const vector unsigned short v_cmp89 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
        const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

        const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
        const vector signed short temp02 = vec_add(vb2, vb3);
        const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
        const vector signed short v_sumsB0 = vec_add(temp02, temp03);

        const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
        const vector signed short v_sumsB1 = vec_add(temp11, vb4);

        const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
        const vector signed short v_sumsB2 = vec_add(temp21, vb5);

        const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
        const vector signed short v_sumsB3 = vec_add(temp31, vb6);

        const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
        const vector signed short v_sumsB4 = vec_add(temp41, vb7);

        const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
        const vector signed short v_sumsB5 = vec_add(temp51, vb8);

        const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
        const vector signed short v_sumsB6 = vec_add(temp61, v_last);

        const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
        const vector signed short v_sumsB7 = vec_add(temp71, v_last);

        const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
        const vector signed short v_sumsB8 = vec_add(temp81, v_last);

        const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
        const vector signed short v_sumsB9 = vec_add(temp91, v_last);

    #define COMPUTE_VR(i, j, k)                                             \
        const vector signed short temps1##i =                               \
            vec_add(v_sumsB##i, v_sumsB##k);                                \
        const vector signed short temps2##i =                               \
            vec_mladd(vb##j, (vector signed short)v_2, temps1##i);          \
        const vector signed short  vr##j = vec_sra(temps2##i, v_4)

        COMPUTE_VR(0, 1, 2);
        COMPUTE_VR(1, 2, 3);
        COMPUTE_VR(2, 3, 4);
        COMPUTE_VR(3, 4, 5);
        COMPUTE_VR(4, 5, 6);
        COMPUTE_VR(5, 6, 7);
        COMPUTE_VR(6, 7, 8);
        COMPUTE_VR(7, 8, 9);

        const vector signed char neg1 = vec_splat_s8(-1);
        const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                         0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
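
/* Misaligned-store idiom: vec_lvsr gives the rotation for the store address,
 * a 0x00/0xFF boundary mask is built by permuting zero and neg1, the packed
 * result (first eight pixels from the filtered vf, the rest from the original
 * line via permHH) is rotated into place with vec_perm, and vec_sel merges it
 * with the originally loaded surrounding bytes before the two aligned
 * vec_st's. */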

#define PACK_AND_STORE(i)                                       \
{   const vector unsigned char perms##i =                       \
        vec_lvsr(i * stride, src2);                             \
    const vector unsigned char vf##i =                          \
        vec_packsu(vr##i, (vector signed short)zero);           \
    const vector unsigned char vg##i =                          \
        vec_perm(vf##i, vbT##i, permHH);                        \
    const vector unsigned char mask##i =                        \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                         \
        vec_perm(vg##i, vg##i, perms##i);                       \
    const vector unsigned char svA##i =                         \
        vec_sel(vbA##i, vg2##i, mask##i);                       \
    const vector unsigned char svB##i =                         \
        vec_sel(vg2##i, vbB##i, mask##i);                       \
    vec_st(svA##i, i * stride, src2);                           \
    vec_st(svB##i, i * stride + 16, src2);}

#define PACK_AND_STORE_ALIGNED(i)                               \
{   const vector unsigned char vf##i =                          \
        vec_packsu(vr##i, (vector signed short)zero);           \
    const vector unsigned char vg##i =                          \
        vec_perm(vf##i, vbT##i, permHH);                        \
    vec_st(vg##i, i * stride, src2);}

        /* Special-casing the aligned case is worthwhile, as all calls from
         * the (transposed) horizontal deblocks will be aligned, in addition
         * to the naturally aligned vertical deblocks. */
        if (properStride && srcAlign) {
            PACK_AND_STORE_ALIGNED(1)
            PACK_AND_STORE_ALIGNED(2)
            PACK_AND_STORE_ALIGNED(3)
            PACK_AND_STORE_ALIGNED(4)
            PACK_AND_STORE_ALIGNED(5)
            PACK_AND_STORE_ALIGNED(6)
            PACK_AND_STORE_ALIGNED(7)
            PACK_AND_STORE_ALIGNED(8)
        } else {
            PACK_AND_STORE(1)
            PACK_AND_STORE(2)
            PACK_AND_STORE(3)
            PACK_AND_STORE(4)
            PACK_AND_STORE(5)
            PACK_AND_STORE(6)
            PACK_AND_STORE(7)
            PACK_AND_STORE(8)
        }
    #undef PACK_AND_STORE
    #undef PACK_AND_STORE_ALIGNED
    }
}



static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    This code makes no assumptions about src or stride.
    One could avoid recomputing the perm vector by assuming
    (stride % 16) == 0, but unfortunately this is not always true.
    Quite a few loads/stores could also be removed by assuming
    proper alignment of src & stride :-(
    */
    uint8_t *src2 = src + stride*3;
    const vector signed int zero = vec_splat_s32(0);
    DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP};
    vector signed short vqp = vec_splat(
                                (vector signed short)vec_ld(0, qp), 0);

#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, src2);                                     \
    const vector unsigned char vbA##i =                                 \
        vec_ld(i * stride, src2);                                       \
    const vector unsigned char vbB##i =                                 \
        vec_ld(i * stride + 16, src2);                                  \
    const vector unsigned char vbT##i =                                 \
        vec_perm(vbA##i, vbB##i, perm##i);                              \
    const vector signed short vb##i =                                   \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

     LOAD_LINE(1);
     LOAD_LINE(2);
     LOAD_LINE(3);
     LOAD_LINE(4);
     LOAD_LINE(5);
     LOAD_LINE(6);
     LOAD_LINE(7);
     LOAD_LINE(8);
#undef LOAD_LINE

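     /* Vectorised form of the default deblock filter: the middle energy
      *     mE = 5*(l5 - l4) + 2*(l3 - l6)
      * is compared with left/right energies built the same way; the correction
      *     d = (5 * max(|mE| - min(|lE|, |rE|), 0) + 32) >> 6
      * takes the sign of -mE, is clamped to the local half-step q = (l4-l5)/2,
      * is applied only where |mE| < 8*QP, and is then subtracted from line 4
      * and added to line 5. */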
     const vector signed short v_1 = vec_splat_s16(1);
     const vector signed short v_2 = vec_splat_s16(2);
     const vector signed short v_5 = vec_splat_s16(5);
     const vector signed short v_32 = vec_sl(v_1,
                                             (vector unsigned short)v_5);
     /* middle energy */
     const vector signed short l3minusl6 = vec_sub(vb3, vb6);
     const vector signed short l5minusl4 = vec_sub(vb5, vb4);
     const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
     const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
     const vector signed short absmE = vec_abs(mE);
     /* left & right energy */
     const vector signed short l1minusl4 = vec_sub(vb1, vb4);
     const vector signed short l3minusl2 = vec_sub(vb3, vb2);
     const vector signed short l5minusl8 = vec_sub(vb5, vb8);
     const vector signed short l7minusl6 = vec_sub(vb7, vb6);
     const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
     const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
     const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
     const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
     /* d */
     const vector signed short ddiff = vec_sub(absmE,
                                               vec_min(vec_abs(lE),
                                                       vec_abs(rE)));
     const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
     const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
     const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
     const vector signed short minusd = vec_sub((vector signed short)zero, d);
     const vector signed short finald = vec_sel(minusd,
                                                d,
                                                vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                          (vector signed short)zero));
     /* q */
     const vector signed short qtimes2 = vec_sub(vb4, vb5);
     /* for a shift right to behave like /2, we need to add one
        to all negative integers */
     const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                   v_1,
                                                   vec_cmplt(qtimes2, (vector signed short)zero));
     const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
     /* clamp */
     const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
     const vector signed short dclamp_P = vec_min(dclamp_P1, q);
     const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
     const vector signed short dclamp_N = vec_max(dclamp_N1, q);

     const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                       dclamp_P,
                                                       vec_cmpgt(q, (vector signed short)zero));
     const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                 dclampedfinal,
                                                 vec_cmplt(absmE, vqp));
     /* add/subtract to l4 and l5 */
     const vector signed short vb4minusd = vec_sub(vb4, dornotd);
     const vector signed short vb5plusd  = vec_add(vb5, dornotd);
     /* finally, stores */
     const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
     const vector unsigned char st5 = vec_packsu(vb5plusd,  (vector signed short)zero);

     const vector signed char neg1 = vec_splat_s8(-1);
     const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                      0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define STORE(i)                                                \
{    const vector unsigned char perms##i =                      \
         vec_lvsr(i * stride, src2);                            \
     const vector unsigned char vg##i =                         \
         vec_perm(st##i, vbT##i, permHH);                       \
     const vector unsigned char mask##i =                       \
         vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
     const vector unsigned char vg2##i =                        \
         vec_perm(vg##i, vg##i, perms##i);                      \
     const vector unsigned char svA##i =                        \
         vec_sel(vbA##i, vg2##i, mask##i);                      \
     const vector unsigned char svB##i =                        \
         vec_sel(vg2##i, vbB##i, mask##i);                      \
     vec_st(svA##i, i * stride, src2);                          \
     vec_st(svB##i, i * stride + 16, src2);}

     STORE(4)
     STORE(5)
}

static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);
    const vector signed char neg1 = vec_splat_s8(-1);

    const vector unsigned char permA1 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
         0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA2 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
         0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA1inc = (vector unsigned char)
        {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char permA2inc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char magic = (vector unsigned char)
        {0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char extractPerm = (vector unsigned char)
        {0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
         0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01};
    const vector unsigned char extractPermInc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
         0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01};
    const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        {0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char eightLeft = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08};

    /*
    This code makes no assumptions about src or stride.
    One could avoid recomputing the perm vector by assuming
    (stride % 16) == 0, but unfortunately this is not always true.
    Quite a few loads/stores could also be removed by assuming
    proper alignment of src & stride :-(
    */
    uint8_t *srcCopy = src;
    DECLARE_ALIGNED(16, uint8_t, dt)[16] = { deringThreshold };
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i)                                                  \
    const vector unsigned char perm##i =                              \
        vec_lvsl(i * stride, srcCopy);                                \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy);         \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);    \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE

    vector unsigned char v_avg;
    DECLARE_ALIGNED(16, signed int, S)[8];
    DECLARE_ALIGNED(16, int, tQP2)[4] = { c->QP/2 + 1 };
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);

    {
    const vector unsigned char trunc_perm = (vector unsigned char)
        {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
         0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18};
    const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
    const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
    const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
    const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

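/* EXTRACT(op) reduces the 8x8 interior of the block (packed above into four
 * vectors of truncated rows) to a single byte splatted across v_min or v_max,
 * using a log2-depth tree of vec_min/vec_max and mergeh/mergel shuffles; the
 * resulting dynamic range feeds the deringThreshold test below. */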
#define EXTRACT(op) do {                                                \
    const vector unsigned char s_1   = vec_##op(trunc_src12, trunc_src34); \
    const vector unsigned char s_2   = vec_##op(trunc_src56, trunc_src78); \
    const vector unsigned char s_6   = vec_##op(s_1, s_2);     \
    const vector unsigned char s_8h  = vec_mergeh(s_6, s_6);   \
    const vector unsigned char s_8l  = vec_mergel(s_6, s_6);   \
    const vector unsigned char s_9   = vec_##op(s_8h, s_8l);   \
    const vector unsigned char s_9h  = vec_mergeh(s_9, s_9);   \
    const vector unsigned char s_9l  = vec_mergel(s_9, s_9);   \
    const vector unsigned char s_10  = vec_##op(s_9h, s_9l);   \
    const vector unsigned char s_10h = vec_mergeh(s_10, s_10); \
    const vector unsigned char s_10l = vec_mergel(s_10, s_10); \
    const vector unsigned char s_11  = vec_##op(s_10h, s_10l); \
    const vector unsigned char s_11h = vec_mergeh(s_11, s_11); \
    const vector unsigned char s_11l = vec_mergel(s_11, s_11); \
    v_##op = vec_##op(s_11h, s_11l);                           \
} while (0)

    vector unsigned char v_min;
    vector unsigned char v_max;
    EXTRACT(min);
    EXTRACT(max);
#undef EXTRACT

    if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
        return;

    v_avg = vec_avg(v_min, v_max);
    }

    {
    const vector unsigned short mask1 = (vector unsigned short)
                                        {0x0001, 0x0002, 0x0004, 0x0008,
                                         0x0010, 0x0020, 0x0040, 0x0080};
    const vector unsigned short mask2 = (vector unsigned short)
                                        {0x0100, 0x0200, 0x0000, 0x0000,
                                         0x0000, 0x0000, 0x0000, 0x0000};

    const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
    const vector unsigned int vuint32_1 = vec_splat_u32(1);

    vector signed int sumA2;
    vector signed int sumB2;
    vector signed int sum0, sum1, sum2, sum3, sum4;
    vector signed int sum5, sum6, sum7, sum8, sum9;

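/* COMPARE(i) condenses "pixel above the block average" into a small bitmask
 * per line (bit k set when byte k of line i exceeds v_avg, for the first ten
 * bytes); the bit-twiddling below and the scalar-indexed F2 step then use
 * these masks, via S[], to decide which pixels qualify for dering filtering. */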
#define COMPARE(i)                                                      \
    do {                                                                \
        const vector unsigned char cmp =                                \
            (vector unsigned char)vec_cmpgt(src##i, v_avg);             \
        const vector unsigned short cmpHi =                             \
            (vector unsigned short)vec_mergeh(cmp, cmp);                \
        const vector unsigned short cmpLi =                             \
            (vector unsigned short)vec_mergel(cmp, cmp);                \
        const vector signed short cmpHf =                               \
            (vector signed short)vec_and(cmpHi, mask1);                 \
        const vector signed short cmpLf =                               \
            (vector signed short)vec_and(cmpLi, mask2);                 \
        const vector signed int sump = vec_sum4s(cmpHf, zero);          \
        const vector signed int sumq = vec_sum4s(cmpLf, sump);          \
        sum##i  = vec_sums(sumq, zero);                                 \
    } while (0)

    COMPARE(0);
    COMPARE(1);
    COMPARE(2);
    COMPARE(3);
    COMPARE(4);
    COMPARE(5);
    COMPARE(6);
    COMPARE(7);
    COMPARE(8);
    COMPARE(9);
#undef COMPARE

    {
    const vector signed int sump02 = vec_mergel(sum0, sum2);
    const vector signed int sump13 = vec_mergel(sum1, sum3);
    const vector signed int sumA = vec_mergel(sump02, sump13);

    const vector signed int sump46 = vec_mergel(sum4, sum6);
    const vector signed int sump57 = vec_mergel(sum5, sum7);
    const vector signed int sumB = vec_mergel(sump46, sump57);

    const vector signed int sump8A = vec_mergel(sum8, zero);
    const vector signed int sump9B = vec_mergel(sum9, zero);
    const vector signed int sumC = vec_mergel(sump8A, sump9B);

    const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
    const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
    const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
    const vector signed int t2A = vec_or(sumA, tA);
    const vector signed int t2B = vec_or(sumB, tB);
    const vector signed int t2C = vec_or(sumC, tC);
    const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                          vec_sl(t2A, vuint32_1));
    const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                          vec_sl(t2B, vuint32_1));
    const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                          vec_sl(t2C, vuint32_1));
    const vector signed int yA = vec_and(t2A, t3A);
    const vector signed int yB = vec_and(t2B, t3B);
    const vector signed int yC = vec_and(t2C, t3C);

    const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
    const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
    const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
    const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
    const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
    const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
    const vector signed int sumAp = vec_and(yA,
                                            vec_and(sumAd4,sumAd8));
    const vector signed int sumBp = vec_and(yB,
                                            vec_and(sumBd4,sumBd8));
    sumA2 = vec_or(sumAp,
                   vec_sra(sumAp,
                           vuint32_16));
    sumB2  = vec_or(sumBp,
                    vec_sra(sumBp,
                            vuint32_16));
    }
    vec_st(sumA2, 0, S);
    vec_st(sumB2, 16, S);
    }

    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */

#define F_INIT()                                       \
    vector unsigned char tenRightM = tenRight;         \
    vector unsigned char permA1M = permA1;             \
    vector unsigned char permA2M = permA2;             \
    vector unsigned char extractPermM = extractPerm

#define F2(i, j, k, l)                                                  \
    if (S[i] & (1 << (l+1))) {                                          \
        const vector unsigned char a_A = vec_perm(src##i, src##j, permA1M); \
        const vector unsigned char a_B = vec_perm(a_A, src##k, permA2M); \
        const vector signed int a_sump =                                \
            (vector signed int)vec_msum(a_B, magic, (vector unsigned int)zero);\
        vector signed int F = vec_sr(vec_sums(a_sump, vsint32_8), vuint32_4); \
        const vector signed int p =                                     \
            (vector signed int)vec_perm(src##j, (vector unsigned char)zero, \
                                        extractPermM);                  \
        const vector signed int sum  = vec_add(p, vQP2);                \
        const vector signed int diff = vec_sub(p, vQP2);                \
        vector signed int newpm;                                        \
        vector unsigned char newpm2, mask;                              \
        F = vec_splat(F, 3);                                            \
        if (vec_all_lt(sum, F))                                         \
            newpm = sum;                                                \
        else if (vec_all_gt(diff, F))                                   \
            newpm = diff;                                               \
        else newpm = F;                                                 \
        newpm2 = vec_splat((vector unsigned char)newpm, 15);            \
        mask = vec_add(identity, tenRightM);                            \
        src##j = vec_perm(src##j, newpm2, mask);                        \
    }                                                                   \
    permA1M = vec_add(permA1M, permA1inc);                              \
    permA2M = vec_add(permA2M, permA2inc);                              \
    tenRightM = vec_sro(tenRightM, eightLeft);                          \
    extractPermM = vec_add(extractPermM, extractPermInc)

#define ITER(i, j, k) do {                      \
    F_INIT();                                   \
    F2(i, j, k, 0);                             \
    F2(i, j, k, 1);                             \
    F2(i, j, k, 2);                             \
    F2(i, j, k, 3);                             \
    F2(i, j, k, 4);                             \
    F2(i, j, k, 5);                             \
    F2(i, j, k, 6);                             \
    F2(i, j, k, 7);                             \
} while (0)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

#define STORE_LINE(i) do {                              \
    const vector unsigned char permST =                 \
        vec_lvsr(i * stride, srcCopy);                  \
    const vector unsigned char maskST =                 \
        vec_perm((vector unsigned char)zero,            \
                 (vector unsigned char)neg1, permST);   \
    src##i = vec_perm(src##i, src##i, permST);          \
    sA##i = vec_sel(sA##i, src##i, maskST);             \
    sB##i = vec_sel(src##i, sB##i, maskST);             \
    vec_st(sA##i, i * stride, srcCopy);                 \
    vec_st(sB##i, i * stride + 16, srcCopy);            \
} while (0)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);

#undef STORE_LINE
#undef ITER
#undef F2
}

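/* The horizontal filters and do_a_deblock have no AltiVec implementation
 * here; map them to the plain C versions. */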
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)

static inline void tempNoiseReducer_altivec(uint8_t *src, int stride,
                                            uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

#define LOAD_LINE(src, i)                                               \
    register int j##src##i = i * stride;                                \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);       \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i =                          \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);         \
    vector signed short v_##src##Ass##i =                               \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlurred, 0);
    LOAD_LINE(tempBlurred, 1);
    LOAD_LINE(tempBlurred, 2);
    LOAD_LINE(tempBlurred, 3);
    LOAD_LINE(tempBlurred, 4);
    LOAD_LINE(tempBlurred, 5);
    LOAD_LINE(tempBlurred, 6);
    LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE

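/* vec_msums accumulates both the sum of squared differences (v_dp) and the
 * plain sum of differences (v_sysdp) between the current block and its
 * tempBlurred copy; the vec_sums/vec_splat/vec_ste sequence below reduces
 * them to the scalars d and sysd. */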
#define ACCUMULATE_DIFFS(i) do {                                \
        vector signed short v_d = vec_sub(v_tempBlurredAss##i,  \
                                          v_srcAss##i);         \
        v_dp = vec_msums(v_d, v_d, v_dp);                       \
        v_sysdp = vec_msums(v_d, vsint16_1, v_sysdp);           \
    } while (0)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
886 887
#undef ACCUMULATE_DIFFS

    tempBlurredPast[127] = maxNoise[0];
    tempBlurredPast[128] = maxNoise[1];
    tempBlurredPast[129] = maxNoise[2];

    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    d = (4*d
         +(*(tempBlurredPast-256))
         +(*(tempBlurredPast-1))+ (*(tempBlurredPast+1))
         +(*(tempBlurredPast+256))
         +4)>>3;

    *tempBlurredPast = i;

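    /* Blend strength depends on where the spatio-temporally smoothed
     * difference d falls relative to maxNoise[0..2]: take the new block
     * unchanged when the change is too large, otherwise blend new and stored
     * pixels 1:1, 1:7 or 1:3 (new:old) into tempBlurred. */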
    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i) do {                                                      \
            const vector signed short v_temp =                          \
                vec_mladd(v_tempBlurredAss##i, vsint16_7, v_srcAss##i); \
            const vector signed short v_temp2 = vec_add(v_temp, vsint16_4); \
            v_tempBlurredAss##i = vec_sr(v_temp2, vuint16_3);           \
        } while (0)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i) do {                                              \
            const vector signed short v_temp =                  \
                vec_mladd(v_tempBlurredAss##i, vsint16_3, v_srcAss##i); \
            const vector signed short v_temp2 = vec_add(v_temp, vsint16_2); \
            v_tempBlurredAss##i =                                       \
                vec_sr(v_temp2, (vector unsigned short)vsint16_2);      \
        } while (0)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }

#define PACK_AND_STORE(src, i) do {                                      \
    const vector unsigned char perms = vec_lvsr(i * stride, src);        \
    const vector unsigned char vf =                                      \
        vec_packsu(v_tempBlurredAss##i, (vector signed short)zero);     \
    const vector unsigned char vg = vec_perm(vf, v_##src##A##i, permHH); \
    const vector unsigned char mask =                                    \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms); \
    const vector unsigned char vg2 = vec_perm(vg, vg, perms);            \
    const vector unsigned char svA = vec_sel(v_##src##A1##i, vg2, mask); \
    const vector unsigned char svB = vec_sel(vg2, v_##src##A2##i, mask); \
    vec_st(svA, i * stride, src);                                        \
    vec_st(svB, i * stride + 16, src);                                   \
} while (0)

    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlurred, 0);
    PACK_AND_STORE(tempBlurred, 1);
    PACK_AND_STORE(tempBlurred, 2);
    PACK_AND_STORE(tempBlurred, 3);
    PACK_AND_STORE(tempBlurred, 4);
    PACK_AND_STORE(tempBlurred, 5);
    PACK_AND_STORE(tempBlurred, 6);
    PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}

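/* Transposes a 16x8 block of bytes from src (arbitrary stride) into a packed,
 * 16-byte-aligned scratch buffer, so the horizontal deblocking paths can run
 * the vertical AltiVec filters on transposed data. */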
static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src);          \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src);          \
    vector unsigned char srcA##i = vec_ld(i * stride, src);             \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src);        \
    vector unsigned char srcC##i = vec_ld(j * stride, src);             \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src);        \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);

    vector unsigned char temp0  = vec_mergeh(tempA, tempI);
    vector unsigned char temp1  = vec_mergel(tempA, tempI);
    vector unsigned char temp2  = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3  = vec_mergel(tempB, tempJ);
    vector unsigned char temp4  = vec_mergeh(tempC, tempK);
    vector unsigned char temp5  = vec_mergel(tempC, tempK);
    vector unsigned char temp6  = vec_mergeh(tempD, tempL);
    vector unsigned char temp7  = vec_mergel(tempD, tempL);
    vector unsigned char temp8  = vec_mergeh(tempE, tempM);
    vector unsigned char temp9  = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);

    temp0  = vec_mergeh(tempA, tempI);
    temp1  = vec_mergel(tempA, tempI);
    temp2  = vec_mergeh(tempB, tempJ);
    temp3  = vec_mergel(tempB, tempJ);
    temp4  = vec_mergeh(tempC, tempK);
    temp5  = vec_mergel(tempC, tempK);
    temp6  = vec_mergeh(tempD, tempL);
    temp7  = vec_mergel(tempD, tempL);
    temp8  = vec_mergeh(tempE, tempM);
    temp9  = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);

    vec_st(temp0,    0, dst);
    vec_st(temp1,   16, dst);
    vec_st(temp2,   32, dst);
    vec_st(temp3,   48, dst);
    vec_st(temp4,   64, dst);
    vec_st(temp5,   80, dst);
    vec_st(temp6,   96, dst);
    vec_st(temp7,  112, dst);
    vec_st(temp8,  128, dst);
    vec_st(temp9,  144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}

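/* Inverse of the transpose above: rebuilds eight lines of 16 pixels from the
 * packed scratch buffer and writes them back to dst (arbitrary stride) using
 * the usual lvsr/mask/vec_sel misaligned-store idiom. */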
static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);
    const vector signed   char neg1 = vec_splat_s8(-1);

#define LOAD_DOUBLE_LINE(i, j)                                  \
    vector unsigned char src##i = vec_ld(i * 16, src);            \
    vector unsigned char src##j = vec_ld(j * 16, src)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);


#define STORE_DOUBLE_LINE(i, j) do {                                    \
    vector unsigned char dstAi = vec_ld(i * stride, dst);               \
    vector unsigned char dstBi = vec_ld(i * stride + 16, dst);          \
    vector unsigned char dstAj = vec_ld(j * stride, dst);               \
    vector unsigned char dstBj = vec_ld(j * stride + 16, dst);          \
    vector unsigned char aligni = vec_lvsr(i * stride, dst);            \
    vector unsigned char alignj = vec_lvsr(j * stride, dst);            \
    vector unsigned char maski =                                        \
        vec_perm(zero, (vector unsigned char)neg1, aligni);             \
    vector unsigned char maskj =                                        \
        vec_perm(zero, (vector unsigned char)neg1, alignj);             \
    vector unsigned char dstRi = vec_perm(temp##i, temp##i, aligni);    \
    vector unsigned char dstRj = vec_perm(temp##j, temp##j, alignj);    \
    vector unsigned char dstAFi = vec_sel(dstAi, dstRi, maski);         \
    vector unsigned char dstBFi = vec_sel(dstRi, dstBi, maski);         \
    vector unsigned char dstAFj = vec_sel(dstAj, dstRj, maskj);         \
    vector unsigned char dstBFj = vec_sel(dstRj, dstBj, maskj);         \
    vec_st(dstAFi, i * stride, dst);                                    \
    vec_st(dstBFi, i * stride + 16, dst);                               \
    vec_st(dstAFj, j * stride, dst);                                    \
    vec_st(dstBFj, j * stride + 16, dst);                               \
} while (0)

    STORE_DOUBLE_LINE(0,1);
    STORE_DOUBLE_LINE(2,3);
    STORE_DOUBLE_LINE(4,5);
    STORE_DOUBLE_LINE(6,7);
}