/*
 * VC-1 and WMV3 decoder - DSP functions
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC-1 and WMV3 decoder
 */

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "h264chroma.h"
#include "rnd_avg.h"
#include "vc1dsp.h"


/** Apply overlap transform to horizontal edge */
static void vc1_v_overlap_c(uint8_t* src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1;
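    /* rnd alternates between 1 and 0 from column to column so the rounding
     * of the overlap filter alternates direction instead of biasing the
     * whole edge */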
    for(i = 0; i < 8; i++) {
        a = src[-2*stride];
        b = src[-stride];
        c = src[0];
        d = src[stride];
        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2*stride] = a - d1;
        src[-stride] = av_clip_uint8(b - d2);
        src[0] = av_clip_uint8(c + d2);
        src[stride] = d + d1;
        src++;
        rnd = !rnd;
    }
}

/** Apply overlap transform to vertical edge */
static void vc1_h_overlap_c(uint8_t* src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1;
    for(i = 0; i < 8; i++) {
        a = src[-2];
        b = src[-1];
        c = src[0];
        d = src[1];
        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2] = a - d1;
        src[-1] = av_clip_uint8(b - d2);
        src[0] = av_clip_uint8(c + d2);
        src[1] = d + d1;
        src += stride;
        rnd = !rnd;
    }
}

static void vc1_v_s_overlap_c(int16_t *top,  int16_t *bottom)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd1 = 4, rnd2 = 3;
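    /* unlike vc1_v_overlap_c this smooths 16-bit samples, two rows on each
     * side of the block boundary; rnd1/rnd2 swap between 4 and 3 every
     * column to alternate the rounding direction */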
    for(i = 0; i < 8; i++) {
        a = top[48];
        b = top[56];
        c = bottom[0];
        d = bottom[8];
        d1 = a - d;
        d2 = a - d + b - c;

        top[48]   = ((a << 3) - d1 + rnd1) >> 3;
        top[56]   = ((b << 3) - d2 + rnd2) >> 3;
        bottom[0] = ((c << 3) + d2 + rnd1) >> 3;
        bottom[8] = ((d << 3) + d1 + rnd2) >> 3;

        bottom++;
        top++;
        rnd2 = 7 - rnd2;
        rnd1 = 7 - rnd1;
    }
}

static void vc1_h_s_overlap_c(int16_t *left, int16_t *right)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd1 = 4, rnd2 = 3;
    for(i = 0; i < 8; i++) {
        a = left[6];
        b = left[7];
        c = right[0];
        d = right[1];
        d1 = a - d;
        d2 = a - d + b - c;

        left[6]  = ((a << 3) - d1 + rnd1) >> 3;
        left[7]  = ((b << 3) - d2 + rnd2) >> 3;
        right[0] = ((c << 3) + d2 + rnd1) >> 3;
        right[1] = ((d << 3) + d1 + rnd2) >> 3;

        right += 8;
        left += 8;
        rnd2 = 7 - rnd2;
        rnd1 = 7 - rnd1;
    }
}

/**
 * VC-1 in-loop deblocking filter for one line
 * @param src pointer to the first pixel on the lower (or right) side of the edge
 * @param stride block stride
 * @param pq block quantizer
 * @return whether other 3 pairs should be filtered or not
 * @see 8.6
 */
static av_always_inline int vc1_filter_line(uint8_t* src, int stride, int pq){
    int a0 = (2*(src[-2*stride] - src[ 1*stride]) - 5*(src[-1*stride] - src[ 0*stride]) + 4) >> 3;
    int a0_sign = a0 >> 31;        /* Store sign */
    a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */
    if(a0 < pq){
        int a1 = FFABS((2*(src[-4*stride] - src[-1*stride]) - 5*(src[-3*stride] - src[-2*stride]) + 4) >> 3);
        int a2 = FFABS((2*(src[ 0*stride] - src[ 3*stride]) - 5*(src[ 1*stride] - src[ 2*stride]) + 4) >> 3);
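        /* a1 and a2 measure the activity of the neighbouring pixel pairs;
         * the edge is filtered only if at least one of them is smoother than
         * the edge pair itself */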
        if(a1 < a0 || a2 < a0){
            int clip = src[-1*stride] - src[ 0*stride];
            int clip_sign = clip >> 31;
            clip = ((clip ^ clip_sign) - clip_sign)>>1;
            if(clip){
                int a3 = FFMIN(a1, a2);
                int d = 5 * (a3 - a0);
                int d_sign = (d >> 31);
                d = ((d ^ d_sign) - d_sign) >> 3;
                d_sign ^= a0_sign;

                if( d_sign ^ clip_sign )
                    d = 0;
                else{
                    d = FFMIN(d, clip);
                    d = (d ^ d_sign) - d_sign;          /* Restore sign */
                    src[-1*stride] = av_clip_uint8(src[-1*stride] - d);
                    src[ 0*stride] = av_clip_uint8(src[ 0*stride] + d);
                }
                return 1;
            }
        }
    }
    return 0;
}

/**
 * VC-1 in-loop deblocking filter
 * @param src pointer to the first pixel on the lower (or right) side of the edge
 * @param step distance between horizontally adjacent elements
 * @param stride distance between vertically adjacent elements
 * @param len edge length to filter (4, 8 or 16 pixels)
 * @param pq block quantizer
 * @see 8.6
 */
static inline void vc1_loop_filter(uint8_t* src, int step, int stride, int len, int pq)
{
    int i;
    int filt3;
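    /* the filter decision is made on the third line of each group of four;
     * the other three lines are filtered only if that line's condition is
     * met (see 8.6) */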

    for(i = 0; i < len; i += 4){
        filt3 = vc1_filter_line(src + 2*step, stride, pq);
        if(filt3){
            vc1_filter_line(src + 0*step, stride, pq);
            vc1_filter_line(src + 1*step, stride, pq);
            vc1_filter_line(src + 3*step, stride, pq);
        }
        src += step * 4;
    }
}
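
/* The _v_ loop filter variants below filter across a horizontal edge,
 * stepping along it one pixel at a time; the _h_ variants swap step and
 * stride to filter across a vertical edge. */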

static void vc1_v_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 4, pq);
}

static void vc1_h_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 4, pq);
}

static void vc1_v_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 8, pq);
}

static void vc1_h_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 8, pq);
}

static void vc1_v_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 16, pq);
}

static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 16, pq);
}

/** Do inverse transform on 8x8 block */
static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];
    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;
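    /* DC-only block: both transform passes collapse to the DC weight 12 with
     * shifts of 3 and 7, folded here into two rounded multiplications by 3 */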
    for(i = 0; i < 8; i++){
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest[4] = av_clip_uint8(dest[4] + dc);
        dest[5] = av_clip_uint8(dest[5] + dc);
        dest[6] = av_clip_uint8(dest[6] + dc);
        dest[7] = av_clip_uint8(dest[7] + dc);
        dest += linesize;
    }
}

static void vc1_inv_trans_8x8_c(int16_t block[64])
{
    int i;
    register int t1,t2,t3,t4,t5,t6,t7,t8;
    int16_t *src, *dst, temp[64];

    src = block;
    dst = temp;
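    /* first pass: 8-point transform along each row into a temporary buffer,
     * shifted down by only 3 so extra precision is kept for the column pass */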
    for(i = 0; i < 8; i++){
        t1 = 12 * (src[ 0] + src[32]) + 4;
        t2 = 12 * (src[ 0] - src[32]) + 4;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dst[0] = (t5 + t1) >> 3;
        dst[1] = (t6 + t2) >> 3;
        dst[2] = (t7 + t3) >> 3;
        dst[3] = (t8 + t4) >> 3;
        dst[4] = (t8 - t4) >> 3;
        dst[5] = (t7 - t3) >> 3;
        dst[6] = (t6 - t2) >> 3;
        dst[7] = (t5 - t1) >> 3;

        src += 1;
        dst += 8;
    }

    src = temp;
    dst = block;
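    /* second pass: 8-point transform down each column; the last four outputs
     * get an extra +1, the asymmetric rounding used by the VC-1 8x8 inverse
     * transform */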
    for(i = 0; i < 8; i++){
        t1 = 12 * (src[ 0] + src[32]) + 64;
        t2 = 12 * (src[ 0] - src[32]) + 64;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dst[ 0] = (t5 + t1) >> 7;
        dst[ 8] = (t6 + t2) >> 7;
        dst[16] = (t7 + t3) >> 7;
        dst[24] = (t8 + t4) >> 7;
        dst[32] = (t8 - t4 + 1) >> 7;
        dst[40] = (t7 - t3 + 1) >> 7;
        dst[48] = (t6 - t2 + 1) >> 7;
        dst[56] = (t5 - t1 + 1) >> 7;

        src++;
        dst++;
    }
}

/** Do inverse transform on 8x4 part of block */
static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];
    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;
    for(i = 0; i < 4; i++){
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest[4] = av_clip_uint8(dest[4] + dc);
        dest[5] = av_clip_uint8(dest[5] + dc);
        dest[6] = av_clip_uint8(dest[6] + dc);
        dest[7] = av_clip_uint8(dest[7] + dc);
        dest += linesize;
    }
}

static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    register int t1,t2,t3,t4,t5,t6,t7,t8;
    int16_t *src, *dst;

    src = block;
    dst = block;
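    /* 8-point transform along each of the 4 rows in place, then a 4-point
     * transform down each of the 8 columns, added directly into dest */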
    for(i = 0; i < 4; i++){
        t1 = 12 * (src[0] + src[4]) + 4;
        t2 = 12 * (src[0] - src[4]) + 4;
        t3 = 16 * src[2] +  6 * src[6];
        t4 =  6 * src[2] - 16 * src[6];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[1] + 15 * src[3] +  9 * src[5] +  4 * src[7];
        t2 = 15 * src[1] -  4 * src[3] - 16 * src[5] -  9 * src[7];
        t3 =  9 * src[1] - 16 * src[3] +  4 * src[5] + 15 * src[7];
        t4 =  4 * src[1] -  9 * src[3] + 15 * src[5] - 16 * src[7];

        dst[0] = (t5 + t1) >> 3;
        dst[1] = (t6 + t2) >> 3;
        dst[2] = (t7 + t3) >> 3;
        dst[3] = (t8 + t4) >> 3;
        dst[4] = (t8 - t4) >> 3;
        dst[5] = (t7 - t3) >> 3;
        dst[6] = (t6 - t2) >> 3;
        dst[7] = (t5 - t1) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for(i = 0; i < 8; i++){
        t1 = 17 * (src[ 0] + src[16]) + 64;
        t2 = 17 * (src[ 0] - src[16]) + 64;
        t3 = 22 * src[ 8] + 10 * src[24];
        t4 = 22 * src[24] - 10 * src[ 8];

        dest[0*linesize] = av_clip_uint8(dest[0*linesize] + ((t1 + t3) >> 7));
        dest[1*linesize] = av_clip_uint8(dest[1*linesize] + ((t2 - t4) >> 7));
        dest[2*linesize] = av_clip_uint8(dest[2*linesize] + ((t2 + t4) >> 7));
        dest[3*linesize] = av_clip_uint8(dest[3*linesize] + ((t1 - t3) >> 7));

        src ++;
        dest++;
    }
}

/** Do inverse transform on 4x8 parts of block */
static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;
    for(i = 0; i < 8; i++){
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest += linesize;
    }
}

static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    register int t1,t2,t3,t4,t5,t6,t7,t8;
    int16_t *src, *dst;

    src = block;
    dst = block;
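    /* transpose of the 8x4 case: 4-point transform along each of the 8 rows
     * in place, then an 8-point transform down each of the 4 columns, added
     * directly into dest */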
    for(i = 0; i < 8; i++){
        t1 = 17 * (src[0] + src[2]) + 4;
        t2 = 17 * (src[0] - src[2]) + 4;
        t3 = 22 * src[1] + 10 * src[3];
        t4 = 22 * src[3] - 10 * src[1];

        dst[0] = (t1 + t3) >> 3;
        dst[1] = (t2 - t4) >> 3;
        dst[2] = (t2 + t4) >> 3;
        dst[3] = (t1 - t3) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for(i = 0; i < 4; i++){
        t1 = 12 * (src[ 0] + src[32]) + 64;
        t2 = 12 * (src[ 0] - src[32]) + 64;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dest[0*linesize] = av_clip_uint8(dest[0*linesize] + ((t5 + t1) >> 7));
        dest[1*linesize] = av_clip_uint8(dest[1*linesize] + ((t6 + t2) >> 7));
        dest[2*linesize] = av_clip_uint8(dest[2*linesize] + ((t7 + t3) >> 7));
        dest[3*linesize] = av_clip_uint8(dest[3*linesize] + ((t8 + t4) >> 7));
        dest[4*linesize] = av_clip_uint8(dest[4*linesize] + ((t8 - t4 + 1) >> 7));
        dest[5*linesize] = av_clip_uint8(dest[5*linesize] + ((t7 - t3 + 1) >> 7));
        dest[6*linesize] = av_clip_uint8(dest[6*linesize] + ((t6 - t2 + 1) >> 7));
        dest[7*linesize] = av_clip_uint8(dest[7*linesize] + ((t5 - t1 + 1) >> 7));

        src ++;
        dest++;
    }
}

/** Do inverse transform on 4x4 part of block */
static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;
    for(i = 0; i < 4; i++){
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest += linesize;
    }
}

static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    register int t1,t2,t3,t4;
    int16_t *src, *dst;

    src = block;
    dst = block;
    for(i = 0; i < 4; i++){
        t1 = 17 * (src[0] + src[2]) + 4;
        t2 = 17 * (src[0] - src[2]) + 4;
        t3 = 22 * src[1] + 10 * src[3];
        t4 = 22 * src[3] - 10 * src[1];

        dst[0] = (t1 + t3) >> 3;
        dst[1] = (t2 - t4) >> 3;
        dst[2] = (t2 + t4) >> 3;
        dst[3] = (t1 - t3) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for(i = 0; i < 4; i++){
        t1 = 17 * (src[ 0] + src[16]) + 64;
        t2 = 17 * (src[ 0] - src[16]) + 64;
        t3 = 22 * src[ 8] + 10 * src[24];
        t4 = 22 * src[24] - 10 * src[ 8];

        dest[0*linesize] = av_clip_uint8(dest[0*linesize] + ((t1 + t3) >> 7));
        dest[1*linesize] = av_clip_uint8(dest[1*linesize] + ((t2 - t4) >> 7));
        dest[2*linesize] = av_clip_uint8(dest[2*linesize] + ((t2 + t4) >> 7));
        dest[3*linesize] = av_clip_uint8(dest[3*linesize] + ((t1 - t3) >> 7));

        src ++;
        dest++;
    }
}

/* motion compensation functions */
/** Filters used when both a horizontal and a vertical shift are applied;
 *  they return the raw weighted sum, leaving rounding and shifting to the
 *  caller */
#define VC1_MSPEL_FILTER_16B(DIR, TYPE)                                 \
static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, int stride, int mode) \
{                                                                       \
    switch(mode){                                                       \
    case 0: /* no shift - should not occur */                           \
        return 0;                                                       \
    case 1: /* 1/4 shift */                                             \
        return -4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2]; \
    case 2: /* 1/2 shift */                                             \
        return -src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2]; \
    case 3: /* 3/4 shift */                                             \
        return -3*src[-stride] + 18*src[0] + 53*src[stride] - 4*src[stride*2]; \
    }                                                                   \
    return 0; /* should not occur */                                    \
}

VC1_MSPEL_FILTER_16B(ver, uint8_t)
VC1_MSPEL_FILTER_16B(hor, int16_t)


/** Filter used to interpolate fractional pel values
 */
static av_always_inline int vc1_mspel_filter(const uint8_t *src, int stride, int mode, int r)
{
    switch(mode){
    case 0: //no shift
        return src[0];
    case 1: // 1/4 shift
        return (-4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2] + 32 - r) >> 6;
    case 2: // 1/2 shift
        return (-src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2] + 8 - r) >> 4;
    case 3: // 3/4 shift
        return (-3*src[-stride] + 18*src[0] + 53*src[stride] - 4*src[stride*2] + 32 - r) >> 6;
    }
    return 0; //should not occur
}

/** Function used to do motion compensation with bicubic interpolation
 */
#define VC1_MSPEL_MC(OP, OP4, OPNAME)\
static av_always_inline void OPNAME ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int hmode, int vmode, int rnd)\
{\
    int     i, j;\
\
    if (vmode) { /* Horizontal filter to apply */\
        int r;\
\
        if (hmode) { /* Vertical filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;\
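            /* the first-stage results are pre-shifted by shift (the average \
             * of the two modes' shift_value entries); together with the     \
             * final >>7 this exactly cancels the combined gain of the two   \
             * filter stages */                                              \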
            int16_t          tmp[11*8], *tptr = tmp;\
\
            r = (1<<(shift-1)) + rnd-1;\
\
            src -= 1;\
            for(j = 0; j < 8; j++) {\
                for(i = 0; i < 11; i++)\
                    tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode)+r)>>shift;\
                src += stride;\
                tptr += 11;\
            }\
\
            r = 64-rnd;\
            tptr = tmp+1;\
            for(j = 0; j < 8; j++) {\
                for(i = 0; i < 8; i++)\
                    OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode)+r)>>7);\
                dst += stride;\
                tptr += 11;\
            }\
\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            r = 1-rnd;\
\
            for(j = 0; j < 8; j++) {\
                for(i = 0; i < 8; i++)\
                    OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r));\
                src += stride;\
                dst += stride;\
            }\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    for(j = 0; j < 8; j++) {\
        for(i = 0; i < 8; i++)\
            OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd));\
        dst += stride;\
        src += stride;\
    }\
}\
static void OPNAME ## pixels8x8_c(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int rnd){\
    int i;\
    for(i=0; i<8; i++){\
        OP4(*(uint32_t*)(block  ), AV_RN32(pixels  ));\
        OP4(*(uint32_t*)(block+4), AV_RN32(pixels+4));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}

#define op_put(a, b) a = av_clip_uint8(b)
#define op_avg(a, b) a = (a + av_clip_uint8(b) + 1) >> 1
#define op4_avg(a, b) a = rnd_avg32(a, b)
#define op4_put(a, b) a = b

VC1_MSPEL_MC(op_put, op4_put, put_)
VC1_MSPEL_MC(op_avg, op4_avg, avg_)

/* pixel functions - really are entry points to vc1_mspel_mc */

#define PUT_VC1_MSPEL(a, b)\
static void put_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst,               \
                                            const uint8_t *src,         \
                                            ptrdiff_t stride, int rnd)  \
{                                                                       \
    put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                      \
}                                                                       \
static void avg_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst,               \
                                            const uint8_t *src,         \
                                            ptrdiff_t stride, int rnd)  \
{                                                                       \
    avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                      \
}

PUT_VC1_MSPEL(1, 0)
PUT_VC1_MSPEL(2, 0)
PUT_VC1_MSPEL(3, 0)

PUT_VC1_MSPEL(0, 1)
PUT_VC1_MSPEL(1, 1)
PUT_VC1_MSPEL(2, 1)
PUT_VC1_MSPEL(3, 1)

PUT_VC1_MSPEL(0, 2)
PUT_VC1_MSPEL(1, 2)
PUT_VC1_MSPEL(2, 2)
PUT_VC1_MSPEL(3, 2)

PUT_VC1_MSPEL(0, 3)
PUT_VC1_MSPEL(1, 3)
PUT_VC1_MSPEL(2, 3)
PUT_VC1_MSPEL(3, 3)

static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    const int A=(8-x)*(8-y);
    const int B=(  x)*(8-y);
    const int C=(8-x)*(  y);
    const int D=(  x)*(  y);
    int i;

    av_assert2(x<8 && y<8 && x>=0 && y>=0);
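    /* bilinear chroma interpolation: A+B+C+D == 64, and the bias is
     * 32 - 4 = 28 rather than 32, which gives the "no rounding" behaviour
     * these no_rnd functions implement */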

    for(i=0; i<h; i++)
    {
        dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6;
        dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6;
        dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6;
        dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6;
        dst[4] = (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6;
        dst[5] = (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6;
        dst[6] = (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6;
        dst[7] = (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6;
        dst+= stride;
        src+= stride;
    }
}

static void put_no_rnd_vc1_chroma_mc4_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y){
    const int A=(8-x)*(8-y);
    const int B=(  x)*(8-y);
    const int C=(8-x)*(  y);
    const int D=(  x)*(  y);
    int i;

    av_assert2(x<8 && y<8 && x>=0 && y>=0);

    for(i=0; i<h; i++)
    {
        dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6;
        dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6;
        dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6;
        dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6;
        dst+= stride;
        src+= stride;
    }
}

#define avg2(a,b) ((a+b+1)>>1)
static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    const int A=(8-x)*(8-y);
    const int B=(  x)*(8-y);
    const int C=(8-x)*(  y);
    const int D=(  x)*(  y);
    int i;

    av_assert2(x<8 && y<8 && x>=0 && y>=0);

    for(i=0; i<h; i++)
    {
        dst[0] = avg2(dst[0], ((A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6));
        dst[1] = avg2(dst[1], ((A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6));
        dst[2] = avg2(dst[2], ((A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6));
        dst[3] = avg2(dst[3], ((A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6));
        dst[4] = avg2(dst[4], ((A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6));
        dst[5] = avg2(dst[5], ((A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6));
        dst[6] = avg2(dst[6], ((A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6));
        dst[7] = avg2(dst[7], ((A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6));
        dst+= stride;
        src+= stride;
    }
}

static void avg_no_rnd_vc1_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    const int A=(8-x)*(8-y);
    const int B=(  x)*(8-y);
    const int C=(8-x)*(  y);
    const int D=(  x)*(  y);
    int i;

    av_assert2(x<8 && y<8 && x>=0 && y>=0);

    for(i=0; i<h; i++)
    {
        dst[0] = avg2(dst[0], ((A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6));
        dst[1] = avg2(dst[1], ((A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6));
        dst[2] = avg2(dst[2], ((A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6));
        dst[3] = avg2(dst[3], ((A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6));
        dst+= stride;
        src+= stride;
    }
}

#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER

static void sprite_h_c(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
{
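    /* offset and advance are 16.16 fixed point; the integer part selects the
     * source pixel and the fractional part the linear blend towards its
     * right-hand neighbour */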
    while (count--) {
        int a = src[(offset >> 16)    ];
        int b = src[(offset >> 16) + 1];
        *dst++ = a + ((b - a) * (offset&0xFFFF) >> 16);
        offset += advance;
    }
}

static av_always_inline void sprite_v_template(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
                                            int two_sprites, const uint8_t *src2a, const uint8_t *src2b, int offset2,
                                            int alpha, int scaled, int width)
{
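    /* two_sprites and scaled are compile-time constants in the wrappers
     * below; since this function is av_always_inline, the compiler drops the
     * branches that do not apply */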
    int a1, b1, a2, b2;
    while (width--) {
        a1 = *src1a++;
        if (scaled) {
            b1 = *src1b++;
            a1 = a1 + ((b1 - a1) * offset1 >> 16);
        }
        if (two_sprites) {
            a2 = *src2a++;
            if (scaled > 1) {
                b2 = *src2b++;
                a2 = a2 + ((b2 - a2) * offset2 >> 16);
            }
            a1 = a1 + ((a2 - a1) * alpha >> 16);
        }
        *dst++ = a1;
    }
}

static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
{
    sprite_v_template(dst, src1a, src1b, offset, 0, NULL, NULL, 0, 0, 1, width);
}

static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
{
    sprite_v_template(dst, src1a, NULL, 0, 1, src2a, NULL, 0, alpha, 0, width);
}

static void sprite_v_double_onescale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
                                                     const uint8_t *src2a, int alpha, int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, NULL, 0, alpha, 1, width);
}

static void sprite_v_double_twoscale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
                                                     const uint8_t *src2a, const uint8_t *src2b, int offset2,
                                       int alpha, int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, src2b, offset2, alpha, 2, width);
}

#endif

av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c;
    dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_c;
    dsp->vc1_inv_trans_4x4 = vc1_inv_trans_4x4_c;
    dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_c;
    dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_c;
    dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_c;
    dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_c;
    dsp->vc1_h_overlap = vc1_h_overlap_c;
    dsp->vc1_v_overlap = vc1_v_overlap_c;
    dsp->vc1_h_s_overlap = vc1_h_s_overlap_c;
    dsp->vc1_v_s_overlap = vc1_v_s_overlap_c;
    dsp->vc1_v_loop_filter4 = vc1_v_loop_filter4_c;
    dsp->vc1_h_loop_filter4 = vc1_h_loop_filter4_c;
    dsp->vc1_v_loop_filter8 = vc1_v_loop_filter8_c;
    dsp->vc1_h_loop_filter8 = vc1_h_loop_filter8_c;
    dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_c;
    dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_c;

    dsp->put_vc1_mspel_pixels_tab[ 0] = put_pixels8x8_c;
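    /* the remaining entries are indexed by hmode + 4 * vmode, i.e. the
     * quarter-pel shift in x and y */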
    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_c;
    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_c;
    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_c;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_c;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_c;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_c;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_c;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_c;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_c;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_c;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_c;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_c;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_c;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_c;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_c;

    dsp->avg_vc1_mspel_pixels_tab[ 0] = avg_pixels8x8_c;
    dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_c;
    dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_c;
    dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_c;
    dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_c;
    dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_c;
    dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_c;
    dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_c;
    dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_c;
    dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_c;
    dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_c;
    dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_c;
    dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_c;
    dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_c;
    dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_c;
    dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_c;

    dsp->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[1] = put_no_rnd_vc1_chroma_mc4_c;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[1] = avg_no_rnd_vc1_chroma_mc4_c;

#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
    dsp->sprite_h = sprite_h_c;
    dsp->sprite_v_single = sprite_v_single_c;
    dsp->sprite_v_double_noscale = sprite_v_double_noscale_c;
    dsp->sprite_v_double_onescale = sprite_v_double_onescale_c;
    dsp->sprite_v_double_twoscale = sprite_v_double_twoscale_c;
#endif

    if (ARCH_PPC)
        ff_vc1dsp_init_ppc(dsp);
    if (ARCH_X86)
        ff_vc1dsp_init_x86(dsp);
}