;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 intra prediction code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86inc.asm"
%include "libavutil/x86/x86util.asm"

SECTION_RODATA

cextern pw_16
cextern pw_8
cextern pw_4
cextern pw_2
cextern pw_1

pw_m32101234: dw -3, -2, -1, 0, 1, 2, 3, 4
pw_m3:        times 8 dw -3
pw_pixel_max: times 8 dw ((1 << 10)-1)
pw_512:       times 8 dw 512
pd_17:        times 4 dd 17
pd_16:        times 4 dd 16

SECTION .text

; %1 = dest, %2 = left, %3 = right, %4 = src
; output: %1 = (left + 2*src + right + 2) >> 2
%macro PRED4x4_LOWPASS 4
    paddw       %2, %3
    psrlw       %2, 1
    pavgw       %1, %4, %2
%endmacro
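; The sequence above avoids widening to dwords: for unsigned words,
;   (left + 2*src + right + 2) >> 2  ==  (src + ((left + right) >> 1) + 1) >> 1
; holds exactly, and every intermediate fits in 16 bits for 10-bit input.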

;-----------------------------------------------------------------------------
; void pred4x4_down_right(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
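; Each down-right diagonal is filled with one 3-tap lowpass value of the
; joined edge l3..l0, lt, t0..t3 (a rough summary of the 4x4 rule; the
; shuffles below just assemble that edge into one register).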
%macro PRED4x4_DR 1
cglobal pred4x4_down_right_10_%1, 3,3
    sub       r0, r2
    lea       r1, [r0+r2*2]
    movhps    m1, [r1-8]
    movhps    m2, [r0+r2*1-8]
    movhps    m4, [r0-8]
    punpckhwd m2, m4
    movq      m3, [r0]
    punpckhdq m1, m2
    PALIGNR   m3, m1, 10, m1
    movhps    m4, [r1+r2*1-8]
    PALIGNR   m0, m3, m4, 14, m4
    movhps    m4, [r1+r2*2-8]
    PALIGNR   m2, m0, m4, 14, m4
    PRED4x4_LOWPASS m0, m2, m3, m0
    movq      [r1+r2*2], m0
    psrldq    m0, 2
    movq      [r1+r2*1], m0
    psrldq    m0, 2
    movq      [r0+r2*2], m0
    psrldq    m0, 2
    movq      [r0+r2*1], m0
    RET
%endmacro

INIT_XMM
%define PALIGNR PALIGNR_MMX
PRED4x4_DR sse2
%define PALIGNR PALIGNR_SSSE3
PRED4x4_DR ssse3
%if HAVE_AVX
INIT_AVX
PRED4x4_DR avx
%endif

;-----------------------------------------------------------------------------
; void pred4x4_vertical_right(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
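; Roughly: even rows take 2-tap averages (pavgw) of the lt,t0..t3 edge,
; odd rows take the 3-tap lowpass, and every second row shifts one
; sample up the left edge (hedged summary, not the spec text).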
%macro PRED4x4_VR 1
cglobal pred4x4_vertical_right_10_%1, 3,3,6
    sub     r0, r2
    lea     r1, [r0+r2*2]
    movq    m5, [r0]            ; ........t3t2t1t0
    movhps  m1, [r0-8]
    PALIGNR m0, m5, m1, 14, m1  ; ......t3t2t1t0lt
    pavgw   m5, m0
    movhps  m1, [r0+r2*1-8]
    PALIGNR m0, m1, 14, m1      ; ....t3t2t1t0ltl0
    movhps  m2, [r0+r2*2-8]
    PALIGNR m1, m0, m2, 14, m2  ; ..t3t2t1t0ltl0l1
    movhps  m3, [r1+r2*1-8]
    PALIGNR m2, m1, m3, 14, m3  ; t3t2t1t0ltl0l1l2
    PRED4x4_LOWPASS m1, m0, m2, m1
    pslldq  m0, m1, 12
    psrldq  m1, 4
    movq    [r0+r2*1], m5
    movq    [r0+r2*2], m1
    PALIGNR m5, m0, 14, m2
    pslldq  m0, 2
    movq    [r1+r2*1], m5
    PALIGNR m1, m0, 14, m0
    movq    [r1+r2*2], m1
    RET
%endmacro

INIT_XMM
%define PALIGNR PALIGNR_MMX
PRED4x4_VR sse2
%define PALIGNR PALIGNR_SSSE3
PRED4x4_VR ssse3
%if HAVE_AVX
INIT_AVX
PRED4x4_VR avx
%endif

;-----------------------------------------------------------------------------
; void pred4x4_horizontal_down(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
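; Mirror image of vertical_right: pavgw and 3-tap lowpass values of the
; t2..t0,lt,l0..l3 edge are interleaved column-wise while the rows walk
; down the left edge (hedged summary).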
%macro PRED4x4_HD 1
cglobal pred4x4_horizontal_down_10_%1, 3,3
    sub        r0, r2
    lea        r1, [r0+r2*2]
    movq       m0, [r0-8]      ; lt ..
    movhps     m0, [r0]
    pslldq     m0, 2           ; t2 t1 t0 lt .. .. .. ..
    movq       m1, [r1+r2*2-8] ; l3
    movq       m3, [r1+r2*1-8]
    punpcklwd  m1, m3          ; l2 l3
    movq       m2, [r0+r2*2-8] ; l1
    movq       m3, [r0+r2*1-8]
    punpcklwd  m2, m3          ; l0 l1
    punpckhdq  m1, m2          ; l0 l1 l2 l3
    punpckhqdq m1, m0          ; t2 t1 t0 lt l0 l1 l2 l3
    psrldq     m0, m1, 4       ; .. .. t2 t1 t0 lt l0 l1
    psrldq     m3, m1, 2       ; .. t2 t1 t0 lt l0 l1 l2
    pavgw      m5, m1, m3
    PRED4x4_LOWPASS m3, m1, m0, m3
    punpcklwd  m5, m3
    psrldq     m3, 8
    PALIGNR    m3, m5, 12, m4
    movq       [r1+r2*2], m5
    movhps     [r0+r2*2], m5
    psrldq     m5, 4
    movq       [r1+r2*1], m5
    movq       [r0+r2*1], m3
    RET
%endmacro

INIT_XMM
%define PALIGNR PALIGNR_MMX
PRED4x4_HD sse2
%define PALIGNR PALIGNR_SSSE3
PRED4x4_HD ssse3
%if HAVE_AVX
INIT_AVX
PRED4x4_HD avx
%endif

;-----------------------------------------------------------------------------
; void pred4x4_dc(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
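; Rough scalar equivalent: dc = (t0+t1+t2+t3 + l0+l1+l2+l3 + 4) >> 3,
; splatted over the whole block.  The HADDW/HADDD helpers below do the
; horizontal sum of the top row.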
%macro HADDD 2 ; %1 = sum (dwords, in/out), %2 = scratch
%if mmsize == 16
    movhlps %2, %1
    paddd   %1, %2
    pshuflw %2, %1, 0xE
    paddd   %1, %2
%else
    pshufw  %2, %1, 0xE
    paddd   %1, %2
%endif
%endmacro

%macro HADDW 2
    pmaddwd %1, [pw_1]
    HADDD   %1, %2
%endmacro

INIT_MMX
cglobal pred4x4_dc_10_mmxext, 3,3
    sub    r0, r2
    lea    r1, [r0+r2*2]
    movq   m2, [r0+r2*1-8]
    paddw  m2, [r0+r2*2-8]
    paddw  m2, [r1+r2*1-8]
    paddw  m2, [r1+r2*2-8]
    psrlq  m2, 48
    movq   m0, [r0]
    HADDW  m0, m1
    paddw  m0, [pw_4]
    paddw  m0, m2
    psrlw  m0, 3
    SPLATW m0, m0, 0
    movq   [r0+r2*1], m0
    movq   [r0+r2*2], m0
    movq   [r1+r2*1], m0
    movq   [r1+r2*2], m0
    RET

;-----------------------------------------------------------------------------
; void pred4x4_down_left(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
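; dst(x, y) = (t[x+y] + 2*t[x+y+1] + t[x+y+2] + 2) >> 2, with the last
; top-right sample repeated past the edge (my reading of the 4x4 rule;
; the pshufhw below does the repetition).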
%macro PRED4x4_DL 1
cglobal pred4x4_down_left_10_%1, 3,3
    sub        r0, r2
    movq       m0, [r0]
    movhps     m0, [r1]
    psrldq     m2, m0, 2
    pslldq     m3, m0, 2
    pshufhw    m2, m2, 10100100b
    PRED4x4_LOWPASS m0, m3, m2, m0
    lea        r1, [r0+r2*2]
    movhps     [r1+r2*2], m0
    psrldq     m0, 2
    movq       [r0+r2*1], m0
    psrldq     m0, 2
    movq       [r0+r2*2], m0
    psrldq     m0, 2
    movq       [r1+r2*1], m0
    RET
%endmacro

INIT_XMM
PRED4x4_DL sse2
%if HAVE_AVX
INIT_AVX
PRED4x4_DL avx
%endif

;-----------------------------------------------------------------------------
; void pred4x4_vertical_left(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
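; Even rows: pavgw of neighbouring samples of the t0..tr3 edge; odd
; rows: the 3-tap lowpass; each pair of rows starts one sample further
; along the edge (hedged summary).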
%macro PRED4x4_VL 1
cglobal pred4x4_vertical_left_10_%1, 3,3
    sub        r0, r2
    movu       m1, [r0]
    movhps     m1, [r1]
    psrldq     m0, m1, 2
    psrldq     m2, m1, 4
    pavgw      m4, m0, m1
    PRED4x4_LOWPASS m0, m1, m2, m0
    lea        r1, [r0+r2*2]
    movq       [r0+r2*1], m4
    movq       [r0+r2*2], m0
    psrldq     m4, 2
    psrldq     m0, 2
    movq       [r1+r2*1], m4
    movq       [r1+r2*2], m0
    RET
%endmacro

INIT_XMM
PRED4x4_VL sse2
%if HAVE_AVX
INIT_AVX
PRED4x4_VL avx
%endif

;-----------------------------------------------------------------------------
; void pred4x4_horizontal_up(pixel *src, const pixel *topright, int stride)
;-----------------------------------------------------------------------------
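; Interpolates upward along l0..l3 with alternating pavgw / 3-tap
; values; below the last diagonal everything saturates to l3, which is
; what the pshufw 0xFF splat prepares (hedged summary).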
INIT_MMX
cglobal pred4x4_horizontal_up_10_mmxext, 3,3
    sub       r0, r2
    lea       r1, [r0+r2*2]
    movq      m0, [r0+r2*1-8]
    punpckhwd m0, [r0+r2*2-8]
    movq      m1, [r1+r2*1-8]
    punpckhwd m1, [r1+r2*2-8]
    punpckhdq m0, m1
    pshufw    m1, m1, 0xFF
    movq      [r1+r2*2], m1
    movd      [r1+r2*1+4], m1
    pshufw    m2, m0, 11111001b
    movq      m1, m2
    pavgw     m2, m0

    pshufw    m5, m0, 11111110b
    PRED4x4_LOWPASS m1, m0, m5, m1
    movq      m6, m2
    punpcklwd m6, m1
    movq      [r0+r2*1], m6
    psrlq     m2, 16
    psrlq     m1, 16
    punpcklwd m2, m1
    movq      [r0+r2*2], m2
    psrlq     m2, 32
    movd      [r1+r2*1], m2
    RET



;-----------------------------------------------------------------------------
; void pred8x8_vertical(pixel *src, int stride)
;-----------------------------------------------------------------------------
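; The row above the block is copied into all eight rows.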
INIT_XMM
cglobal pred8x8_vertical_10_sse2, 2,2
    sub  r0, r1
    mova m0, [r0]
%rep 3
    mova [r0+r1*1], m0
    mova [r0+r1*2], m0
    lea  r0, [r0+r1*2]
%endrep
    mova [r0+r1*1], m0
    mova [r0+r1*2], m0
    RET

;-----------------------------------------------------------------------------
; void pred8x8_horizontal(pixel *src, int stride)
;-----------------------------------------------------------------------------
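; Each row is filled with its left neighbour, two rows per iteration;
; pshuflw+punpcklqdq broadcast one word across the register.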
INIT_XMM
cglobal pred8x8_horizontal_10_sse2, 2,3
    mov         r2d, 4
.loop:
    movq         m0, [r0+r1*0-8]
    movq         m1, [r0+r1*1-8]
    pshuflw      m0, m0, 0xff
    pshuflw      m1, m1, 0xff
    punpcklqdq   m0, m0
    punpcklqdq   m1, m1
    mova  [r0+r1*0], m0
    mova  [r0+r1*1], m1
    lea          r0, [r0+r1*2]
    dec          r2d
    jg .loop
    REP_RET

;-----------------------------------------------------------------------------
; void pred8x8_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro MOV8 2-3
; sort of a hack, but it works
%if mmsize==8
    movq    [%1+0], %2
    movq    [%1+8], %3
%else
    movdqa    [%1], %2
%endif
%endmacro
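; The four 4x4 quadrants get separate DC values from the half-edge sums
; s0/s1 (left/right halves of the top row) and s2/s3 (upper/lower halves
; of the left column): roughly dc_tl = (s0+s2+4)>>3, dc_tr = (s1+2)>>2,
; dc_bl = (s3+2)>>2, dc_br = (s1+s3+4)>>3 (my reading of the
; shuffle/pavgw sequence below, not spec text).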

%macro PRED8x8_DC 2
cglobal pred8x8_dc_10_%1, 2,6
    sub         r0, r1
    pxor        m4, m4
    movq        m0, [r0+0]
    movq        m1, [r0+8]
%if mmsize==16
    punpcklwd   m0, m1
    movhlps     m1, m0
    paddw       m0, m1
%else
    pshufw      m2, m0, 00001110b
    pshufw      m3, m1, 00001110b
    paddw       m0, m2
    paddw       m1, m3
    punpcklwd   m0, m1
%endif
    %2          m2, m0, 00001110b
    paddw       m0, m2

    lea         r5, [r1*3]
    lea         r4, [r0+r1*4]
    movzx      r2d, word [r0+r1*1-2]
    movzx      r3d, word [r0+r1*2-2]
    add        r2d, r3d
    movzx      r3d, word [r0+r5*1-2]
    add        r2d, r3d
    movzx      r3d, word [r4-2]
    add        r2d, r3d
    movd        m2, r2d            ; s2

    movzx      r2d, word [r4+r1*1-2]
    movzx      r3d, word [r4+r1*2-2]
    add        r2d, r3d
    movzx      r3d, word [r4+r5*1-2]
    add        r2d, r3d
    movzx      r3d, word [r4+r1*4-2]
    add        r2d, r3d
    movd        m3, r2d            ; s3

    punpcklwd   m2, m3
    punpckldq   m0, m2            ; s0, s1, s2, s3
    %2          m3, m0, 11110110b ; s2, s1, s3, s3
    %2          m0, m0, 01110100b ; s0, s1, s3, s1
    paddw       m0, m3
    psrlw       m0, 2
    pavgw       m0, m4            ; s0+s2, s1, s3, s1+s3
%if mmsize==16
    punpcklwd   m0, m0
    pshufd      m3, m0, 11111010b
    punpckldq   m0, m0
    SWAP         0,1
%else
    pshufw      m1, m0, 0x00
    pshufw      m2, m0, 0x55
    pshufw      m3, m0, 0xaa
    pshufw      m4, m0, 0xff
%endif
    MOV8   r0+r1*1, m1, m2
    MOV8   r0+r1*2, m1, m2
    MOV8   r0+r5*1, m1, m2
    MOV8   r0+r1*4, m1, m2
    MOV8   r4+r1*1, m3, m4
    MOV8   r4+r1*2, m3, m4
    MOV8   r4+r5*1, m3, m4
    MOV8   r4+r1*4, m3, m4
    RET
%endmacro

INIT_MMX
PRED8x8_DC mmxext, pshufw
INIT_XMM
PRED8x8_DC sse2  , pshuflw

;-----------------------------------------------------------------------------
; void pred8x8_top_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
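; The two halves of the top row are averaged separately:
; dst(x, y) = (t0+t1+t2+t3+2)>>2 for x < 4, (t4+t5+t6+t7+2)>>2 otherwise.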
INIT_XMM
cglobal pred8x8_top_dc_10_sse2, 2,4
    sub         r0, r1
    mova        m0, [r0]
    pshuflw     m1, m0, 0x4e
    pshufhw     m1, m1, 0x4e
    paddw       m0, m1
    pshuflw     m1, m0, 0xb1
    pshufhw     m1, m1, 0xb1
    paddw       m0, m1
    lea         r2, [r1*3]
    lea         r3, [r0+r1*4]
    paddw       m0, [pw_2]
    psrlw       m0, 2
    mova [r0+r1*1], m0
    mova [r0+r1*2], m0
    mova [r0+r2*1], m0
    mova [r0+r1*4], m0
    mova [r3+r1*1], m0
    mova [r3+r1*2], m0
    mova [r3+r2*1], m0
    mova [r3+r1*4], m0
    RET

;-----------------------------------------------------------------------------
; void pred8x8_plane(pixel *src, int stride)
;-----------------------------------------------------------------------------
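; Rough scalar equivalent of the plane fit computed below:
;   H = sum(x = 1..4) x * (t[3+x] - t[3-x])   (t[-1] is the top-left)
;   V = the same sum down the left edge
;   b = (17*H + 16) >> 5,  c = (17*V + 16) >> 5,  a = 16 * (l7 + t7)
;   dst(x, y) = clip((a + b*(x-3) + c*(y-3) + 16) >> 5)
; pw_m32101234 supplies both the H weights and the per-row x-3 ramp.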
INIT_XMM
cglobal pred8x8_plane_10_sse2, 2,7,7
    sub       r0, r1
    lea       r2, [r1*3]
    lea       r3, [r0+r1*4]
    mova      m2, [r0]
    pmaddwd   m2, [pw_m32101234]
    HADDD     m2, m1
    movd      m0, [r0-4]
    psrld     m0, 14
    psubw     m2, m0               ; H
    movd      m0, [r3+r1*4-4]
    movd      m1, [r0+12]
    paddw     m0, m1
    psllw     m0, 4                ; 16*(src[7*stride-1] + src[-stride+7])
    movzx    r4d, word [r3+r1*1-2] ; src[4*stride-1]
    movzx    r5d, word [r0+r2*1-2] ; src[2*stride-1]
    sub      r4d, r5d
    movzx    r6d, word [r3+r1*2-2] ; src[5*stride-1]
    movzx    r5d, word [r0+r1*2-2] ; src[1*stride-1]
    sub      r6d, r5d
    lea      r4d, [r4+r6*2]
    movzx    r5d, word [r3+r2*1-2] ; src[6*stride-1]
    movzx    r6d, word [r0+r1*1-2] ; src[0*stride-1]
    sub      r5d, r6d
    lea      r5d, [r5*3]
    add      r4d, r5d
    movzx    r6d, word [r3+r1*4-2] ; src[7*stride-1]
    movzx    r5d, word [r0+r1*0-2] ; src[ -stride-1]
    sub      r6d, r5d
    lea      r4d, [r4+r6*4]
    movd      m3, r4d              ; V
    punpckldq m2, m3
    pmaddwd   m2, [pd_17]
    paddd     m2, [pd_16]
    psrad     m2, 5                ; b, c

    mova      m3, [pw_pixel_max]
    pxor      m1, m1
    SPLATW    m0, m0, 1
    SPLATW    m4, m2, 2
    SPLATW    m2, m2, 0
    pmullw    m2, [pw_m32101234]   ; b
    pmullw    m5, m4, [pw_m3]      ; c
    paddw     m5, [pw_16]
    mov      r2d, 8
    add       r0, r1
.loop:
    paddsw    m6, m2, m5
    paddsw    m6, m0
    psraw     m6, 5
    CLIPW     m6, m1, m3
    mova    [r0], m6
    paddw     m5, m4
    add       r0, r1
    dec r2d
    jg .loop
    REP_RET


;-----------------------------------------------------------------------------
; void pred8x8l_128_dc(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_128_DC 1
cglobal pred8x8l_128_dc_10_%1, 4,4
    mova      m0, [pw_512] ; (1<<(BIT_DEPTH-1))
    lea       r1, [r3*3]
    lea       r2, [r0+r3*4]
    MOV8 r0+r3*0, m0, m0
    MOV8 r0+r3*1, m0, m0
    MOV8 r0+r3*2, m0, m0
    MOV8 r0+r1*1, m0, m0
    MOV8 r2+r3*0, m0, m0
    MOV8 r2+r3*1, m0, m0
    MOV8 r2+r3*2, m0, m0
    MOV8 r2+r1*1, m0, m0
    RET
%endmacro

INIT_MMX
PRED8x8L_128_DC mmxext
INIT_XMM
PRED8x8L_128_DC sse2

;-----------------------------------------------------------------------------
; void pred8x8l_top_dc(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
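; has_topleft/has_topright arrive as single-bit flags (as I read the
; callers), so the shr by 14/13 reduces each to a 0-or-2 byte offset:
; pinsrw then picks either the real neighbour sample ([r0-2] / [r0+16])
; or a repeat of the edge ([r0] / [r0+14]).  The top row is 3-tap
; filtered and its average, (sum + 4) >> 3, fills the block.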
%macro PRED8x8L_TOP_DC 1
cglobal pred8x8l_top_dc_10_%1, 4,4,6
    sub         r0, r3
    mova        m0, [r0]
    shr        r1d, 14
    shr        r2d, 13
    neg         r1
    pslldq      m1, m0, 2
    psrldq      m2, m0, 2
    pinsrw      m1, [r0+r1], 0
    pinsrw      m2, [r0+r2+14], 7
    lea         r1, [r3*3]
    lea         r2, [r0+r3*4]
    PRED4x4_LOWPASS m0, m2, m1, m0
    HADDW       m0, m1
    paddw       m0, [pw_4]
    psrlw       m0, 3
    SPLATW      m0, m0, 0
    mova [r0+r3*1], m0
    mova [r0+r3*2], m0
    mova [r0+r1*1], m0
    mova [r0+r3*4], m0
    mova [r2+r3*1], m0
    mova [r2+r3*2], m0
    mova [r2+r1*1], m0
    mova [r2+r3*4], m0
    RET
%endmacro

INIT_XMM
PRED8x8L_TOP_DC sse2
%if HAVE_AVX
INIT_AVX
PRED8x8L_TOP_DC avx
%endif

;-----------------------------------------------------------------------------
; void pred8x8l_dc(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
;TODO: see if scalar is faster
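; Rough scalar equivalent: dc = (sum of the 8 filtered left samples +
; sum of the 8 filtered top samples + 8) >> 4, splatted over the block.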
%macro PRED8x8L_DC 1
cglobal pred8x8l_dc_10_%1, 4,6,6
    sub         r0, r3
    lea         r4, [r0+r3*4]
    lea         r5, [r3*3]
    mova        m0, [r0+r3*2-16]
    punpckhwd   m0, [r0+r3*1-16]
    mova        m1, [r4+r3*0-16]
    punpckhwd   m1, [r0+r5*1-16]
    punpckhdq   m1, m0
    mova        m2, [r4+r3*2-16]
    punpckhwd   m2, [r4+r3*1-16]
    mova        m3, [r4+r3*4-16]
    punpckhwd   m3, [r4+r5*1-16]
    punpckhdq   m3, m2
    punpckhqdq  m3, m1
    mova        m0, [r0]
    shr        r1d, 14
    shr        r2d, 13
    neg         r1
    pslldq      m1, m0, 2
    psrldq      m2, m0, 2
    pinsrw      m1, [r0+r1], 0
    pinsrw      m2, [r0+r2+14], 7
    not         r1
    and         r1, r3
    pslldq      m4, m3, 2
    psrldq      m5, m3, 2
    pshuflw     m4, m4, 11100101b
    pinsrw      m5, [r0+r1-2], 7
    PRED4x4_LOWPASS m3, m4, m5, m3
    PRED4x4_LOWPASS m0, m2, m1, m0
    paddw       m0, m3
    HADDW       m0, m1
    paddw       m0, [pw_8]
    psrlw       m0, 4
    SPLATW      m0, m0
    mova [r0+r3*1], m0
    mova [r0+r3*2], m0
    mova [r0+r5*1], m0
    mova [r0+r3*4], m0
    mova [r4+r3*1], m0
    mova [r4+r3*2], m0
    mova [r4+r5*1], m0
    mova [r4+r3*4], m0
    RET
%endmacro

INIT_XMM
PRED8x8L_DC sse2
%if HAVE_AVX
INIT_AVX
PRED8x8L_DC avx
%endif

;-----------------------------------------------------------------------------
; void pred8x8l_vertical(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
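; All eight rows are copies of the 3-tap filtered top row.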
%macro PRED8x8L_VERTICAL 1
cglobal pred8x8l_vertical_10_%1, 4,4,6
    sub         r0, r3
    mova        m0, [r0]
    shr        r1d, 14
    shr        r2d, 13
    neg         r1
    pslldq      m1, m0, 2
    psrldq      m2, m0, 2
    pinsrw      m1, [r0+r1], 0
    pinsrw      m2, [r0+r2+14], 7
    lea         r1, [r3*3]
    lea         r2, [r0+r3*4]
    PRED4x4_LOWPASS m0, m2, m1, m0
    mova [r0+r3*1], m0
    mova [r0+r3*2], m0
    mova [r0+r1*1], m0
    mova [r0+r3*4], m0
    mova [r2+r3*1], m0
    mova [r2+r3*2], m0
    mova [r2+r1*1], m0
    mova [r2+r3*4], m0
    RET
%endmacro

INIT_XMM
PRED8x8L_VERTICAL sse2
%if HAVE_AVX
INIT_AVX
PRED8x8L_VERTICAL avx
%endif

;-----------------------------------------------------------------------------
; void pred8x8l_horizontal(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
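; The left column is 3-tap filtered (using the top-left sample when
; available), then each filtered sample is broadcast across one row by
; the pshufd fan-out below.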
%macro PRED8x8L_HORIZONTAL 1
cglobal pred8x8l_horizontal_10_%1, 4,4,5
    mova        m0, [r0-16]
    shr        r1d, 14
    dec         r1
    and         r1, r3
    sub         r1, r3
    punpckhwd   m0, [r0+r1-16]
    mova        m1, [r0+r3*2-16]
    punpckhwd   m1, [r0+r3*1-16]
    lea         r2, [r0+r3*4]
    lea         r1, [r3*3]
    punpckhdq   m1, m0
    mova        m2, [r2+r3*0-16]
    punpckhwd   m2, [r0+r1-16]
    mova        m3, [r2+r3*2-16]
    punpckhwd   m3, [r2+r3*1-16]
    punpckhdq   m3, m2
    punpckhqdq  m3, m1
    PALIGNR     m4, m3, [r2+r1-16], 14, m0
    pslldq      m0, m4, 2
    pshuflw     m0, m0, 11100101b
    PRED4x4_LOWPASS m4, m3, m0, m4
    punpckhwd   m3, m4, m4
    punpcklwd   m4, m4
    pshufd      m0, m3, 0xff
    pshufd      m1, m3, 0xaa
    pshufd      m2, m3, 0x55
    pshufd      m3, m3, 0x00
    mova [r0+r3*0], m0
    mova [r0+r3*1], m1
    mova [r0+r3*2], m2
    mova [r0+r1*1], m3
    pshufd      m0, m4, 0xff
    pshufd      m1, m4, 0xaa
    pshufd      m2, m4, 0x55
    pshufd      m3, m4, 0x00
    mova [r2+r3*0], m0
    mova [r2+r3*1], m1
    mova [r2+r3*2], m2
    mova [r2+r1*1], m3
    RET
%endmacro

INIT_XMM
%define PALIGNR PALIGNR_MMX
PRED8x8L_HORIZONTAL sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_HORIZONTAL ssse3
%if HAVE_AVX
INIT_AVX
PRED8x8L_HORIZONTAL avx
%endif

;-----------------------------------------------------------------------------
; void pred8x8l_down_left(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
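; Down-left diagonals over the filtered top / top-right edge; when
; has_topright is zero, .fix_tr extends the edge by splatting t7
; (the 8x8 analogue of the 4x4 down-left rule).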
%macro PRED8x8L_DOWN_LEFT 1
cglobal pred8x8l_down_left_10_%1, 4,4,7
    sub         r0, r3
    mova        m3, [r0]
    shr        r1d, 14
    neg         r1
    shr        r2d, 13
    pslldq      m1, m3, 2
    psrldq      m2, m3, 2
    pinsrw      m1, [r0+r1], 0
    pinsrw      m2, [r0+r2+14], 7
    PRED4x4_LOWPASS m6, m2, m1, m3
    jz .fix_tr ; flags from shr r2d
    mova        m1, [r0+16]
    psrldq      m5, m1, 2
    PALIGNR     m2, m1, m3, 14, m3
    pshufhw     m5, m5, 10100100b
    PRED4x4_LOWPASS m1, m2, m5, m1
.do_topright:
    lea         r1, [r3*3]
    psrldq      m5, m1, 14
    lea         r2, [r0+r3*4]
    PALIGNR     m2, m1, m6,  2, m0
    PALIGNR     m3, m1, m6, 14, m0
    PALIGNR     m5, m1,  2, m0
    pslldq      m4, m6, 2
    PRED4x4_LOWPASS m6, m4, m2, m6
    PRED4x4_LOWPASS m1, m3, m5, m1
    mova [r2+r3*4], m1
    PALIGNR     m1, m6, 14, m2
    pslldq      m6, 2
    mova [r2+r1*1], m1
    PALIGNR     m1, m6, 14, m2
    pslldq      m6, 2
    mova [r2+r3*2], m1
    PALIGNR     m1, m6, 14, m2
    pslldq      m6, 2
    mova [r2+r3*1], m1
    PALIGNR     m1, m6, 14, m2
    pslldq      m6, 2
    mova [r0+r3*4], m1
    PALIGNR     m1, m6, 14, m2
    pslldq      m6, 2
    mova [r0+r1*1], m1
    PALIGNR     m1, m6, 14, m2
    pslldq      m6, 2
    mova [r0+r3*2], m1
    PALIGNR     m1, m6, 14, m6
    mova [r0+r3*1], m1
    RET
.fix_tr:
    punpckhwd   m3, m3
    pshufd      m1, m3, 0xFF
    jmp .do_topright
%endmacro

INIT_XMM
%define PALIGNR PALIGNR_MMX
PRED8x8L_DOWN_LEFT sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_DOWN_LEFT ssse3
%if HAVE_AVX
INIT_AVX
PRED8x8L_DOWN_LEFT avx
%endif

;-----------------------------------------------------------------------------
; void pred8x8l_down_right(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
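; Down-right diagonals over the filtered left column, top-left corner
; and top row; each output row is the previous one shifted by one
; sample, hence the PALIGNR/pslldq chain below (hedged summary).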
%macro PRED8x8L_DOWN_RIGHT 1
; the standard forbids this mode when has_topleft is false,
; so there is no need to check for it
cglobal pred8x8l_down_right_10_%1, 4,5,8
    sub         r0, r3
    lea         r4, [r0+r3*4]
    lea         r1, [r3*3]
    mova        m0, [r0+r3*1-16]
    punpckhwd   m0, [r0+r3*0-16]
    mova        m1, [r0+r1*1-16]
    punpckhwd   m1, [r0+r3*2-16]
    punpckhdq   m1, m0
    mova        m2, [r4+r3*1-16]
    punpckhwd   m2, [r4+r3*0-16]
    mova        m3, [r4+r1*1-16]
    punpckhwd   m3, [r4+r3*2-16]
    punpckhdq   m3, m2
    punpckhqdq  m3, m1
    mova        m0, [r4+r3*4-16]
    mova        m1, [r0]
    PALIGNR     m4, m3, m0, 14, m0
    PALIGNR     m1, m3,  2, m2
    pslldq      m0, m4, 2
    pshuflw     m0, m0, 11100101b
    PRED4x4_LOWPASS m6, m1, m4, m3
    PRED4x4_LOWPASS m4, m3, m0, m4
    mova        m3, [r0]
    shr        r2d, 13
    pslldq      m1, m3, 2
    psrldq      m2, m3, 2
    pinsrw      m1, [r0-2], 0
    pinsrw      m2, [r0+r2+14], 7
    PRED4x4_LOWPASS m3, m2, m1, m3
    PALIGNR     m2, m3, m6,  2, m0
    PALIGNR     m5, m3, m6, 14, m0
    psrldq      m7, m3, 2
    PRED4x4_LOWPASS m6, m4, m2, m6
    PRED4x4_LOWPASS m3, m5, m7, m3
    mova [r4+r3*4], m6
    PALIGNR     m3, m6, 14, m2
    pslldq      m6, 2
    mova [r0+r3*1], m3
    PALIGNR     m3, m6, 14, m2
    pslldq      m6, 2
    mova [r0+r3*2], m3
    PALIGNR     m3, m6, 14, m2
    pslldq      m6, 2
    mova [r0+r1*1], m3
    PALIGNR     m3, m6, 14, m2
    pslldq      m6, 2
    mova [r0+r3*4], m3
    PALIGNR     m3, m6, 14, m2
    pslldq      m6, 2
    mova [r4+r3*1], m3
    PALIGNR     m3, m6, 14, m2
    pslldq      m6, 2
    mova [r4+r3*2], m3
    PALIGNR     m3, m6, 14, m6
    mova [r4+r1*1], m3
    RET
%endmacro

INIT_XMM
%define PALIGNR PALIGNR_MMX
PRED8x8L_DOWN_RIGHT sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_DOWN_RIGHT ssse3
%if HAVE_AVX
INIT_AVX
PRED8x8L_DOWN_RIGHT avx
%endif

;-----------------------------------------------------------------------------
; void pred8x8l_vertical_right(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
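; 8x8 analogue of pred4x4_vertical_right: rows alternate between pavgw
; and 3-tap values of the filtered edge, shifting in left-edge samples
; as the rows descend (hedged summary).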
%macro PRED8x8L_VERTICAL_RIGHT 1
; as with pred8x8l_down_right, the standard guarantees has_topleft here
cglobal pred8x8l_vertical_right_10_%1, 4,5,7
    sub         r0, r3
    lea         r4, [r0+r3*4]
    lea         r1, [r3*3]
    mova        m0, [r0+r3*1-16]
    punpckhwd   m0, [r0+r3*0-16]
    mova        m1, [r0+r1*1-16]
    punpckhwd   m1, [r0+r3*2-16]
    punpckhdq   m1, m0
    mova        m2, [r4+r3*1-16]
    punpckhwd   m2, [r4+r3*0-16]
    mova        m3, [r4+r1*1-16]
    punpckhwd   m3, [r4+r3*2-16]
    punpckhdq   m3, m2
    punpckhqdq  m3, m1
    mova        m0, [r4+r3*4-16]
    mova        m1, [r0]
    PALIGNR     m4, m3, m0, 14, m0
    PALIGNR     m1, m3,  2, m2
    PRED4x4_LOWPASS m3, m1, m4, m3
    mova        m2, [r0]
    shr        r2d, 13
    pslldq      m1, m2, 2
    psrldq      m5, m2, 2
    pinsrw      m1, [r0-2], 0
    pinsrw      m5, [r0+r2+14], 7
    PRED4x4_LOWPASS m2, m5, m1, m2
    PALIGNR     m6, m2, m3, 12, m1
    PALIGNR     m5, m2, m3, 14, m0
    PRED4x4_LOWPASS m0, m6, m2, m5
    pavgw       m2, m5
    mova [r0+r3*2], m0
    mova [r0+r3*1], m2
    pslldq      m6, m3, 4
    pslldq      m1, m3, 2
    PRED4x4_LOWPASS m1, m3, m6, m1
    PALIGNR     m2, m1, 14, m4
    mova [r0+r1*1], m2
    pslldq      m1, 2
    PALIGNR     m0, m1, 14, m3
    mova [r0+r3*4], m0
    pslldq      m1, 2
    PALIGNR     m2, m1, 14, m4
    mova [r4+r3*1], m2
    pslldq      m1, 2
    PALIGNR     m0, m1, 14, m3
    mova [r4+r3*2], m0
    pslldq      m1, 2
    PALIGNR     m2, m1, 14, m4
    mova [r4+r1*1], m2
    pslldq      m1, 2
    PALIGNR     m0, m1, 14, m1
    mova [r4+r3*4], m0
    RET
%endmacro

INIT_XMM
%define PALIGNR PALIGNR_MMX
PRED8x8L_VERTICAL_RIGHT sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_VERTICAL_RIGHT ssse3
%if HAVE_AVX
INIT_AVX
PRED8x8L_VERTICAL_RIGHT avx
%endif

;-----------------------------------------------------------------------------
; void pred8x8l_horizontal_up(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
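; 8x8 analogue of pred4x4_horizontal_up: alternating pavgw / 3-tap
; interpolation down the filtered left column, saturating to the
; bottom-left sample in the lower-right region (hedged summary).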
%macro PRED8x8L_HORIZONTAL_UP 1
cglobal pred8x8l_horizontal_up_10_%1, 4,4,6
    mova        m0, [r0+r3*0-16]
    punpckhwd   m0, [r0+r3*1-16]
    shr        r1d, 14
    dec         r1
    and         r1, r3
    sub         r1, r3
    mova        m4, [r0+r1*1-16]
    lea         r1, [r3*3]
    lea         r2, [r0+r3*4]
    mova        m1, [r0+r3*2-16]
    punpckhwd   m1, [r0+r1*1-16]
    punpckhdq   m0, m1
    mova        m2, [r2+r3*0-16]
    punpckhwd   m2, [r2+r3*1-16]
    mova        m3, [r2+r3*2-16]
    punpckhwd   m3, [r2+r1*1-16]
    punpckhdq   m2, m3
    punpckhqdq  m0, m2
    PALIGNR     m1, m0, m4, 14, m4
    psrldq      m2, m0, 2
    pshufhw     m2, m2, 10100100b
    PRED4x4_LOWPASS m0, m1, m2, m0
    psrldq      m1, m0, 2
    psrldq      m2, m0, 4
    pshufhw     m1, m1, 10100100b
    pshufhw     m2, m2, 01010100b
    pavgw       m4, m0, m1
    PRED4x4_LOWPASS m1, m2, m0, m1
    punpckhwd   m5, m4, m1
    punpcklwd   m4, m1
    mova [r2+r3*0], m5
    mova [r0+r3*0], m4
    pshufd      m0, m5, 11111001b
    pshufd      m1, m5, 11111110b
    pshufd      m2, m5, 11111111b
    mova [r2+r3*1], m0
    mova [r2+r3*2], m1
    mova [r2+r1*1], m2
    PALIGNR     m2, m5, m4, 4, m0
    PALIGNR     m3, m5, m4, 8, m1
    PALIGNR     m5, m5, m4, 12, m4
    mova [r0+r3*1], m2
    mova [r0+r3*2], m3
    mova [r0+r1*1], m5
    RET
%endmacro

INIT_XMM
%define PALIGNR PALIGNR_MMX
PRED8x8L_HORIZONTAL_UP sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_HORIZONTAL_UP ssse3
%if HAVE_AVX
INIT_AVX
PRED8x8L_HORIZONTAL_UP avx
%endif


;-----------------------------------------------------------------------------
; void pred16x16_vertical(pixel *src, int stride)
;-----------------------------------------------------------------------------
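; MOV16 stores one full 16-pixel row: two registers per row with XMM,
; four with MMX.  The prediction itself just repeats the row above.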
%macro MOV16 3-5
    mova [%1+     0], %2
    mova [%1+mmsize], %3
%if mmsize==8
    mova [%1+    16], %4
    mova [%1+    24], %5
%endif
%endmacro

%macro PRED16x16_VERTICAL 1
cglobal pred16x16_vertical_10_%1, 2,3
    sub   r0, r1
    mov  r2d, 8
    mova  m0, [r0+ 0]
    mova  m1, [r0+mmsize]
%if mmsize==8
    mova  m2, [r0+16]
    mova  m3, [r0+24]
%endif
.loop:
    MOV16 r0+r1*1, m0, m1, m2, m3
    MOV16 r0+r1*2, m0, m1, m2, m3
    lea   r0, [r0+r1*2]
    dec   r2d
    jg .loop
    REP_RET
%endmacro

INIT_MMX
PRED16x16_VERTICAL mmxext
INIT_XMM
PRED16x16_VERTICAL sse2

;-----------------------------------------------------------------------------
; void pred16x16_horizontal(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_HORIZONTAL 1
cglobal pred16x16_horizontal_10_%1, 2,3
    mov   r2d, 8
.vloop:
    movd   m0, [r0+r1*0-4]
    movd   m1, [r0+r1*1-4]
    SPLATW m0, m0, 1
    SPLATW m1, m1, 1
    MOV16  r0+r1*0, m0, m0, m0, m0
    MOV16  r0+r1*1, m1, m1, m1, m1
    lea    r0, [r0+r1*2]
    dec    r2d
    jg .vloop
    REP_RET
%endmacro

INIT_MMX
PRED16x16_HORIZONTAL mmxext
INIT_XMM
PRED16x16_HORIZONTAL sse2

;-----------------------------------------------------------------------------
; void pred16x16_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
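; Rough scalar equivalent: dc = (sum of the 16 top samples + sum of the
; 16 left samples + 16) >> 5, splatted over the block.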
%macro PRED16x16_DC 1
cglobal pred16x16_dc_10_%1, 2,6
    mov        r5, r0
1080 1081 1082 1083 1084 1085 1086 1087 1088
    sub        r0, r1
    mova       m0, [r0+0]
    paddw      m0, [r0+mmsize]
%if mmsize==8
    paddw      m0, [r0+16]
    paddw      m0, [r0+24]
%endif
    HADDW      m0, m2

    lea        r0, [r0+r1-2]
    movzx     r3d, word [r0]
    movzx     r4d, word [r0+r1]
%rep 7
    lea        r0, [r0+r1*2]
    movzx     r2d, word [r0]
    add       r3d, r2d
    movzx     r2d, word [r0+r1]
    add       r4d, r2d
%endrep
    lea       r3d, [r3+r4+16]

    movd       m1, r3d
    paddw      m0, m1
    psrlw      m0, 5
    SPLATW     m0, m0
    mov       r3d, 8
.loop:
    MOV16 r5+r1*0, m0, m0, m0, m0
    MOV16 r5+r1*1, m0, m0, m0, m0
    lea        r5, [r5+r1*2]
    dec       r3d
    jg .loop
    REP_RET
%endmacro

INIT_MMX
PRED16x16_DC mmxext
INIT_XMM
PRED16x16_DC sse2

;-----------------------------------------------------------------------------
; void pred16x16_top_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
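; dc = (sum of the 16 top samples + 8) >> 4.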
%macro PRED16x16_TOP_DC 1
cglobal pred16x16_top_dc_10_%1, 2,3
    sub        r0, r1
    mova       m0, [r0+0]
    paddw      m0, [r0+mmsize]
%if mmsize==8
    paddw      m0, [r0+16]
    paddw      m0, [r0+24]
%endif
    HADDW      m0, m2

    SPLATW     m0, m0
    paddw      m0, [pw_8]
    psrlw      m0, 4
    mov       r2d, 8
.loop:
    MOV16 r0+r1*1, m0, m0, m0, m0
    MOV16 r0+r1*2, m0, m0, m0, m0
    lea        r0, [r0+r1*2]
    dec       r2d
    jg .loop
    REP_RET
%endmacro

INIT_MMX
PRED16x16_TOP_DC mmxext
INIT_XMM
PRED16x16_TOP_DC sse2

;-----------------------------------------------------------------------------
; void pred16x16_left_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
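; dc = (sum of the 16 left samples + 8) >> 4.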
%macro PRED16x16_LEFT_DC 1
cglobal pred16x16_left_dc_10_%1, 2,6
    mov        r5, r0

    sub        r0, 2
    movzx     r3d, word [r0]
    movzx     r4d, word [r0+r1]
%rep 7
    lea        r0, [r0+r1*2]
    movzx     r2d, word [r0]
    add       r3d, r2d
    movzx     r2d, word [r0+r1]
    add       r4d, r2d
%endrep
    lea       r3d, [r3+r4+8]
    shr       r3d, 4

    movd       m0, r3d
    SPLATW     m0, m0
    mov       r3d, 8
.loop:
1176 1177 1178
    MOV16 r5+r1*0, m0, m0, m0, m0
    MOV16 r5+r1*1, m0, m0, m0, m0
    lea        r5, [r5+r1*2]
    dec       r3d
    jg .loop
    REP_RET
%endmacro

INIT_MMX
PRED16x16_LEFT_DC mmxext
INIT_XMM
PRED16x16_LEFT_DC sse2

;-----------------------------------------------------------------------------
; void pred16x16_128_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_128_DC 1
cglobal pred16x16_128_dc_10_%1, 2,3
    mova       m0, [pw_512]
    mov       r2d, 8
.loop:
    MOV16 r0+r1*0, m0, m0, m0, m0
    MOV16 r0+r1*1, m0, m0, m0, m0
    lea        r0, [r0+r1*2]
    dec       r2d
    jg .loop
    REP_RET
%endmacro

INIT_MMX
PRED16x16_128_DC mmxext
INIT_XMM
PRED16x16_128_DC sse2