;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 iDCT code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

cextern pw_1023
%define pw_pixel_max pw_1023
cextern pd_32

;-----------------------------------------------------------------------------
; void ff_h264_idct_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
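; STORE_DIFFx2: %1/%2 two rows of 4 dword residuals, %3 scratch, %4 zero register
; (lower clip bound), %5 dst pointer, %6 stride. Descales by >>6, packs to words,
; adds two rows of dst pixels and clips the sums to [0, pw_pixel_max].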
%macro STORE_DIFFx2 6
    psrad       %1, 6
    psrad       %2, 6
    packssdw    %1, %2
    movq        %3, [%5]
    movhps      %3, [%5+%6]
    paddsw      %1, %3
    CLIPW       %1, %4, [pw_pixel_max]
    movq      [%5], %1
    movhps [%5+%6], %1
%endmacro

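; STORE_DIFF16: %1/%2 two rows of 4 dword residuals, %3 zero register,
; %4 register holding pw_pixel_max, %5 dst address (one aligned 8-pixel row).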
%macro STORE_DIFF16 5
    psrad       %1, 6
    psrad       %2, 6
    packssdw    %1, %2
    paddsw      %1, [%5]
    CLIPW       %1, %3, %4
    mova      [%5], %1
%endmacro

;dst, in, stride
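; Row pass on 32-bit coefficients, transpose, add the +32 rounding bias to the
; first row, column pass, clear the coefficient block, then descale/clip/store
; two rows at a time via STORE_DIFFx2.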
%macro IDCT4_ADD_10 3
    mova  m0, [%2+ 0]
    mova  m1, [%2+16]
    mova  m2, [%2+32]
    mova  m3, [%2+48]
    IDCT4_1D d,0,1,2,3,4,5
    TRANSPOSE4x4D 0,1,2,3,4
    paddd m0, [pd_32]
    IDCT4_1D d,0,1,2,3,4,5
    pxor  m5, m5
    mova [%2+ 0], m5
    mova [%2+16], m5
    mova [%2+32], m5
    mova [%2+48], m5
    STORE_DIFFx2 m0, m1, m4, m5, %1, %3
    lea   %1, [%1+%3*2]
    STORE_DIFFx2 m2, m3, m4, m5, %1, %3
%endmacro

%macro IDCT_ADD_10 0
cglobal h264_idct_add_10, 3,3
    movsxdifnidn r2, r2d
    IDCT4_ADD_10 r0, r1, r2
    RET
%endmacro

INIT_XMM sse2
IDCT_ADD_10
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD_10
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct_add16_10(pixel *dst, const int *block_offset,
;                            int16_t *block, int stride,
;                            const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
;;;;;;; NO FATE SAMPLES TRIGGER THIS
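; add4x4_idct: shared helper. Expects r5 = block_offset[i], r2 -> 4x4 coefficient
; block, r3 = stride; computes dst = r0 + r5, runs the same 4x4 iDCT as above,
; zeroes the coefficients and stores the clipped result.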
%macro ADD4x4IDCT 0
add4x4_idct %+ SUFFIX:
    add   r5, r0
    mova  m0, [r2+ 0]
    mova  m1, [r2+16]
    mova  m2, [r2+32]
    mova  m3, [r2+48]
    IDCT4_1D d,0,1,2,3,4,5
    TRANSPOSE4x4D 0,1,2,3,4
    paddd m0, [pd_32]
    IDCT4_1D d,0,1,2,3,4,5
    pxor  m5, m5
    mova  [r2+ 0], m5
    mova  [r2+16], m5
    mova  [r2+32], m5
    mova  [r2+48], m5
    STORE_DIFFx2 m0, m1, m4, m5, r5, r3
    lea   r5, [r5+r3*2]
    STORE_DIFFx2 m2, m3, m4, m5, r5, r3
    ret
%endmacro

INIT_XMM sse2
ALIGN 16
ADD4x4IDCT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
ALIGN 16
ADD4x4IDCT
%endif

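; ADD16_OP: %1 = 4x4 block index, %2 = offset into the nnzc table (scan8 layout).
; Blocks with no nonzero coefficients are skipped; otherwise the helper above is
; called. r2 advances 64 bytes per block either way.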
%macro ADD16_OP 2
    cmp          byte [r4+%2], 0
    jz .skipblock%1
    mov         r5d, [r1+%1*4]
    call add4x4_idct %+ SUFFIX
.skipblock%1:
%if %1<15
    add          r2, 64
%endif
%endmacro

%macro IDCT_ADD16_10 0
cglobal h264_idct_add16_10, 5,6
    movsxdifnidn r3, r3d
    ADD16_OP 0, 4+1*8
    ADD16_OP 1, 5+1*8
    ADD16_OP 2, 4+2*8
    ADD16_OP 3, 5+2*8
    ADD16_OP 4, 6+1*8
    ADD16_OP 5, 7+1*8
    ADD16_OP 6, 6+2*8
    ADD16_OP 7, 7+2*8
    ADD16_OP 8, 4+3*8
    ADD16_OP 9, 5+3*8
    ADD16_OP 10, 4+4*8
    ADD16_OP 11, 5+4*8
    ADD16_OP 12, 6+3*8
    ADD16_OP 13, 7+3*8
    ADD16_OP 14, 6+4*8
    ADD16_OP 15, 7+4*8
    REP_RET
%endmacro

INIT_XMM sse2
IDCT_ADD16_10
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD16_10
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct_dc_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
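; IDCT_DC_ADD_OP_10: add the DC value broadcast in m0 to four rows of dst
; (%1 = dst, %2 = stride, %3 = 3*stride), clipping to [0, m6]. The AVX variant
; folds the dst loads into 3-operand paddw.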
%macro IDCT_DC_ADD_OP_10 3
    pxor      m5, m5
%if avx_enabled
    paddw     m1, m0, [%1+0   ]
    paddw     m2, m0, [%1+%2  ]
    paddw     m3, m0, [%1+%2*2]
    paddw     m4, m0, [%1+%3  ]
%else
    mova      m1, [%1+0   ]
    mova      m2, [%1+%2  ]
    mova      m3, [%1+%2*2]
    mova      m4, [%1+%3  ]
    paddw     m1, m0
    paddw     m2, m0
    paddw     m3, m0
    paddw     m4, m0
%endif
    CLIPW     m1, m5, m6
    CLIPW     m2, m5, m6
    CLIPW     m3, m5, m6
    CLIPW     m4, m5, m6
    mova [%1+0   ], m1
    mova [%1+%2  ], m2
    mova [%1+%2*2], m3
    mova [%1+%3  ], m4
%endmacro

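; dc = (block[0] + 32) >> 6, broadcast to words; the mmxext version covers a
; 4x4 block, one mm register (4 pixels) per row.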
INIT_MMX mmxext
cglobal h264_idct_dc_add_10,3,3
    movsxdifnidn r2, r2d
    movd      m0, [r1]
    mov dword [r1], 0
    paddd     m0, [pd_32]
    psrad     m0, 6
    lea       r1, [r2*3]
    pshufw    m0, m0, 0
    mova      m6, [pw_pixel_max]
    IDCT_DC_ADD_OP_10 r0, r2, r1
    RET

;-----------------------------------------------------------------------------
; void ff_h264_idct8_dc_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
%macro IDCT8_DC_ADD 0
cglobal h264_idct8_dc_add_10,3,4,7
    movsxdifnidn r2, r2d
    movd      m0, [r1]
    mov dword [r1], 0
    paddd     m0, [pd_32]
    psrad     m0, 6
    lea       r1, [r2*3]
    SPLATW    m0, m0, 0
    mova      m6, [pw_pixel_max]
    IDCT_DC_ADD_OP_10 r0, r2, r1
    lea       r0, [r0+r2*4]
    IDCT_DC_ADD_OP_10 r0, r2, r1
    RET
%endmacro

INIT_XMM sse2
IDCT8_DC_ADD
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_DC_ADD
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct_add16intra_10(pixel *dst, const int *block_offset,
;                                 int16_t *block, int stride,
;                                 const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
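; Each ADD16_OP_INTRA covers a horizontally adjacent pair of 4x4 blocks: one word
; compare tests both nnzc entries at once. Pairs with AC coefficients take the .ac
; path (two full 4x4 iDCTs); DC-only pairs go through idct_dc_add, which broadcasts
; both DC values in a single register and adds them across the 8-pixel-wide rows.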
%macro AC 1
.ac%1:
    mov  r5d, [r1+(%1+0)*4]
    call add4x4_idct %+ SUFFIX
    mov  r5d, [r1+(%1+1)*4]
    add  r2, 64
    call add4x4_idct %+ SUFFIX
    add  r2, 64
    jmp .skipadd%1
%endmacro

%assign last_block 16
%macro ADD16_OP_INTRA 2
    cmp      word [r4+%2], 0
    jnz .ac%1
    mov      r5d, [r2+ 0]
    or       r5d, [r2+64]
    jz .skipblock%1
    mov      r5d, [r1+(%1+0)*4]
    call idct_dc_add %+ SUFFIX
.skipblock%1:
%if %1<last_block-2
    add       r2, 128
%endif
.skipadd%1:
%endmacro

%macro IDCT_ADD16INTRA_10 0
idct_dc_add %+ SUFFIX:
    add       r5, r0
    movq      m0, [r2+ 0]
    movhps    m0, [r2+64]
    mov dword [r2+ 0], 0
    mov dword [r2+64], 0
    paddd     m0, [pd_32]
    psrad     m0, 6
    pshufhw   m0, m0, 0
    pshuflw   m0, m0, 0
    lea       r6, [r3*3]
    mova      m6, [pw_pixel_max]
    IDCT_DC_ADD_OP_10 r5, r3, r6
    ret

cglobal h264_idct_add16intra_10,5,7,8
    movsxdifnidn r3, r3d
    ADD16_OP_INTRA 0, 4+1*8
    ADD16_OP_INTRA 2, 4+2*8
    ADD16_OP_INTRA 4, 6+1*8
    ADD16_OP_INTRA 6, 6+2*8
    ADD16_OP_INTRA 8, 4+3*8
    ADD16_OP_INTRA 10, 4+4*8
    ADD16_OP_INTRA 12, 6+3*8
    ADD16_OP_INTRA 14, 6+4*8
    REP_RET
    AC 8
    AC 10
    AC 12
    AC 14
    AC 0
    AC 2
    AC 4
    AC 6
%endmacro

INIT_XMM sse2
IDCT_ADD16INTRA_10
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD16INTRA_10
%endif

%assign last_block 36
;-----------------------------------------------------------------------------
; void ff_h264_idct_add8_10(pixel **dst, const int *block_offset,
;                           int16_t *block, int stride,
;                           const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
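; r0 is pixel **dst: the U and V plane pointers are loaded from it in turn.
; Chroma coefficients start 1024 bytes (16 luma blocks * 64 bytes) into the
; block array; r2 is advanced to the second plane's coefficients between halves.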
%macro IDCT_ADD8 0
cglobal h264_idct_add8_10,5,8,7
    movsxdifnidn r3, r3d
%if ARCH_X86_64
    mov      r7, r0
%endif
    add      r2, 1024
    mov      r0, [r0]
    ADD16_OP_INTRA 16, 4+ 6*8
    ADD16_OP_INTRA 18, 4+ 7*8
    add      r2, 1024-128*2
%if ARCH_X86_64
    mov      r0, [r7+gprsize]
%else
    mov      r0, r0m
    mov      r0, [r0+gprsize]
%endif
    ADD16_OP_INTRA 32, 4+11*8
    ADD16_OP_INTRA 34, 4+12*8
    REP_RET
    AC 16
    AC 18
    AC 32
    AC 34

%endmacro ; IDCT_ADD8

INIT_XMM sse2
IDCT_ADD8
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD8
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct_add8_422_10(pixel **dst, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
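; Same as above, but 4:2:2 has eight 4x4 chroma blocks per plane, so four block
; pairs are processed per plane instead of two.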
%assign last_block 44

%macro IDCT_ADD8_422 0

cglobal h264_idct_add8_422_10, 5, 8, 7
    movsxdifnidn r3, r3d
%if ARCH_X86_64
    mov      r7, r0
%endif

    add      r2, 1024
    mov      r0, [r0]
    ADD16_OP_INTRA 16, 4+ 6*8
    ADD16_OP_INTRA 18, 4+ 7*8
    ADD16_OP_INTRA 24, 4+ 8*8 ; i+4
    ADD16_OP_INTRA 26, 4+ 9*8 ; i+4
    add      r2, 1024-128*4

%if ARCH_X86_64
    mov      r0, [r7+gprsize]
%else
    mov      r0, r0m
    mov      r0, [r0+gprsize]
%endif

    ADD16_OP_INTRA 32, 4+11*8
    ADD16_OP_INTRA 34, 4+12*8
    ADD16_OP_INTRA 40, 4+13*8 ; i+4
    ADD16_OP_INTRA 42, 4+14*8 ; i+4
    REP_RET
    AC 16
    AC 18
    AC 24 ; i+4
    AC 26 ; i+4
    AC 32
    AC 34
    AC 40 ; i+4
    AC 42 ; i+4

%endmacro

INIT_XMM sse2
IDCT_ADD8_422
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD8_422
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct8_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
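; IDCT8_1D: one pass of the 8-point transform at 32-bit precision. Rows 1,2,3,5,6,7
; are expected in m1,m2,m3,m5,m6,m7; rows 0 and 4 are read from the memory operands
; %1 and %2. Results end up in order in m0-m7.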
%macro IDCT8_1D 2
    SWAP      0, 1
    psrad     m4, m5, 1
    psrad     m1, m0, 1
    paddd     m4, m5
    paddd     m1, m0
    paddd     m4, m7
    paddd     m1, m5
    psubd     m4, m0
    paddd     m1, m3

    psubd     m0, m3
    psubd     m5, m3
    paddd     m0, m7
    psubd     m5, m7
    psrad     m3, 1
    psrad     m7, 1
    psubd     m0, m3
    psubd     m5, m7

    SWAP      1, 7
    psrad     m1, m7, 2
    psrad     m3, m4, 2
    paddd     m3, m0
    psrad     m0, 2
    paddd     m1, m5
    psrad     m5, 2
    psubd     m0, m4
    psubd     m7, m5

    SWAP      5, 6
    psrad     m4, m2, 1
    psrad     m6, m5, 1
    psubd     m4, m5
    paddd     m6, m2

    mova      m2, %1
    mova      m5, %2
    SUMSUB_BA d, 5, 2
    SUMSUB_BA d, 6, 5
    SUMSUB_BA d, 4, 2
    SUMSUB_BA d, 7, 6
    SUMSUB_BA d, 0, 4
    SUMSUB_BA d, 3, 2
    SUMSUB_BA d, 1, 5
    SWAP      7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
%endmacro

%macro IDCT8_1D_FULL 1
    mova         m7, [%1+112*2]
    mova         m6, [%1+ 96*2]
    mova         m5, [%1+ 80*2]
    mova         m3, [%1+ 48*2]
    mova         m2, [%1+ 32*2]
    mova         m1, [%1+ 16*2]
    IDCT8_1D   [%1], [%1+ 64*2]
%endmacro

; %1=int16_t *block, %2=int16_t *dstblock
%macro IDCT8_ADD_SSE_START 2
    IDCT8_1D_FULL %1
%if ARCH_X86_64
    TRANSPOSE4x4D  0,1,2,3,8
    mova    [%2    ], m0
    TRANSPOSE4x4D  4,5,6,7,8
    mova    [%2+8*2], m4
%else
    mova         [%1], m7
    TRANSPOSE4x4D   0,1,2,3,7
    mova           m7, [%1]
    mova    [%2     ], m0
    mova    [%2+16*2], m1
    mova    [%2+32*2], m2
    mova    [%2+48*2], m3
    TRANSPOSE4x4D   4,5,6,7,3
    mova    [%2+ 8*2], m4
    mova    [%2+24*2], m5
    mova    [%2+40*2], m6
    mova    [%2+56*2], m7
%endif
%endmacro

; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_SSE_END 3
    IDCT8_1D_FULL %2
    mova  [%2     ], m6
    mova  [%2+16*2], m7

    pxor         m7, m7
    STORE_DIFFx2 m0, m1, m6, m7, %1, %3
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m2, m3, m6, m7, %1, %3
    mova         m0, [%2     ]
    mova         m1, [%2+16*2]
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m4, m5, m6, m7, %1, %3
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m0, m1, m6, m7, %1, %3
%endmacro

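; The non-UNIX64 entry point only aligns the stack and calls the worker below;
; h264_idct8_add1_10 is also used by h264_idct8_add4. The +32 rounding bias is
; folded into the DC coefficient up front; the row pass spills to a 256-byte
; stack buffer on x86_32, while x86_64 keeps most rows in xmm8-15.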
%macro IDCT8_ADD 0
cglobal h264_idct8_add_10, 3,4,16
    movsxdifnidn r2, r2d
%if UNIX64 == 0
    %assign pad 16-gprsize-(stack_offset&15)
    sub  rsp, pad
    call h264_idct8_add1_10 %+ SUFFIX
    add  rsp, pad
    RET
%endif

ALIGN 16
; TODO: does not need to use stack
h264_idct8_add1_10 %+ SUFFIX:
%assign pad 256+16-gprsize
    sub          rsp, pad
    add   dword [r1], 32

%if ARCH_X86_64
    IDCT8_ADD_SSE_START r1, rsp
    SWAP 1,  9
    SWAP 2, 10
    SWAP 3, 11
    SWAP 5, 13
    SWAP 6, 14
    SWAP 7, 15
    IDCT8_ADD_SSE_START r1+16, rsp+128
    PERMUTE 1,9, 2,10, 3,11, 5,1, 6,2, 7,3, 9,13, 10,14, 11,15, 13,5, 14,6, 15,7
    IDCT8_1D [rsp], [rsp+128]
    SWAP 0,  8
    SWAP 1,  9
    SWAP 2, 10
    SWAP 3, 11
    SWAP 4, 12
    SWAP 5, 13
    SWAP 6, 14
    SWAP 7, 15
    IDCT8_1D [rsp+16], [rsp+144]
    psrad         m8, 6
    psrad         m0, 6
    packssdw      m8, m0
    paddsw        m8, [r0]
    pxor          m0, m0
    mova    [r1+  0], m0
    mova    [r1+ 16], m0
    mova    [r1+ 32], m0
    mova    [r1+ 48], m0
    mova    [r1+ 64], m0
    mova    [r1+ 80], m0
    mova    [r1+ 96], m0
    mova    [r1+112], m0
    mova    [r1+128], m0
    mova    [r1+144], m0
    mova    [r1+160], m0
    mova    [r1+176], m0
    mova    [r1+192], m0
    mova    [r1+208], m0
    mova    [r1+224], m0
    mova    [r1+240], m0
    CLIPW         m8, m0, [pw_pixel_max]
    mova        [r0], m8
    mova          m8, [pw_pixel_max]
    STORE_DIFF16  m9, m1, m0, m8, r0+r2
    lea           r0, [r0+r2*2]
    STORE_DIFF16 m10, m2, m0, m8, r0
    STORE_DIFF16 m11, m3, m0, m8, r0+r2
    lea           r0, [r0+r2*2]
    STORE_DIFF16 m12, m4, m0, m8, r0
    STORE_DIFF16 m13, m5, m0, m8, r0+r2
    lea           r0, [r0+r2*2]
    STORE_DIFF16 m14, m6, m0, m8, r0
    STORE_DIFF16 m15, m7, m0, m8, r0+r2
%else
    IDCT8_ADD_SSE_START r1,    rsp
    IDCT8_ADD_SSE_START r1+16, rsp+128
    lea           r3, [r0+8]
    IDCT8_ADD_SSE_END r0, rsp,    r2
    IDCT8_ADD_SSE_END r3, rsp+16, r2
    mova    [r1+  0], m7
    mova    [r1+ 16], m7
    mova    [r1+ 32], m7
    mova    [r1+ 48], m7
    mova    [r1+ 64], m7
    mova    [r1+ 80], m7
    mova    [r1+ 96], m7
    mova    [r1+112], m7
    mova    [r1+128], m7
    mova    [r1+144], m7
    mova    [r1+160], m7
    mova    [r1+176], m7
    mova    [r1+192], m7
    mova    [r1+208], m7
    mova    [r1+224], m7
    mova    [r1+240], m7
%endif ; ARCH_X86_64

    add          rsp, pad
    ret
%endmacro

INIT_XMM sse2
IDCT8_ADD
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_ADD
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct8_add4_10(pixel **dst, const int *block_offset,
;                            int16_t *block, int stride,
;                            const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
;;;;;;; NO FATE SAMPLES TRIGGER THIS
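; Declared with 0 register args so the incoming arguments stay untouched; they
; are copied into r5/r6/r1/r2/r4 below. Each IDCT8_ADD4_OP handles one 8x8 block
; (256 bytes of coefficients), skipped when its nnzc byte is zero.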
%macro IDCT8_ADD4_OP 2
    cmp       byte [r4+%2], 0
    jz .skipblock%1
    mov      r0d, [r6+%1*4]
    add       r0, r5
    call h264_idct8_add1_10 %+ SUFFIX
.skipblock%1:
%if %1<12
    add       r1, 256
%endif
%endmacro

%macro IDCT8_ADD4 0
cglobal h264_idct8_add4_10, 0,7,16
    movsxdifnidn r3, r3d
    %assign pad 16-gprsize-(stack_offset&15)
    SUB      rsp, pad
    mov       r5, r0mp
    mov       r6, r1mp
    mov       r1, r2mp
    mov      r2d, r3m
    movifnidn r4, r4mp
    IDCT8_ADD4_OP  0, 4+1*8
    IDCT8_ADD4_OP  4, 6+1*8
    IDCT8_ADD4_OP  8, 4+3*8
    IDCT8_ADD4_OP 12, 6+3*8
    ADD       rsp, pad
    RET
%endmacro ; IDCT8_ADD4

INIT_XMM sse2
IDCT8_ADD4
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_ADD4
%endif