;******************************************************************************
;* VP9 IDCT SIMD optimizations
;*
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"
%include "vp9itxfm_template.asm"

SECTION_RODATA 32

%macro VP9_IDCT_COEFFS 2-3 0
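; %1/%2 are (approximately) round(2^14 * cos(k*pi/64)) / round(2^14 * sin(k*pi/64))
; coefficient pairs, %3 == 1 additionally emits the negated/swapped variants;
; the pw_*x2 constants are pre-doubled for use with pmulhrsw, which shifts by
; 15 rather than 14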
const pw_m%1_%2
times 8 dw -%1,  %2
const pw_%2_%1
times 8 dw  %2,  %1

%if %3 == 1
const pw_m%2_m%1
times 8 dw -%2, -%1
%if %1 != %2
const pw_m%2_%1
times 8 dw -%2,  %1
const pw_%1_%2
times 8 dw  %1,  %2
%endif
%endif

%if %1 < 11585
pw_m%1x2:   times 16 dw -%1*2
%elif %1 > 11585
pw_%1x2:    times 16 dw  %1*2
%else
const pw_%1x2
times 16 dw %1*2
%endif

%if %2 != %1
pw_%2x2:    times 16 dw  %2*2
%endif
%endmacro

VP9_IDCT_COEFFS 16364,   804
VP9_IDCT_COEFFS 16305,  1606
VP9_IDCT_COEFFS 16069,  3196, 1
VP9_IDCT_COEFFS 15893,  3981
VP9_IDCT_COEFFS 15137,  6270, 1
VP9_IDCT_COEFFS 14811,  7005
VP9_IDCT_COEFFS 14449,  7723
VP9_IDCT_COEFFS 13160,  9760
VP9_IDCT_COEFFS 11585, 11585, 1
VP9_IDCT_COEFFS 11003, 12140
VP9_IDCT_COEFFS 10394, 12665
VP9_IDCT_COEFFS  9102, 13623, 1
VP9_IDCT_COEFFS  8423, 14053
VP9_IDCT_COEFFS  5520, 15426
VP9_IDCT_COEFFS  4756, 15679
VP9_IDCT_COEFFS  2404, 16207

const pw_5283_13377
times 4 dw 5283, 13377
const pw_9929_13377
times 4 dw 9929, 13377
const pw_15212_m13377
times 4 dw 15212, -13377
const pw_15212_9929
times 4 dw 15212, 9929
const pw_m5283_m15212
times 4 dw -5283, -15212
const pw_13377x2
times 8 dw 13377*2
const pw_m13377_13377
times 4 dw -13377, 13377
const pw_13377_0
times 4 dw 13377, 0

cextern pw_8
cextern pw_16
cextern pw_32
cextern pw_512
cextern pw_1024
cextern pw_2048
cextern pw_m1
cextern pd_8192

SECTION .text

%macro VP9_UNPACK_MULSUB_2D_4X 6 ; dst1 [src1], dst2 [src2], dst3, dst4, mul1, mul2
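; unpacks the word pairs of src1/src2 into dwords and computes, in 32-bit
; precision, the 2x2 rotation
;   dst1/dst3 = src1*mul2 - src2*mul1   (low/high halves)
;   dst2/dst4 = src1*mul1 + src2*mul2   (low/high halves)
; the +8192, >>14 rounding and repacking to words is left to
; VP9_RND_SH_SUMSUB_BA below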
    punpckhwd          m%4, m%2, m%1
    punpcklwd          m%2, m%1
    pmaddwd            m%3, m%4, [pw_m%5_%6]
    pmaddwd            m%4, [pw_%6_%5]
    pmaddwd            m%1, m%2, [pw_m%5_%6]
    pmaddwd            m%2, [pw_%6_%5]
%endmacro

%macro VP9_RND_SH_SUMSUB_BA 6 ; dst1 [src1], dst2 [src2], src3, src4, tmp, round
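; sums/differences the dword pairs produced by VP9_UNPACK_MULSUB_2D_4X, adds
; the rounding constant in %6 (pd_8192, i.e. 1 << 13), shifts right by 14 and
; packs the low/high halves back into word results in m%1 and m%2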
    SUMSUB_BA            d, %1, %2, %5
    SUMSUB_BA            d, %3, %4, %5
    paddd              m%1, %6
    paddd              m%2, %6
    paddd              m%3, %6
    paddd              m%4, %6
    psrad              m%1, 14
    psrad              m%2, 14
    psrad              m%3, 14
    psrad              m%4, 14
    packssdw           m%1, m%3
    packssdw           m%2, m%4
%endmacro

%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
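; adds the two rows of word residuals in m%1/m%2 to the pixels at %6 and
; %6+strideq, clipping to 0..255, and stores them back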
%if mmsize == 32
    pmovzxbw           m%3, [%6]
    pmovzxbw           m%4, [%6+strideq]
%else
    movh               m%3, [%6]
    movh               m%4, [%6+strideq]
    punpcklbw          m%3, m%5
    punpcklbw          m%4, m%5
%endif
    paddw              m%3, m%1
    paddw              m%4, m%2
%if mmsize == 32
    packuswb           m%3, m%4
    ; Intel...
    vpermq             m%3, m%3, q3120
    mova              [%6], xm%3
    vextracti128 [%6+strideq], m%3, 1
%elif mmsize == 16
    packuswb           m%3, m%4
    movh              [%6], m%3
    movhps    [%6+strideq], m%3
%else
    packuswb           m%3, m%5
    packuswb           m%4, m%5
    movh              [%6], m%3
    movh      [%6+strideq], m%4
%endif
%endmacro

%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
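; e.g. ZERO_BLOCK blockq, 32, 16, m0 clears 16 rows of 16 int16_t coefficients
; (32 bytes each) using the zero register m0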
%assign %%y 0
%rep %3
%assign %%x 0
%rep %3*2/mmsize
    mova      [%1+%%y+%%x], %4
%assign %%x (%%x+mmsize)
%endrep
%assign %%y (%%y+%2)
%endrep
%endmacro

;-------------------------------------------------------------------------------------------
; void vp9_iwht_iwht_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

INIT_MMX mmx
cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
    mova                m0, [blockq+0*8]
    mova                m1, [blockq+1*8]
    mova                m2, [blockq+2*8]
    mova                m3, [blockq+3*8]
    psraw               m0, 2
    psraw               m1, 2
    psraw               m2, 2
    psraw               m3, 2

    VP9_IWHT4_1D
    TRANSPOSE4x4W        0, 1, 2, 3, 4
    VP9_IWHT4_1D

    pxor                m4, m4
    VP9_STORE_2X         0, 1, 5, 6, 4
    lea               dstq, [dstq+strideq*2]
    VP9_STORE_2X         2, 3, 5, 6, 4
    ZERO_BLOCK      blockq, 8, 4, m4
    RET

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

; 2x2 top left corner
%macro VP9_IDCT4_2x2_1D 0
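    ; expects the caller to have preloaded m5 = pw_11585x2, m6 = pw_6270x2 and
    ; m7 = pw_15137x2 (see the .idct2x2 path below)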
    pmulhrsw            m0, m5                              ; m0=t1
    mova                m2, m0                              ; m2=t0
    mova                m3, m1
    pmulhrsw            m1, m6                              ; m1=t2
    pmulhrsw            m3, m7                              ; m3=t3
    VP9_IDCT4_1D_FINALIZE
%endmacro

%macro VP9_IDCT4_WRITEOUT 0
%if cpuflag(ssse3)
    mova                m5, [pw_2048]
    pmulhrsw            m0, m5              ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    pmulhrsw            m1, m5
%else
    mova                m5, [pw_8]
    paddw               m0, m5
    paddw               m1, m5
    psraw               m0, 4
    psraw               m1, 4
%endif
    VP9_STORE_2X         0,  1,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
%if cpuflag(ssse3)
    pmulhrsw            m2, m5
    pmulhrsw            m3, m5
%else
    paddw               m2, m5
    paddw               m3, m5
    psraw               m2, 4
    psraw               m3, 4
%endif
    VP9_STORE_2X         2,  3,  6,  7,  4
%endmacro

%macro IDCT_4x4_FN 1
INIT_MMX %1
cglobal vp9_idct_idct_4x4_add, 4, 4, 0, dst, stride, block, eob

%if cpuflag(ssse3)
    cmp eobd, 4 ; 2x2 or smaller
    jg .idctfull

    cmp eobd, 1 ; faster path for when only DC is set
    jne .idct2x2
%else
    cmp eobd, 1
    jg .idctfull
%endif

%if cpuflag(ssse3)
    movd                m0, [blockq]
    mova                m5, [pw_11585x2]
    pmulhrsw            m0, m5
    pmulhrsw            m0, m5
%else
    DEFINE_ARGS dst, stride, block, coef
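    ; scalar equivalent of the two pmulhrsw by pw_11585x2 in the ssse3 path,
    ; with the final (x + 8) >> 4 store rounding folded into the last shift:
    ;   dc = (dc * 11585 + 8192) >> 14
    ;   dc = (dc * 11585 + (8 << 14) + 8192) >> (14 + 4)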
    movsx            coefd, word [blockq]
    imul             coefd, 11585
    add              coefd, 8192
    sar              coefd, 14
    imul             coefd, 11585
    add              coefd, (8 << 14) + 8192
    sar              coefd, 14 + 4
    movd                m0, coefd
%endif
    pshufw              m0, m0, 0
    pxor                m4, m4
    movh          [blockq], m4
%if cpuflag(ssse3)
    pmulhrsw            m0, [pw_2048]       ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
%endif
    VP9_STORE_2X         0,  0,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
    VP9_STORE_2X         0,  0,  6,  7,  4
    RET

%if cpuflag(ssse3)
; faster path for when only the top left 2x2 block is set
.idct2x2:
    movd                m0, [blockq+0]
    movd                m1, [blockq+8]
    mova                m5, [pw_11585x2]
    mova                m6, [pw_6270x2]
    mova                m7, [pw_15137x2]
    VP9_IDCT4_2x2_1D
    ; partial 2x4 transpose
    punpcklwd           m0, m1
    punpcklwd           m2, m3
    SBUTTERFLY          dq, 0, 2, 1
    SWAP                1, 2
    VP9_IDCT4_2x2_1D
    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
    movh       [blockq+ 0], m4
    movh       [blockq+ 8], m4
    VP9_IDCT4_WRITEOUT
    RET
%endif

.idctfull: ; generic full 4x4 idct/idct
    mova                m0, [blockq+ 0]
    mova                m1, [blockq+ 8]
    mova                m2, [blockq+16]
    mova                m3, [blockq+24]
%if cpuflag(ssse3)
    mova                m6, [pw_11585x2]
%endif
    mova                m7, [pd_8192]       ; rounding
    VP9_IDCT4_1D
    TRANSPOSE4x4W  0, 1, 2, 3, 4
    VP9_IDCT4_1D
    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
    mova       [blockq+ 0], m4
    mova       [blockq+ 8], m4
    mova       [blockq+16], m4
    mova       [blockq+24], m4
    VP9_IDCT4_WRITEOUT
    RET
%endmacro

IDCT_4x4_FN mmxext
IDCT_4x4_FN ssse3

;-------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro IADST4_FN 5
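; %1/%2 = name and 1-D macro of the first-pass transform, %3/%4 = same for the
; second pass, %5 = instruction set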
INIT_MMX %5
cglobal vp9_%1_%3_4x4_add, 3, 3, 0, dst, stride, block, eob
%if WIN64 && notcpuflag(ssse3)
    WIN64_SPILL_XMM 8
%endif
    movdqa            xmm5, [pd_8192]
    mova                m0, [blockq+ 0]
    mova                m1, [blockq+ 8]
    mova                m2, [blockq+16]
    mova                m3, [blockq+24]
%if cpuflag(ssse3)
    mova                m6, [pw_11585x2]
%endif
%ifnidn %1%3, iadstiadst
    movdq2q             m7, xmm5
%endif
    VP9_%2_1D
    TRANSPOSE4x4W  0, 1, 2, 3, 4
    VP9_%4_1D
    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
    mova       [blockq+ 0], m4
    mova       [blockq+ 8], m4
    mova       [blockq+16], m4
    mova       [blockq+24], m4
    VP9_IDCT4_WRITEOUT
    RET
%endmacro

IADST4_FN idct,  IDCT4,  iadst, IADST4, sse2
IADST4_FN iadst, IADST4, idct,  IDCT4,  sse2
IADST4_FN iadst, IADST4, iadst, IADST4, sse2

IADST4_FN idct,  IDCT4,  iadst, IADST4, ssse3
IADST4_FN iadst, IADST4, idct,  IDCT4,  ssse3
IADST4_FN iadst, IADST4, iadst, IADST4, ssse3

%macro SCRATCH 3
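; SCRATCH/UNSCRATCH park a value in a high xmm register (m8-m15) via SWAP on
; x86-64, and fall back to spilling to / reloading from the given memory slot
; on x86-32, which only has 8 xmm registers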
%if ARCH_X86_64
    SWAP                %1, %2
%else
    mova              [%3], m%1
%endif
%endmacro

%macro UNSCRATCH 3
%if ARCH_X86_64
    SWAP                %1, %2
%else
    mova               m%1, [%3]
%endif
%endmacro

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT8_1D_FINALIZE 0
    SUMSUB_BA            w,  3,  6, 5                       ; m3=t0+t7, m6=t0-t7
    SUMSUB_BA            w,  1,  2, 5                       ; m1=t1+t6, m2=t1-t6
    SUMSUB_BA            w,  7,  0, 5                       ; m7=t2+t5, m0=t2-t5

    UNSCRATCH            5, 8, blockq+ 0
    SCRATCH              2, 8, blockq+ 0

    SUMSUB_BA            w,  5,  4, 2                       ; m5=t3+t4, m4=t3-t4
    SWAP                 7,  6,  2
    SWAP                 3,  5,  0

%if ARCH_X86_64
    SWAP                 6, 8
%endif
%endmacro

; x86-32
; - in: m0/m4 are in mem
; - out: m6 is in mem
; x86-64:
; - everything is in registers (m0-7)
%macro VP9_IDCT8_1D 0
%if ARCH_X86_64
    SWAP                 0, 8
    SWAP                 4, 9
%endif

    VP9_UNPACK_MULSUB_2W_4X 5,  3,  9102, 13623, D_8192_REG, 0, 4  ; m5=t5a, m3=t6a
    VP9_UNPACK_MULSUB_2W_4X 1,  7, 16069,  3196, D_8192_REG, 0, 4  ; m1=t4a, m7=t7a
    SUMSUB_BA            w,  5,  1, 0                       ; m5=t4a+t5a (t4), m1=t4a-t5a (t5a)
    SUMSUB_BA            w,  3,  7, 0                       ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
%if cpuflag(ssse3)
    SUMSUB_BA            w,  1,  7, 0                       ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw            m1, W_11585x2_REG                   ; m1=t6
    pmulhrsw            m7, W_11585x2_REG                   ; m7=t5
%else
    VP9_UNPACK_MULSUB_2W_4X 7,  1, 11585, 11585, D_8192_REG, 0, 4
%endif
    VP9_UNPACK_MULSUB_2W_4X 2,  6, 15137,  6270, D_8192_REG, 0, 4  ; m2=t2a, m6=t3a

    UNSCRATCH            0, 8, blockq+ 0    ; IN(0)
    UNSCRATCH            4, 9, blockq+64    ; IN(4)
    SCRATCH              5, 8, blockq+ 0

%if cpuflag(ssse3)
    SUMSUB_BA            w, 4, 0, 5                         ; m4=IN(0)+IN(4) m0=IN(0)-IN(4)
    pmulhrsw            m4, W_11585x2_REG                   ; m4=t0a
    pmulhrsw            m0, W_11585x2_REG                   ; m0=t1a
%else
    SCRATCH              7, 9, blockq+64
    VP9_UNPACK_MULSUB_2W_4X 0,  4, 11585, 11585, D_8192_REG, 5, 7
    UNSCRATCH            7, 9, blockq+64
%endif
    SUMSUB_BA            w,  6,  4, 5                       ; m6=t0a+t3a (t0), m4=t0a-t3a (t3)
    SUMSUB_BA            w,  2,  0, 5                       ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)

    VP9_IDCT8_1D_FINALIZE
%endmacro

%macro VP9_IDCT8_4x4_1D 0
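    ; 8-point 1-D idct for the case where only the first four inputs (m0-m3)
    ; are nonzero, so the first butterfly stage reduces to single pmulhrsw
    ; multiplies by the pre-doubled constants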
    pmulhrsw            m0, W_11585x2_REG                   ; m0=t1a/t0a
    pmulhrsw            m6, m2, [pw_15137x2]                ; m6=t3a
    pmulhrsw            m2, [pw_6270x2]                     ; m2=t2a
    pmulhrsw            m7, m1, [pw_16069x2]                ; m7=t7a
    pmulhrsw            m1, [pw_3196x2]                     ; m1=t4a
    pmulhrsw            m5, m3, [pw_m9102x2]                ; m5=t5a
    pmulhrsw            m3, [pw_13623x2]                    ; m3=t6a
    SUMSUB_BA            w,  5,  1, 4                       ; m1=t4a+t5a (t4), m5=t4a-t5a (t5a)
    SUMSUB_BA            w,  3,  7, 4                       ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA            w,  1,  7, 4                       ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw            m1, W_11585x2_REG                   ; m1=t6
    pmulhrsw            m7, W_11585x2_REG                   ; m7=t5
    psubw               m4, m0, m6                          ; m4=t0a-t3a (t3)
    paddw               m6, m0                              ; m6=t0a+t3a (t0)
    SCRATCH              5,  8, blockq+ 0
    SUMSUB_BA            w,  2,  0, 5                       ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE
%endmacro

%macro VP9_IDCT8_2x2_1D 1
    pmulhrsw            m0, W_11585x2_REG                   ; m0=t0
    pmulhrsw            m3, m1, W_16069x2_REG               ; m3=t7
    pmulhrsw            m1, W_3196x2_REG                    ; m1=t4
    psubw               m7, m3, m1                          ; t5 = t7a - t4a
    paddw               m5, m3, m1                          ; t6 = t7a + t4a
    pmulhrsw            m7, W_11585x2_REG                   ; m7=t5
    pmulhrsw            m5, W_11585x2_REG                   ; m5=t6
    SWAP                 5,  1
    ; merged VP9_IDCT8_1D_FINALIZE to make register-sharing w/ avx easier
    psubw               m6, m0, m3                          ; m6=t0-t7
    paddw               m3, m0                              ; m3=t0+t7
    psubw               m2, m0, m1                          ; m2=t1-t6
    paddw               m1, m0                              ; m1=t1+t6
%if %1 == 1
    punpcklwd           m3, m1
%define SCRATCH_REG 1
%elif ARCH_X86_32
    mova       [blockq+ 0], m2
%define SCRATCH_REG 2
%else
%define SCRATCH_REG 8
%endif
    psubw               m4, m0, m5                          ; m4=t3-t4
    paddw               m5, m0                              ; m5=t3+t4
    SUMSUB_BA            w,  7,  0, SCRATCH_REG             ; m7=t2+t5, m0=t2-t5
    SWAP                 7,  6,  2
    SWAP                 3,  5,  0
%undef SCRATCH_REG
%endmacro

%macro VP9_IDCT8_WRITEx2 6-8 5 ; line1, line2, tmp1, tmp2, zero, pw_1024/pw_16, shift
%if cpuflag(ssse3)
    pmulhrsw           m%1, %6              ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    pmulhrsw           m%2, %6
%else
    paddw              m%1, %6
    paddw              m%2, %6
    psraw              m%1, %7
    psraw              m%2, %7
%endif
%if %0 <= 7
    VP9_STORE_2X        %1, %2, %3, %4, %5
%else
    VP9_STORE_2X        %1, %2, %3, %4, %5, %8
%endif
%endmacro

; x86-32:
; - m6 is in mem
; x86-64:
; - m8 holds m6 (SWAP)
; m6 holds zero
%macro VP9_IDCT8_WRITEOUT 0
%if ARCH_X86_64
%if cpuflag(ssse3)
    mova                m9, [pw_1024]
%else
    mova                m9, [pw_16]
%endif
%define ROUND_REG m9
%else
%if cpuflag(ssse3)
%define ROUND_REG [pw_1024]
%else
%define ROUND_REG [pw_16]
%endif
%endif
    SCRATCH              5, 10, blockq+16
    SCRATCH              7, 11, blockq+32
    VP9_IDCT8_WRITEx2    0,  1, 5, 7, 6, ROUND_REG
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2    2,  3, 5, 7, 6, ROUND_REG
    lea               dstq, [dstq+2*strideq]
    UNSCRATCH            5, 10, blockq+16
    UNSCRATCH            7, 11, blockq+32
    VP9_IDCT8_WRITEx2    4,  5, 0, 1, 6, ROUND_REG
    lea               dstq, [dstq+2*strideq]
    UNSCRATCH            5, 8, blockq+ 0
    VP9_IDCT8_WRITEx2    5,  7, 0, 1, 6, ROUND_REG

%undef ROUND_REG
%endmacro

%macro VP9_IDCT_IDCT_8x8_ADD_XMM 2
INIT_XMM %1
cglobal vp9_idct_idct_8x8_add, 4, 4, %2, dst, stride, block, eob

%if cpuflag(ssse3)
%if ARCH_X86_64
    mova               m12, [pw_11585x2]    ; often used
%define W_11585x2_REG m12
%else
%define W_11585x2_REG [pw_11585x2]
%endif

    cmp eobd, 12 ; top left half or less
    jg .idctfull

    cmp eobd, 3  ; top left corner or less
    jg .idcthalf

    cmp eobd, 1 ; faster path for when only DC is set
    jne .idcttopleftcorner
%else
    cmp eobd, 1
    jg .idctfull
%endif

%if cpuflag(ssse3)
    movd                m0, [blockq]
    pmulhrsw            m0, W_11585x2_REG
    pmulhrsw            m0, W_11585x2_REG
%else
    DEFINE_ARGS dst, stride, block, coef
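    ; same scalar DC computation as in the 4x4 case, but with the 8x8 store
    ; rounding ((x + 16) >> 5) folded into the final shift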
    movsx            coefd, word [blockq]
    imul             coefd, 11585
    add              coefd, 8192
    sar              coefd, 14
    imul             coefd, 11585
    add              coefd, (16 << 14) + 8192
    sar              coefd, 14 + 5
    movd                m0, coefd
%endif
    SPLATW              m0, m0, 0
    pxor                m4, m4
    movd          [blockq], m4
%if cpuflag(ssse3)
    pmulhrsw            m0, [pw_1024]       ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
%endif
%rep 3
    VP9_STORE_2X         0,  0,  6,  7,  4
    lea               dstq, [dstq+2*strideq]
%endrep
    VP9_STORE_2X         0,  0,  6,  7,  4
    RET

%if cpuflag(ssse3)
; faster path for when only the top left corner is set (3 inputs: DC, the one
; to the right of DC, the one below DC). Note: this also works for a 2x2 block
.idcttopleftcorner:
    movd                m0, [blockq+0]
    movd                m1, [blockq+16]
%if ARCH_X86_64
    mova               m10, [pw_3196x2]
    mova               m11, [pw_16069x2]
%define W_3196x2_REG m10
%define W_16069x2_REG m11
%else
%define W_3196x2_REG [pw_3196x2]
%define W_16069x2_REG [pw_16069x2]
%endif
    VP9_IDCT8_2x2_1D 1
    ; partial 2x8 transpose
    ; punpcklwd m0, m1 already done inside idct
    punpcklwd           m2, m3
    punpcklwd           m4, m5
    punpcklwd           m6, m7
    punpckldq           m0, m2
    punpckldq           m4, m6
    SBUTTERFLY         qdq, 0, 4, 1
    SWAP                 1, 4
    VP9_IDCT8_2x2_1D 2
%if ARCH_X86_64
    SWAP                 6, 8
%endif
    pxor                m6, m6  ; used for the block reset, and VP9_STORE_2X
    VP9_IDCT8_WRITEOUT
%if ARCH_X86_64
    movd       [blockq+ 0], m6
    movd       [blockq+16], m6
%else
    mova       [blockq+ 0], m6
    mova       [blockq+16], m6
    mova       [blockq+32], m6
%endif
    RET

.idcthalf:
    movh                m0, [blockq + 0]
    movh                m1, [blockq +16]
    movh                m2, [blockq +32]
    movh                m3, [blockq +48]
    VP9_IDCT8_4x4_1D
    ; partial 4x8 transpose
%if ARCH_X86_32
    mova                m6, [blockq+ 0]
%endif
    punpcklwd           m0, m1
    punpcklwd           m2, m3
    punpcklwd           m4, m5
    punpcklwd           m6, m7
    SBUTTERFLY          dq, 0, 2, 1
    SBUTTERFLY          dq, 4, 6, 5
    SBUTTERFLY         qdq, 0, 4, 1
    SBUTTERFLY         qdq, 2, 6, 5
    SWAP                 1, 4
    SWAP                 3, 6
    VP9_IDCT8_4x4_1D
%if ARCH_X86_64
    SWAP                 6, 8
%endif
    pxor                m6, m6
    VP9_IDCT8_WRITEOUT
%if ARCH_X86_64
    movh       [blockq+ 0], m6
    movh       [blockq+16], m6
    movh       [blockq+32], m6
%else
    mova       [blockq+ 0], m6
    mova       [blockq+16], m6
    mova       [blockq+32], m6
%endif
    movh       [blockq+48], m6
    RET
%endif

.idctfull: ; generic full 8x8 idct/idct
%if ARCH_X86_64
    mova                m0, [blockq+  0]    ; IN(0)
%endif
    mova                m1, [blockq+ 16]    ; IN(1)
    mova                m2, [blockq+ 32]    ; IN(2)
    mova                m3, [blockq+ 48]    ; IN(3)
%if ARCH_X86_64
    mova                m4, [blockq+ 64]    ; IN(4)
%endif
    mova                m5, [blockq+ 80]    ; IN(5)
    mova                m6, [blockq+ 96]    ; IN(6)
    mova                m7, [blockq+112]    ; IN(7)
%if ARCH_X86_64
    mova               m11, [pd_8192]       ; rounding
%define D_8192_REG m11
%else
%define D_8192_REG [pd_8192]
%endif
    VP9_IDCT8_1D
%if ARCH_X86_64
    TRANSPOSE8x8W  0, 1, 2, 3, 4, 5, 6, 7, 8
%else
    TRANSPOSE8x8W  0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
    mova        [blockq+0], m0
%endif
    VP9_IDCT8_1D

%if ARCH_X86_64
    SWAP                 6, 8
%endif
    pxor                m6, m6  ; used for the block reset, and VP9_STORE_2X
    VP9_IDCT8_WRITEOUT
    ZERO_BLOCK      blockq, 16, 8, m6
    RET
%undef W_11585x2_REG
%endmacro

VP9_IDCT_IDCT_8x8_ADD_XMM sse2, 12
VP9_IDCT_IDCT_8x8_ADD_XMM ssse3, 13
VP9_IDCT_IDCT_8x8_ADD_XMM avx, 13

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; x86-32:
; - in: m0/3/4/7 are in mem [blockq+N*16]
; - out: m6 is in mem [blockq+0]
; x86-64:
; - everything is in registers
%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/4/5/6/7
%if ARCH_X86_64
    SWAP                     0, 8
    SWAP                     3, 9
    SWAP                     4, 10
    SWAP                     7, 11
%endif

    VP9_UNPACK_MULSUB_2D_4X  5,  2,  0,  3, 14449,  7723    ; m5/2=t3[d], m2/4=t2[d]
    VP9_UNPACK_MULSUB_2D_4X  1,  6,  4,  7,  4756, 15679    ; m1/4=t7[d], m6/7=t6[d]
    SCRATCH                  4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA     6,  2,  7,  3, 4, D_8192_REG  ; m6=t2[w], m2=t6[w]
    UNSCRATCH                4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA     1,  5,  4,  0, 3, D_8192_REG  ; m1=t3[w], m5=t7[w]

    UNSCRATCH                0,  8, blockq+16*0
    UNSCRATCH                3,  9, blockq+16*3
    UNSCRATCH                4, 10, blockq+16*4
    UNSCRATCH                7, 11, blockq+16*7
    SCRATCH                  1,  8, blockq+16*1
    SCRATCH                  2,  9, blockq+16*2
    SCRATCH                  5, 10, blockq+16*5
    SCRATCH                  6, 11, blockq+16*6

    VP9_UNPACK_MULSUB_2D_4X  7,  0,  1,  2, 16305,  1606    ; m7/1=t1[d], m0/2=t0[d]
    VP9_UNPACK_MULSUB_2D_4X  3,  4,  5,  6, 10394, 12665    ; m3/5=t5[d], m4/6=t4[d]
    SCRATCH                  1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA     4,  0,  6,  2, 1, D_8192_REG  ; m4=t0[w], m0=t4[w]
    UNSCRATCH                1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA     3,  7,  5,  1, 2, D_8192_REG  ; m3=t1[w], m7=t5[w]

    UNSCRATCH                2,  9, blockq+16*2
    UNSCRATCH                5, 10, blockq+16*5
    SCRATCH                  3,  9, blockq+16*3
    SCRATCH                  4, 10, blockq+16*4

    ; m4=t0, m3=t1, m6=t2, m1=t3, m0=t4, m7=t5, m2=t6, m5=t7

    VP9_UNPACK_MULSUB_2D_4X  0,  7,  1,  3, 15137,  6270    ; m0/1=t5[d], m7/3=t4[d]
    VP9_UNPACK_MULSUB_2D_4X  5,  2,  4,  6,  6270, 15137    ; m5/4=t6[d], m2/6=t7[d]
    SCRATCH                  1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA     5,  7,  4,  3, 1, D_8192_REG
    UNSCRATCH                1, 12, blockq+ 0*16
    PSIGNW                  m5, W_M1_REG                    ; m5=out1[w], m7=t6[w]
    VP9_RND_SH_SUMSUB_BA     2,  0,  6,  1, 3, D_8192_REG   ; m2=out6[w], m0=t7[w]

    UNSCRATCH                1,  8, blockq+16*1
    UNSCRATCH                3,  9, blockq+16*3
    UNSCRATCH                4, 10, blockq+16*4
    UNSCRATCH                6, 11, blockq+16*6
    SCRATCH                  2,  8, blockq+16*0

    SUMSUB_BA                w,  6,  4, 2                   ; m6=out0[w], m4=t2[w]
    SUMSUB_BA                w,  1,  3, 2
    PSIGNW                  m1, W_M1_REG                    ; m1=out7[w], m3=t3[w]

    ; m6=out0, m5=out1, m4=t2, m3=t3, m7=t6, m0=t7, m2=out6, m1=out7

    ; unfortunately, the code below overflows in some cases
%if 0; cpuflag(ssse3)
    SUMSUB_BA                w,  3,  4,  2
    SUMSUB_BA                w,  0,  7,  2
    pmulhrsw                m3, W_11585x2_REG
    pmulhrsw                m7, W_11585x2_REG
    pmulhrsw                m4, W_11585x2_REG               ; out4
    pmulhrsw                m0, W_11585x2_REG               ; out2
%else
    SCRATCH                  5,  9, blockq+16*1
    VP9_UNPACK_MULSUB_2W_4X  4, 3, 11585, 11585, D_8192_REG, 2, 5
    VP9_UNPACK_MULSUB_2W_4X  7, 0, 11585, 11585, D_8192_REG, 2, 5
    UNSCRATCH                5,  9, blockq+16*1
%endif
    PSIGNW                  m3, W_M1_REG                    ; out3
    PSIGNW                  m7, W_M1_REG                    ; out5

    ; m6=out0, m5=out1, m0=out2, m3=out3, m4=out4, m7=out5, m2=out6, m1=out7

%if ARCH_X86_64
    SWAP                     2, 8
%endif
    SWAP                     0, 6, 2
    SWAP                     7, 1, 5
%endmacro

%macro IADST8_FN 6
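; %1/%2 = name and 1-D macro of the first-pass transform, %3/%4 = same for the
; second pass, %5 = instruction set, %6 = number of xmm registers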
INIT_XMM %5
cglobal vp9_%1_%3_8x8_add, 3, 3, %6, dst, stride, block, eob

%ifidn %1, idct
%define first_is_idct 1
%else
%define first_is_idct 0
%endif

%ifidn %3, idct
%define second_is_idct 1
%else
%define second_is_idct 0
%endif

%if ARCH_X86_64
    mova                m0, [blockq+  0]    ; IN(0)
%endif
    mova                m1, [blockq+ 16]    ; IN(1)
    mova                m2, [blockq+ 32]    ; IN(2)
%if ARCH_X86_64 || first_is_idct
    mova                m3, [blockq+ 48]    ; IN(3)
%endif
%if ARCH_X86_64
    mova                m4, [blockq+ 64]    ; IN(4)
%endif
    mova                m5, [blockq+ 80]    ; IN(5)
    mova                m6, [blockq+ 96]    ; IN(6)
%if ARCH_X86_64 || first_is_idct
    mova                m7, [blockq+112]    ; IN(7)
%endif
%if ARCH_X86_64
%if cpuflag(ssse3)
    mova               m15, [pw_11585x2]    ; often used
%endif
    mova               m13, [pd_8192]       ; rounding
    mova               m14, [pw_m1]
%define W_11585x2_REG m15
%define D_8192_REG m13
%define W_M1_REG m14
%else
%define W_11585x2_REG [pw_11585x2]
%define D_8192_REG [pd_8192]
%define W_M1_REG [pw_m1]
%endif

    ; note different calling conventions for idct8 vs. iadst8 on x86-32
    VP9_%2_1D
%if ARCH_X86_64
    TRANSPOSE8x8W  0, 1, 2, 3, 4, 5, 6, 7, 8
%else
    TRANSPOSE8x8W  0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
    mova      [blockq+  0], m0
%if second_is_idct == 0
    mova      [blockq+ 48], m3
    mova      [blockq+112], m7
%endif
%endif
    VP9_%4_1D

%if ARCH_X86_64
    SWAP                 6, 8
%endif
    pxor                m6, m6  ; used for the block reset, and VP9_STORE_2X
    VP9_IDCT8_WRITEOUT
    ZERO_BLOCK      blockq, 16, 8, m6
    RET

%undef W_11585x2_REG
%undef first_is_idct
%undef second_is_idct

%endmacro

IADST8_FN idct,  IDCT8,  iadst, IADST8, sse2, 15
IADST8_FN iadst, IADST8, idct,  IDCT8,  sse2, 15
IADST8_FN iadst, IADST8, iadst, IADST8, sse2, 15
IADST8_FN idct,  IDCT8,  iadst, IADST8, ssse3, 16
IADST8_FN idct,  IDCT8,  iadst, IADST8, avx, 16
IADST8_FN iadst, IADST8, idct,  IDCT8,  ssse3, 16
IADST8_FN iadst, IADST8, idct,  IDCT8,  avx, 16
IADST8_FN iadst, IADST8, iadst, IADST8, ssse3, 16
IADST8_FN iadst, IADST8, iadst, IADST8, avx, 16

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; x86-64:
; at the end of this macro, m7 is stored in [%4+15*%5]
; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
; the following sumsubs have not been done yet:
;    SUMSUB_BA            w,  6,  9, 15      ; t6, t9
;    SUMSUB_BA            w,  7,  8, 15      ; t7, t8
; or (x86-32) t0-t5 are in m0-m5, t10-t15 are in x11/9/7/5/3/1,
; and the following sumsubs have not been done yet:
;    SUMSUB_BA            w, x13, x14, 7       ; t6, t9
;    SUMSUB_BA            w, x15, x12, 7       ; t7, t8

%macro VP9_IDCT16_1D_START 6 ; src, nnzc, stride, scratch, scratch_stride, is_iadst
%if %2 <= 4
    mova                m3, [%1+ 1*%3]      ; IN(1)
    mova                m0, [%1+ 3*%3]      ; IN(3)

    pmulhrsw            m4, m3,  [pw_16305x2]       ; t14-15
    pmulhrsw            m3, [pw_1606x2]             ; t8-9
    pmulhrsw            m7, m0,  [pw_m4756x2]       ; t10-11
    pmulhrsw            m0, [pw_15679x2]            ; t12-13

    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15

    VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137,  6270, [pd_8192], 1, 6 ; t9,  t14
    SCRATCH              4, 10, %4+ 1*%5
    SCRATCH              5, 11, %4+ 7*%5
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH            5, 11, %4+ 7*%5

    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
%else
    mova                m5, [%1+ 1*%3]      ; IN(1)
    mova                m4, [%1+ 7*%3]      ; IN(7)
%if %2 <= 8
    pmulhrsw            m2, m5,  [pw_16305x2]       ; t15
    pmulhrsw            m5, [pw_1606x2]             ; t8
    pmulhrsw            m3, m4,  [pw_m10394x2]      ; t9
    pmulhrsw            m4, [pw_12665x2]            ; t14
%else
    mova                m3, [%1+ 9*%3]      ; IN(9)
    mova                m2, [%1+15*%3]      ; IN(15)

    ; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
    ; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15

    VP9_UNPACK_MULSUB_2W_4X   5,   2, 16305,  1606, [pd_8192], 0, 1 ; t8,  t15
    VP9_UNPACK_MULSUB_2W_4X   3,   4, 10394, 12665, [pd_8192], 0, 1 ; t9,  t14
%endif

    SUMSUB_BA            w,  3,  5, 0       ; t8,  t9
    SUMSUB_BA            w,  4,  2, 0       ; t15, t14

    VP9_UNPACK_MULSUB_2W_4X   2,   5, 15137,  6270, [pd_8192], 0, 1 ; t9,  t14

    SCRATCH              4, 10, %4+ 1*%5
    SCRATCH              5, 11, %4+ 7*%5

    mova                m6, [%1+ 3*%3]      ; IN(3)
    mova                m7, [%1+ 5*%3]      ; IN(5)
%if %2 <= 8
969 970 971 972 973
    pmulhrsw            m0, m7,  [pw_14449x2]       ; t13
    pmulhrsw            m7, [pw_7723x2]             ; t10
    pmulhrsw            m1, m6,  [pw_m4756x2]       ; t11
    pmulhrsw            m6, [pw_15679x2]            ; t12
%else
974 975
    mova                m0, [%1+11*%3]      ; IN(11)
    mova                m1, [%1+13*%3]      ; IN(13)

    VP9_UNPACK_MULSUB_2W_4X   7,   0, 14449,  7723, [pd_8192], 4, 5 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X   1,   6,  4756, 15679, [pd_8192], 4, 5 ; t11, t12
%endif

    ; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
    ; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15

    SUMSUB_BA            w,  7,  1, 4       ; t11, t10
    SUMSUB_BA            w,  0,  6, 4       ; t12, t13

    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15

    VP9_UNPACK_MULSUB_2W_4X   6,   1, 6270, m15137, [pd_8192], 4, 5 ; t10, t13

    UNSCRATCH            5, 11, %4+ 7*%5
%endif

    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
    ; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15

    SUMSUB_BA            w,  7,  3, 4       ; t8,  t11

    ; backup first register
    mova        [%4+15*%5], m7

    SUMSUB_BA            w,  6,  2, 7       ; t9,  t10
    UNSCRATCH            4, 10, %4+ 1*%5
    SUMSUB_BA            w,  0,  4, 7       ; t15, t12
    SUMSUB_BA            w,  1,  5, 7       ; t14, t13

    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15

%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA            w,  2,  5, 7
    SUMSUB_BA            w,  3,  4, 7
    pmulhrsw            m5, [pw_11585x2]    ; t10
    pmulhrsw            m4, [pw_11585x2]    ; t11
    pmulhrsw            m3, [pw_11585x2]    ; t12
    pmulhrsw            m2, [pw_11585x2]    ; t13
%else
    SCRATCH              6, 10, %4+ 1*%5
    VP9_UNPACK_MULSUB_2W_4X   5,   2, 11585, 11585, [pd_8192], 6, 7 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X   4,   3, 11585, 11585, [pd_8192], 6, 7 ; t11, t12
    UNSCRATCH            6, 10, %4+ 1*%5
%endif

    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15

    SCRATCH              0,  8, %4+ 1*%5
    SCRATCH              1,  9, %4+ 3*%5
    SCRATCH              2, 10, %4+ 5*%5
    SCRATCH              3, 11, %4+ 7*%5
    SCRATCH              4, 12, %4+ 9*%5
    SCRATCH              5, 13, %4+11*%5
    SCRATCH              6, 14, %4+13*%5

    ; even (tx8x8)
%if %2 <= 4
    mova                m3, [%1+ 0*%3]      ; IN(0)
    mova                m4, [%1+ 2*%3]      ; IN(2)

    pmulhrsw            m3, [pw_11585x2]    ; t0-t3
    pmulhrsw            m7, m4, [pw_16069x2]        ; t6-7
    pmulhrsw            m4, [pw_3196x2]             ; t4-5

%if 0 ; overflows :(
    paddw               m6, m7, m4
    psubw               m5, m7, m4
    pmulhrsw            m5, [pw_11585x2]            ; t5
    pmulhrsw            m6, [pw_11585x2]            ; t6
%else
    VP9_UNPACK_MULSUB_2W_4X  5, 6, 7, 4, 11585, 11585, [pd_8192], 0, 1 ; t5,  t6
%endif

    psubw               m0, m3, m7
    paddw               m7, m3
    psubw               m1, m3, m6
    paddw               m6, m3
    psubw               m2, m3, m5
    paddw               m5, m3

%if ARCH_X86_32
    SWAP                 0, 7
%endif
    SCRATCH              7, 15, %4+12*%5
%else
    mova                m6, [%1+ 2*%3]      ; IN(2)
    mova                m1, [%1+ 4*%3]      ; IN(4)
    mova                m7, [%1+ 6*%3]      ; IN(6)
%if %2 <= 8
    pmulhrsw            m0, m1,  [pw_15137x2]       ; t3
    pmulhrsw            m1, [pw_6270x2]             ; t2
    pmulhrsw            m5, m6, [pw_16069x2]        ; t7
    pmulhrsw            m6, [pw_3196x2]             ; t4
    pmulhrsw            m4, m7, [pw_m9102x2]        ; t5
    pmulhrsw            m7, [pw_13623x2]            ; t6
%else
    mova                m4, [%1+10*%3]      ; IN(10)
    mova                m0, [%1+12*%3]      ; IN(12)
    mova                m5, [%1+14*%3]      ; IN(14)

    VP9_UNPACK_MULSUB_2W_4X   1,   0, 15137,  6270, [pd_8192], 2, 3 ; t2,  t3
    VP9_UNPACK_MULSUB_2W_4X   6,   5, 16069,  3196, [pd_8192], 2, 3 ; t4,  t7
    VP9_UNPACK_MULSUB_2W_4X   4,   7,  9102, 13623, [pd_8192], 2, 3 ; t5,  t6
%endif

    SUMSUB_BA            w,  4,  6, 2       ; t4,  t5
    SUMSUB_BA            w,  7,  5, 2       ; t7,  t6

%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA            w,  6,  5, 2
    pmulhrsw            m5, [pw_11585x2]                              ; t5
    pmulhrsw            m6, [pw_11585x2]                              ; t6
%else
    VP9_UNPACK_MULSUB_2W_4X  5,  6, 11585, 11585, [pd_8192], 2, 3 ; t5,  t6
%endif

    SCRATCH              5, 15, %4+10*%5
    mova                m2, [%1+ 0*%3]      ; IN(0)
%if %2 <= 8
    pmulhrsw            m2, [pw_11585x2]    ; t0 and t1
    psubw               m3, m2, m0
    paddw               m0, m2

    SUMSUB_BA            w,  7,  0, 5       ; t0,  t7
%else
    mova                m3, [%1+ 8*%3]      ; IN(8)

    ; from 3 stages back
%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA            w,  3,  2, 5
    pmulhrsw            m3, [pw_11585x2]    ; t0
    pmulhrsw            m2, [pw_11585x2]    ; t1
%else
    mova        [%1+ 0*%3], m0
    VP9_UNPACK_MULSUB_2W_4X  2,  3, 11585,  11585, [pd_8192], 5, 0 ; t0, t1
    mova                m0, [%1+ 0*%3]
%endif

    ; from 2 stages back
    SUMSUB_BA            w,  0,  3, 5      ; t0,  t3

    SUMSUB_BA            w,  7,  0, 5      ; t0,  t7
%endif
    UNSCRATCH            5, 15, %4+10*%5
%if ARCH_X86_32
    SWAP                 0, 7
%endif
    SCRATCH              7, 15, %4+12*%5
    SUMSUB_BA            w,  1,  2, 7       ; t1,  t2

    ; from 1 stage back
    SUMSUB_BA            w,  6,  1, 7       ; t1,  t6
    SUMSUB_BA            w,  5,  2, 7       ; t2,  t5
%endif
    SUMSUB_BA            w,  4,  3, 7       ; t3,  t4

%if ARCH_X86_64
    SWAP                 0, 8
    SWAP                 1, 9
    SWAP                 2, 10
    SWAP                 3, 11
    SWAP                 4, 12
    SWAP                 5, 13
    SWAP                 6, 14

    SUMSUB_BA            w,  0, 15, 7       ; t0, t15
    SUMSUB_BA            w,  1, 14, 7       ; t1, t14
    SUMSUB_BA            w,  2, 13, 7       ; t2, t13
    SUMSUB_BA            w,  3, 12, 7       ; t3, t12
    SUMSUB_BA            w,  4, 11, 7       ; t4, t11
    SUMSUB_BA            w,  5, 10, 7       ; t5, t10
%else
    SWAP                 1, 6
    SWAP                 2, 5
    SWAP                 3, 4
    mova        [%4+14*%5], m6

%macro %%SUMSUB_BA_STORE 5 ; reg, from_mem, to_mem, scratch, scratch_stride
    mova                m6, [%4+%2*%5]
    SUMSUB_BA            w,  6, %1, 7
    SWAP                %1, 6
    mova        [%4+%3*%5], m6
%endmacro

    %%SUMSUB_BA_STORE    0,  1,  1, %4, %5  ; t0, t15
    %%SUMSUB_BA_STORE    1,  3,  3, %4, %5  ; t1, t14
    %%SUMSUB_BA_STORE    2,  5,  5, %4, %5  ; t2, t13
    %%SUMSUB_BA_STORE    3,  7,  7, %4, %5  ; t3, t12
    %%SUMSUB_BA_STORE    4,  9,  9, %4, %5  ; t4, t11
    %%SUMSUB_BA_STORE    5, 11, 11, %4, %5  ; t5, t10
%endif
%endmacro

%macro VP9_IDCT16_1D 2-4 16, 1 ; src, pass, nnzc, is_iadst
%if %2 == 1
    VP9_IDCT16_1D_START %1, %3, 32, tmpq, 16, %4

%if ARCH_X86_64
    ; backup a different register
    mova                m7, [tmpq+15*16]
    mova      [tmpq+ 1*16], m15

    SUMSUB_BA            w,  6,  9, 15      ; t6, t9
    SUMSUB_BA            w,  7,  8, 15      ; t7, t8

    TRANSPOSE8x8W        0, 1, 2, 3, 4, 5, 6, 7, 15
    mova        [tmpq+  0], m0
    mova        [tmpq+ 32], m1
    mova        [tmpq+ 64], m2
    mova        [tmpq+ 96], m3
    mova        [tmpq+128], m4
    mova        [tmpq+160], m5
    mova        [tmpq+192], m6
    mova        [tmpq+224], m7

    mova               m15, [tmpq+ 1*16]
    TRANSPOSE8x8W        8, 9, 10, 11, 12, 13, 14, 15, 0
    mova        [tmpq+ 16], m8
    mova        [tmpq+ 48], m9
    mova        [tmpq+ 80], m10
    mova        [tmpq+112], m11
    mova        [tmpq+144], m12
    mova        [tmpq+176], m13
    mova        [tmpq+208], m14
    mova        [tmpq+240], m15
%else
    mova                m6, [tmpq+13*16]
    mova                m7, [tmpq+14*16]
    SUMSUB_BA            w, 6, 7                ; t6, t9
    mova      [tmpq+14*16], m6
    mova      [tmpq+13*16], m7
    mova                m7, [tmpq+15*16]
    mova                m6, [tmpq+12*16]
    SUMSUB_BA            w, 7, 6                ; t7, t8
    mova      [tmpq+15*16], m6

    TRANSPOSE8x8W       0, 1, 2, 3, 4, 5, 6, 7, [tmpq+14*16], [tmpq+ 8*16], 1
    mova     [tmpq+ 0*16], m0
    mova     [tmpq+ 2*16], m1
    mova     [tmpq+ 4*16], m2
    mova     [tmpq+ 6*16], m3
    mova     [tmpq+10*16], m5
    mova     [tmpq+12*16], m6
    mova     [tmpq+14*16], m7

    mova                m0, [tmpq+15*16]
    mova                m1, [tmpq+13*16]
    mova                m2, [tmpq+11*16]
    mova                m3, [tmpq+ 9*16]
    mova                m4, [tmpq+ 7*16]
    mova                m5, [tmpq+ 5*16]
    mova                m7, [tmpq+ 1*16]
    TRANSPOSE8x8W       0, 1, 2, 3, 4, 5, 6, 7, [tmpq+ 3*16], [tmpq+ 9*16], 1
    mova     [tmpq+ 1*16], m0
    mova     [tmpq+ 3*16], m1
    mova     [tmpq+ 5*16], m2
    mova     [tmpq+ 7*16], m3
    mova     [tmpq+11*16], m5
    mova     [tmpq+13*16], m6
    mova     [tmpq+15*16], m7
%endif
%else ; %2 == 2
    VP9_IDCT16_1D_START %1, %3, 32, %1, 32, %4

%if cpuflag(ssse3)
%define ROUND_REG [pw_512]
%else
%define ROUND_REG [pw_32]
%endif

    pxor                m7, m7
%if ARCH_X86_64
    ; backup more registers
    mova        [%1+ 2*32], m8
    mova        [%1+ 3*32], m9

    VP9_IDCT8_WRITEx2    0,  1, 8, 9, 7, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2    2,  3, 8, 9, 7, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2    4,  5, 8, 9, 7, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]

    ; restore from cache
    SWAP                 0, 7               ; move zero from m7 to m0
    mova                m7, [%1+15*32]
    mova                m8, [%1+ 2*32]
    mova                m9, [%1+ 3*32]

    SUMSUB_BA            w,  6,  9, 3       ; t6, t9
    SUMSUB_BA            w,  7,  8, 3       ; t7, t8

    VP9_IDCT8_WRITEx2    6,  7, 3, 4, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2    8,  9, 3, 4, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2   10, 11, 1, 2, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2   12, 13, 1, 2, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2   14, 15, 1, 2, 0, ROUND_REG, 6
%else
    mova      [tmpq+ 0*32], m5

    VP9_IDCT8_WRITEx2    0,  1, 5, 6, 7, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2    2,  3, 5, 6, 7, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]

    SWAP                 0, 7               ; move zero from m7 to m0
    mova                m5, [tmpq+ 0*32]

    VP9_IDCT8_WRITEx2    4,  5, 1, 2, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]

    mova                m4, [tmpq+13*32]
    mova                m7, [tmpq+14*32]
    mova                m5, [tmpq+15*32]
    mova                m6, [tmpq+12*32]
    SUMSUB_BADC w, 4, 7, 5, 6, 1

    VP9_IDCT8_WRITEx2    4,  5, 1, 2, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2    6,  7, 1, 2, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]

    mova                m4, [tmpq+11*32]
    mova                m5, [tmpq+ 9*32]
    mova                m6, [tmpq+ 7*32]
    mova                m7, [tmpq+ 5*32]

    VP9_IDCT8_WRITEx2    4,  5, 1, 2, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2    6,  7, 1, 2, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]

    mova                m4, [tmpq+ 3*32]
    mova                m5, [tmpq+ 1*32]

    VP9_IDCT8_WRITEx2    4,  5, 1, 2, 0, ROUND_REG, 6
    lea               dstq, [dstq+strideq*2]
%endif

%undef ROUND_REG
%endif ; %2 == 1/2
%endmacro

%macro VP9_STORE_2XFULL 6-7 strideq; dc, tmp1, tmp2, tmp3, tmp4, zero, stride
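    ; adds the splatted dc value in m%1 to two full 16-pixel rows of dst,
    ; clipping to 0..255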
    mova               m%3, [dstq]
    mova               m%5, [dstq+%7]
    punpcklbw          m%2, m%3, m%6
    punpckhbw          m%3, m%6
    punpcklbw          m%4, m%5, m%6
    punpckhbw          m%5, m%6
    paddw              m%2, m%1
    paddw              m%3, m%1
    paddw              m%4, m%1
    paddw              m%5, m%1
    packuswb           m%2, m%3
    packuswb           m%4, m%5
    mova            [dstq], m%2
    mova         [dstq+%7], m%4
%endmacro

%macro VP9_IDCT_IDCT_16x16_ADD_XMM 1
INIT_XMM %1
cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
%if cpuflag(ssse3)
    ; 2x2=eob=3, 4x4=eob=10
    cmp eobd, 38
    jg .idctfull
    cmp eobd, 1 ; faster path for when only DC is set
    jne .idct8x8
%else
    cmp eobd, 1 ; faster path for when only DC is set
    jg .idctfull
%endif

    ; dc-only
%if cpuflag(ssse3)
    movd                m0, [blockq]
    mova                m1, [pw_11585x2]
    pmulhrsw            m0, m1
    pmulhrsw            m0, m1
%else
    DEFINE_ARGS dst, stride, block, coef
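    ; scalar DC computation again, with the 16x16 store rounding
    ; ((x + 32) >> 6) folded into the final shift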
    movsx            coefd, word [blockq]
    imul             coefd, 11585
    add              coefd, 8192
    sar              coefd, 14
    imul             coefd, 11585
    add              coefd, (32 << 14) + 8192
    sar              coefd, 14 + 6
    movd                m0, coefd
%endif
    SPLATW              m0, m0, q0000
%if cpuflag(ssse3)
    pmulhrsw            m0, [pw_512]
%endif
    pxor                m5, m5
    movd          [blockq], m5
%rep 7
    VP9_STORE_2XFULL    0, 1, 2, 3, 4, 5
    lea               dstq, [dstq+2*strideq]
%endrep
    VP9_STORE_2XFULL    0, 1, 2, 3, 4, 5
    RET

    DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp
%if cpuflag(ssse3)
.idct8x8:
    mov               tmpq, rsp
    VP9_IDCT16_1D   blockq, 1, 8, 0

    mov               cntd, 2
    mov           dst_bakq, dstq
.loop2_8x8:
    VP9_IDCT16_1D     tmpq, 2, 8, 0
    lea               dstq, [dst_bakq+8]
    add               tmpq, 16
    dec               cntd
    jg .loop2_8x8

    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK      blockq, 32, 8, m0
    RET
%endif

.idctfull:
    mov               cntd, 2
    mov               tmpq, rsp
.loop1_full:
    VP9_IDCT16_1D   blockq, 1, 16, 0
    add             blockq, 16
    add               tmpq, 256
    dec               cntd
    jg .loop1_full
    sub             blockq, 32

    mov               cntd, 2
    mov               tmpq, rsp
    mov           dst_bakq, dstq
.loop2_full:
    VP9_IDCT16_1D     tmpq, 2, 16, 0
    lea               dstq, [dst_bakq+8]
    add               tmpq, 16
    dec               cntd
    jg .loop2_full

    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK      blockq, 32, 16, m0
    RET
%endmacro

VP9_IDCT_IDCT_16x16_ADD_XMM sse2
VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
VP9_IDCT_IDCT_16x16_ADD_XMM avx

%macro VP9_IDCT16_YMM_1D 0
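    ; 16-point 1-D idct with each row of the block held in one ymm register
    ; (m0-m15); one invocation per pass covers all 16 columns, so the 8-column
    ; loops of the xmm version are not needed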
    VP9_UNPACK_MULSUB_2W_4X  1,  15, 16305,  1606, [pd_8192], 0, 4 ; t8,  t15
    VP9_UNPACK_MULSUB_2W_4X  9,   7, 10394, 12665, [pd_8192], 0, 4 ; t9,  t14

    SUMSUB_BA            w,  9,   1, 0      ; t8,  t9
    SUMSUB_BA            w,  7,  15, 0      ; t15, t14

    VP9_UNPACK_MULSUB_2W_4X 15,   1, 15137,  6270, [pd_8192], 0, 4 ; t9,  t14

    VP9_UNPACK_MULSUB_2W_4X  5,  11, 14449,  7723, [pd_8192], 0, 4 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 13,   3,  4756, 15679, [pd_8192], 0, 4 ; t11, t12

    SUMSUB_BA            w,  5,  13, 0      ; t11, t10
    SUMSUB_BA            w, 11,   3, 0      ; t12, t13

    VP9_UNPACK_MULSUB_2W_4X  3,  13, 6270, m15137, [pd_8192], 0, 4 ; t10, t13

    SUMSUB_BA            w,  5,   9, 0      ; t8,  t11
    SUMSUB_BA            w,  3,  15, 0      ; t9,  t10
    SUMSUB_BA            w, 11,   7, 0      ; t15, t12
    SUMSUB_BA            w, 13,   1, 0      ; t14, t13

    SUMSUB_BA            w, 15,   1, 0
    SUMSUB_BA            w,  9,   7, 0
    pmulhrsw            m1, [pw_11585x2]    ; t10
    pmulhrsw            m7, [pw_11585x2]    ; t11
    pmulhrsw            m9, [pw_11585x2]    ; t12
    pmulhrsw           m15, [pw_11585x2]    ; t13

    ; even (tx8x8)
    mova                m4, [blockq+128]
    mova      [blockq+128], m5
    VP9_UNPACK_MULSUB_2W_4X   4,  12, 15137,  6270, [pd_8192], 0, 5 ; t2,  t3
    VP9_UNPACK_MULSUB_2W_4X   2,  14, 16069,  3196, [pd_8192], 0, 5 ; t4,  t7
    VP9_UNPACK_MULSUB_2W_4X  10,   6,  9102, 13623, [pd_8192], 0, 5 ; t5,  t6
    mova                m0, [blockq+  0]
    SUMSUB_BA            w,   8,   0, 5
    pmulhrsw            m8, [pw_11585x2]    ; t0
    pmulhrsw            m0, [pw_11585x2]    ; t1

    SUMSUB_BA            w,  10,   2, 5     ; t4,  t5
    SUMSUB_BA            w,   6,  14, 5     ; t7,  t6
    SUMSUB_BA            w,  12,   8, 5     ; t0,  t3
    SUMSUB_BA            w,   4,   0, 5     ; t1,  t2

    SUMSUB_BA            w,   2,  14, 5
    pmulhrsw           m14, [pw_11585x2]    ; t5
    pmulhrsw            m2, [pw_11585x2]    ; t6

    SUMSUB_BA            w,   6,  12, 5     ; t0,  t7
    SUMSUB_BA            w,   2,   4, 5     ; t1,  t6
    SUMSUB_BA            w,  14,   0, 5     ; t2,  t5
    SUMSUB_BA            w,  10,   8, 5     ; t3,  t4

    ; final stage
    SUMSUB_BA            w, 11,  6,  5      ; out0, out15
    SUMSUB_BA            w, 13,  2,  5      ; out1, out14
    SUMSUB_BA            w, 15, 14,  5      ; out2, out13
    SUMSUB_BA            w,  9, 10,  5      ; out3, out12
    SUMSUB_BA            w,  7,  8,  5      ; out4, out11
    SUMSUB_BA            w,  1,  0,  5      ; out5, out10
    SUMSUB_BA            w,  3,  4,  5      ; out6, out9
    mova                m5, [blockq+128]
    mova      [blockq+192], m3
    SUMSUB_BA            w,  5, 12,  3      ; out7, out8

    SWAP  0, 11,  8, 12, 10
    SWAP  1, 13, 14,  2, 15,  6,  3,  9,  4,  7,  5
%endmacro

; this is almost identical to VP9_STORE_2X, but it does two rows
; for slightly improved interleaving, and it omits vpermq since the
; input is DC so all values are identical
%macro VP9_STORE_YMM_DC_4X 6 ; reg, tmp1, tmp2, tmp3, tmp4, zero
    mova              xm%2, [dstq]
    mova              xm%4, [dstq+strideq*2]
    vinserti128        m%2, m%2, [dstq+strideq], 1
    vinserti128        m%4, m%4, [dstq+stride3q], 1
    punpckhbw          m%3, m%2, m%6
    punpcklbw          m%2, m%6
    punpckhbw          m%5, m%4, m%6
    punpcklbw          m%4, m%6
    paddw              m%3, m%1
    paddw              m%2, m%1
    paddw              m%5, m%1
    paddw              m%4, m%1
    packuswb           m%2, m%3
    packuswb           m%4, m%5
    mova            [dstq], xm%2
    mova        [dstq+strideq*2], xm%4
    vextracti128  [dstq+strideq], m%2, 1
    vextracti128 [dstq+stride3q], m%4, 1
%endmacro

%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
INIT_YMM avx2
cglobal vp9_idct_idct_16x16_add, 4, 4, 16, dst, stride, block, eob
    cmp eobd, 1 ; faster path for when only DC is set
    jg .idctfull

    ; dc-only
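    ; each 1D pass scales the dc coefficient by ~11585/16384 (cos(pi/4)),
    ; i.e. ~1/2 combined; the pw_512 pmulhrsw below is the rounded final >>6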
    mova                m1, [pw_11585x2]
    vpbroadcastw        m0, [blockq]
    pmulhrsw            m0, m1
    pmulhrsw            m0, m1
    pxor                m5, m5
    pmulhrsw            m0, [pw_512]
    movd          [blockq], xm5

    DEFINE_ARGS dst, stride, stride3, cnt
    mov               cntd, 4
    lea           stride3q, [strideq*3]
.loop_dc:
    VP9_STORE_YMM_DC_4X  0, 1, 2, 3, 4, 5
    lea               dstq, [dstq+4*strideq]
    dec               cntd
    jg .loop_dc
    RET

    DEFINE_ARGS dst, stride, block, eob
.idctfull:
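    ; load rows 1-3 and 5-15; rows 0 and 4 stay in memory and are picked up
    ; inside VP9_IDCT16_YMM_1D from [blockq+  0] and [blockq+128]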
    mova                m1, [blockq+ 32]
    mova                m2, [blockq+ 64]
    mova                m3, [blockq+ 96]
    mova                m5, [blockq+160]
    mova                m6, [blockq+192]
    mova                m7, [blockq+224]
    mova                m8, [blockq+256]
    mova                m9, [blockq+288]
    mova               m10, [blockq+320]
    mova               m11, [blockq+352]
    mova               m12, [blockq+384]
    mova               m13, [blockq+416]
    mova               m14, [blockq+448]
    mova               m15, [blockq+480]

    VP9_IDCT16_YMM_1D
    TRANSPOSE16x16W      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
                         [blockq+192], [blockq+128], 1
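    ; spill row 0 again; the 1D macro reloads rows 0 and 4 from
    ; [blockq+  0] and [blockq+128] instead of taking them in registers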
    mova      [blockq+  0], m0
    VP9_IDCT16_YMM_1D

    mova      [blockq+224], m7

    ; store
    VP9_IDCT8_WRITEx2    0,  1, 6, 7, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2    2,  3, 6, 7, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2    4,  5, 6, 7, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    mova                m6, [blockq+192]
    mova                m7, [blockq+224]
    VP9_IDCT8_WRITEx2    6,  7, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2    8,  9, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2   10, 11, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2   12, 13, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2   14, 15, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]

    ; m0 is not guaranteed to be zero at this point, so clear it and use it
    ; to zero out the block coefficients
    pxor                m0, m0
    ZERO_BLOCK      blockq, 32, 16, m0
    RET
%endif

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IADST16_1D 2 ; src, pass
%assign %%str 16*%2
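    ; %%str spaces the tmpq scratch slots: 16 bytes in pass 1, 32 in pass 2,
    ; matching the layout the pass-1 transpose writes out; pass 1 ends with a
    ; transpose into tmpq, pass 2 adds the final rows to dst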
    mova                m0, [%1+ 0*32]  ; in0
    mova                m1, [%1+15*32]  ; in15
    mova                m2, [%1+ 7*32]  ; in7
    mova                m3, [%1+ 8*32]  ; in8

    VP9_UNPACK_MULSUB_2D_4X  1,  0,  4,  5, 16364,   804    ; m1/4=t1[d], m0/5=t0[d]
    VP9_UNPACK_MULSUB_2D_4X  2,  3,  7,  6, 11003, 12140    ; m2/7=t9[d], m3/6=t8[d]
    SCRATCH              4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA     3,  0,  6,  5,  4, [pd_8192]   ; m3=t0[w], m0=t8[w]
    UNSCRATCH            4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA     2,  1,  7,  4,  5, [pd_8192]   ; m2=t1[w], m1=t9[w]

    SCRATCH              0, 10, tmpq+ 0*%%str
    SCRATCH              1, 11, tmpq+15*%%str
    mova   [tmpq+ 7*%%str], m2
    mova   [tmpq+ 8*%%str], m3

    mova                m1, [%1+ 2*32]  ; in2
    mova                m0, [%1+13*32]  ; in13
    mova                m3, [%1+ 5*32]  ; in5
    mova                m2, [%1+10*32]  ; in10

    VP9_UNPACK_MULSUB_2D_4X  0,  1,  6,  7, 15893,  3981    ; m0/6=t3[d], m1/7=t2[d]
    VP9_UNPACK_MULSUB_2D_4X  3,  2,  4,  5,  8423, 14053    ; m3/4=t11[d], m2/5=t10[d]
    SCRATCH              4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA     2,  1,  5,  7,  4, [pd_8192]   ; m2=t2[w], m1=t10[w]
    UNSCRATCH            4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA     3,  0,  4,  6,  5, [pd_8192]   ; m3=t3[w], m0=t11[w]

    SCRATCH              0, 12, tmpq+ 2*%%str
    SCRATCH              1, 13, tmpq+13*%%str
    mova   [tmpq+ 5*%%str], m2
    mova   [tmpq+10*%%str], m3

    mova                m2, [%1+ 4*32]  ; in4
    mova                m3, [%1+11*32]  ; in11
    mova                m0, [%1+ 3*32]  ; in3
    mova                m1, [%1+12*32]  ; in12

    VP9_UNPACK_MULSUB_2D_4X  3,  2,  7,  6, 14811,  7005    ; m3/7=t5[d], m2/6=t4[d]
    VP9_UNPACK_MULSUB_2D_4X  0,  1,  4,  5,  5520, 15426    ; m0/4=t13[d], m1/5=t12[d]
    SCRATCH              4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA     1,  2,  5,  6,  4, [pd_8192]   ; m1=t4[w], m2=t12[w]
    UNSCRATCH            4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA     0,  3,  4,  7,  6, [pd_8192]   ; m0=t5[w], m3=t13[w]

    SCRATCH              0,  8, tmpq+ 4*%%str
    mova   [tmpq+11*%%str], m1          ; t4:m1->r11
    UNSCRATCH            0, 10, tmpq+ 0*%%str
    UNSCRATCH            1, 11, tmpq+15*%%str

    ; round 2 interleaved part 1
    VP9_UNPACK_MULSUB_2D_4X  0,  1,  6,  7, 16069,  3196    ; m1/7=t8[d], m0/6=t9[d]
    VP9_UNPACK_MULSUB_2D_4X  3,  2,  5,  4,  3196, 16069    ; m3/5=t12[d], m2/4=t13[d]
    SCRATCH              4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA     3,  1,  5,  7,  4, [pd_8192]   ; m3=t8[w], m1=t12[w]
    UNSCRATCH            4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA     2,  0,  4,  6,  5, [pd_8192]   ; m2=t9[w], m0=t13[w]

    SCRATCH              0, 10, tmpq+ 0*%%str
    SCRATCH              1, 11, tmpq+15*%%str
    SCRATCH              2, 14, tmpq+ 3*%%str
    SCRATCH              3, 15, tmpq+12*%%str

    mova                m2, [%1+ 6*32]  ; in6
    mova                m3, [%1+ 9*32]  ; in9
    mova                m0, [%1+ 1*32]  ; in1
    mova                m1, [%1+14*32]  ; in14

    VP9_UNPACK_MULSUB_2D_4X  3,  2,  7,  6, 13160,  9760    ; m3/7=t7[d], m2/6=t6[d]
    VP9_UNPACK_MULSUB_2D_4X  0,  1,  4,  5,  2404, 16207    ; m0/4=t15[d], m1/5=t14[d]
    SCRATCH              4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA     1,  2,  5,  6,  4, [pd_8192]   ; m1=t6[w], m2=t14[w]
    UNSCRATCH            4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA     0,  3,  4,  7,  6, [pd_8192]   ; m0=t7[w], m3=t15[w]

    ; r8=t0, r7=t1, r5=t2, r10=t3, r11=t4, m8|r4=t5, m1=t6, m0=t7
    ; m10|r0=t8, m11|r15=t9, m13|r13=t10, m12|r2=t11, m14|r3=t12, m15|r12=t13, m2=t14, m3=t15

    UNSCRATCH            4, 12, tmpq+ 2*%%str
    UNSCRATCH            5, 13, tmpq+13*%%str
    SCRATCH              0, 12, tmpq+ 1*%%str
    SCRATCH              1, 13, tmpq+14*%%str

    ; remainder of round 2 (rest of t8-15)
    VP9_UNPACK_MULSUB_2D_4X  5,  4,  6,  7,  9102, 13623    ; m5/6=t11[d], m4/7=t10[d]
    VP9_UNPACK_MULSUB_2D_4X  3,  2,  1,  0, 13623,  9102    ; m3/1=t14[d], m2/0=t15[d]
    SCRATCH              0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA     3,  4,  1,  7,  0, [pd_8192]   ; m3=t10[w], m4=t14[w]
    UNSCRATCH            0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA     2,  5,  0,  6,  1, [pd_8192]   ; m2=t11[w], m5=t15[w]

    ; m15|r12=t8, m14|r3=t9, m3=t10, m2=t11, m11|r15=t12, m10|r0=t13, m4=t14, m5=t15

    UNSCRATCH            6, 14, tmpq+ 3*%%str
    UNSCRATCH            7, 15, tmpq+12*%%str

    SUMSUB_BA                w,  3,  7,  1
    PSIGNW                  m3, [pw_m1]                     ; m3=out1[w], m7=t10[w]
    SUMSUB_BA                w,  2,  6,  1                  ; m2=out14[w], m6=t11[w]

    ; unfortunately, the code below overflows in some cases, e.g.
    ; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8.webm
%if 0; cpuflag(ssse3)
    SUMSUB_BA                w,  7,  6,  1
    pmulhrsw                m7, [pw_11585x2]                ; m7=out6[w]
    pmulhrsw                m6, [pw_11585x2]                ; m6=out9[w]
%else
    VP9_UNPACK_MULSUB_2W_4X  6,  7, 11585, 11585, [pd_8192], 1, 0
%endif
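    ; (the VP9_UNPACK_MULSUB_2W_4X path keeps the 11585 products in 32-bit
    ; precision, pmaddwd plus pd_8192 rounding, which avoids that overflow)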

    mova       [tmpq+ 3*%%str], m6
    mova       [tmpq+ 6*%%str], m7
    UNSCRATCH                6, 10, tmpq+ 0*%%str
    UNSCRATCH                7, 11, tmpq+15*%%str
    mova       [tmpq+13*%%str], m2
    SCRATCH                  3, 11, tmpq+ 9*%%str

    VP9_UNPACK_MULSUB_2D_4X  7,  6,  2,  3, 15137,  6270    ; m6/3=t13[d], m7/2=t12[d]
    VP9_UNPACK_MULSUB_2D_4X  5,  4,  1,  0,  6270, 15137    ; m5/1=t14[d], m4/0=t15[d]
    SCRATCH              0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA     5,  6,  1,  3,  0, [pd_8192]   ; m5=out2[w], m6=t14[w]
    UNSCRATCH            0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA     4,  7,  0,  2,  1, [pd_8192]
    PSIGNW                  m4, [pw_m1]                     ; m4=out13[w], m7=t15[w]

    ; unfortunately, the code below overflows in some cases
%if 0; cpuflag(ssse3)
    SUMSUB_BA                w,  7,  6,  1
    pmulhrsw                m7, [pw_m11585x2]               ; m7=out5[w]
    pmulhrsw                m6, [pw_11585x2]                ; m6=out10[w]
%else
    PSIGNW                  m7, [pw_m1]
    VP9_UNPACK_MULSUB_2W_4X  7,  6, 11585, 11585, [pd_8192], 1, 0
%endif

    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, m6=out10, m4=out13, r2=out14

    mova                    m2, [tmpq+ 8*%%str]
    mova                    m3, [tmpq+ 7*%%str]
    mova                    m1, [tmpq+11*%%str]
    mova       [tmpq+ 7*%%str], m6
    mova       [tmpq+11*%%str], m4
    mova                    m4, [tmpq+ 5*%%str]
    SCRATCH                  5, 14, tmpq+ 5*%%str
    SCRATCH                  7, 15, tmpq+ 8*%%str
    UNSCRATCH                6,  8, tmpq+ 4*%%str
    UNSCRATCH                5, 12, tmpq+ 1*%%str
    UNSCRATCH                7, 13, tmpq+14*%%str

    ; m2=t0, m3=t1, m9=t2, m0=t3, m1=t4, m8=t5, m13=t6, m12=t7
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14

    SUMSUB_BA                w,  1,  2, 0                   ; m1=t0[w], m2=t4[w]
    mova                    m0, [tmpq+10*%%str]
    SCRATCH                  1, 12, tmpq+ 1*%%str
    SUMSUB_BA                w,  6,  3, 1                   ; m8=t1[w], m3=t5[w]
    SCRATCH                  6, 13, tmpq+ 4*%%str
    SUMSUB_BA                w,  7,  4, 1                   ; m13=t2[w], m9=t6[w]
    SCRATCH                  7,  8, tmpq+10*%%str
    SUMSUB_BA                w,  5,  0, 1                   ; m12=t3[w], m0=t7[w]
    SCRATCH                  5,  9, tmpq+14*%%str

    VP9_UNPACK_MULSUB_2D_4X  2,  3,  7,  5, 15137,  6270    ; m2/6=t5[d], m3/10=t4[d]
    VP9_UNPACK_MULSUB_2D_4X  0,  4,  1,  6,  6270, 15137    ; m0/14=t6[d], m9/15=t7[d]
    SCRATCH                  6, 10, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA     0,  3,  1,  5,  6, [pd_8192]
    UNSCRATCH                6, 10, tmpq+ 0*%%str
    PSIGNW                  m0, [pw_m1]                     ; m0=out3[w], m3=t6[w]
    VP9_RND_SH_SUMSUB_BA     4,  2,  6,  7,  5, [pd_8192]   ; m9=out12[w], m2=t7[w]

    UNSCRATCH                1,  8, tmpq+10*%%str
    UNSCRATCH                5,  9, tmpq+14*%%str
    UNSCRATCH                6, 12, tmpq+ 1*%%str
    UNSCRATCH                7, 13, tmpq+ 4*%%str
    SCRATCH                  4,  9, tmpq+14*%%str

    SUMSUB_BA                w,  1,  6,  4                  ; m13=out0[w], m1=t2[w]
    SUMSUB_BA                w,  5,  7,  4
    PSIGNW                  m5, [pw_m1]                     ; m12=out15[w], m8=t3[w]

    ; unfortunately, the code below overflows in some cases, e.g.
    ; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8-4-2-1.webm
%if 0 ; cpuflag(ssse3)
    SUMSUB_BA               w,   7,  6,  4
    pmulhrsw                m7, [pw_m11585x2]               ; m8=out7[w]
    pmulhrsw                m6, [pw_11585x2]                ; m1=out8[w]
    SWAP                     6,  7
    SUMSUB_BA                w,  3,  2,  4
    pmulhrsw                m3, [pw_11585x2]                ; m3=out4[w]
    pmulhrsw                m2, [pw_11585x2]                ; m2=out11[w]
%else
    SCRATCH                  5,  8, tmpq+10*%%str
    VP9_UNPACK_MULSUB_2W_4X  6,  7, 11585, m11585, [pd_8192],  5,  4
    VP9_UNPACK_MULSUB_2W_4X  2,  3, 11585, 11585, [pd_8192],  5,  4
    UNSCRATCH                5,  8, tmpq+10*%%str
%endif

    ; m13=out0, m0=out3, m3=out4, m8=out7, m1=out8, m2=out11, m9=out12, m12=out15
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14

%if %2 == 1
%if ARCH_X86_64
    mova                   m13, [tmpq+ 6*%%str]
    TRANSPOSE8x8W            1, 11, 14, 0, 3, 15, 13, 6, 10
    mova          [tmpq+ 0*16], m1
    mova          [tmpq+ 2*16], m11
    mova          [tmpq+ 4*16], m14
    mova          [tmpq+ 6*16], m0
    mova                    m1, [tmpq+ 3*%%str]
    mova                   m11, [tmpq+ 7*%%str]
    mova                   m14, [tmpq+11*%%str]
    mova                    m0, [tmpq+13*%%str]
    mova          [tmpq+ 8*16], m3
    mova          [tmpq+10*16], m15
    mova          [tmpq+12*16], m13
    mova          [tmpq+14*16], m6

    TRANSPOSE8x8W            7, 1, 11, 2, 9, 14, 0, 5, 10
    mova          [tmpq+ 1*16], m7
    mova          [tmpq+ 3*16], m1
    mova          [tmpq+ 5*16], m11
    mova          [tmpq+ 7*16], m2
    mova          [tmpq+ 9*16], m9
    mova          [tmpq+11*16], m14
    mova          [tmpq+13*16], m0
    mova          [tmpq+15*16], m5
%else
    mova       [tmpq+12*%%str], m2
    mova       [tmpq+ 1*%%str], m5
    mova       [tmpq+15*%%str], m7
    mova                    m2, [tmpq+ 9*%%str]
    mova                    m5, [tmpq+ 5*%%str]
    mova                    m7, [tmpq+ 8*%%str]
    TRANSPOSE8x8W            1, 2, 5, 0, 3, 7, 4, 6, [tmpq+ 6*%%str], [tmpq+ 8*%%str], 1
    mova          [tmpq+ 0*16], m1
    mova          [tmpq+ 2*16], m2
    mova          [tmpq+ 4*16], m5
    mova          [tmpq+ 6*16], m0
    mova          [tmpq+10*16], m7
    mova                    m3, [tmpq+12*%%str]
    mova          [tmpq+12*16], m4
    mova                    m4, [tmpq+14*%%str]
    mova          [tmpq+14*16], m6

    mova                    m0, [tmpq+15*%%str]
    mova                    m1, [tmpq+ 3*%%str]
    mova                    m2, [tmpq+ 7*%%str]
    mova                    m5, [tmpq+11*%%str]
    mova                    m7, [tmpq+ 1*%%str]
    TRANSPOSE8x8W            0, 1, 2, 3, 4, 5, 6, 7, [tmpq+13*%%str], [tmpq+ 9*%%str], 1
    mova          [tmpq+ 1*16], m0
    mova          [tmpq+ 3*16], m1
    mova          [tmpq+ 5*16], m2
    mova          [tmpq+ 7*16], m3
    mova          [tmpq+11*16], m5
    mova          [tmpq+13*16], m6
    mova          [tmpq+15*16], m7
%endif
%else
    pxor                    m4, m4

%if cpuflag(ssse3)
%define ROUND_REG [pw_512]
%else
%define ROUND_REG [pw_32]
%endif
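; rounding constant for the final >>6 in VP9_IDCT8_WRITEx2: pw_512 for the
; ssse3+ pmulhrsw form, pw_32 as the additive bias for the add+shift form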

%if ARCH_X86_64
    mova                   m12, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2        1, 11, 10,  8,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2       14,  0, 10,  8,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2        3, 15, 10,  8,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2       12,  6, 10,  8,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]

    mova                    m1, [tmpq+ 3*%%str]
    mova                   m11, [tmpq+ 7*%%str]
    mova                   m14, [tmpq+11*%%str]
    mova                    m0, [tmpq+13*%%str]

    VP9_IDCT8_WRITEx2        7,  1, 10,  8,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2       11,  2, 10,  8,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2        9, 14, 10,  8,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2        0,  5, 10,  8,  4, ROUND_REG, 6
%else
    mova       [tmpq+ 0*%%str], m2
    mova       [tmpq+ 1*%%str], m5
    mova       [tmpq+ 2*%%str], m7
    mova                    m2, [tmpq+ 9*%%str]
    VP9_IDCT8_WRITEx2        1,  2,  5,  7,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    mova                    m5, [tmpq+ 5*%%str]
    VP9_IDCT8_WRITEx2        5,  0,  1,  2,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    mova                    m5, [tmpq+ 8*%%str]
    VP9_IDCT8_WRITEx2        3,  5,  1,  2,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    mova                    m5, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2        5,  6,  1,  2,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]

    mova                    m0, [tmpq+ 2*%%str]
    mova                    m3, [tmpq+ 3*%%str]
    VP9_IDCT8_WRITEx2        0,  3,  1,  2,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    mova                    m0, [tmpq+ 7*%%str]
    mova                    m3, [tmpq+ 0*%%str]
    VP9_IDCT8_WRITEx2        0,  3,  1,  2,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    mova                    m0, [tmpq+14*%%str]
    mova                    m3, [tmpq+11*%%str]
    VP9_IDCT8_WRITEx2        0,  3,  1,  2,  4, ROUND_REG, 6
    lea                   dstq, [dstq+strideq*2]
    mova                    m0, [tmpq+13*%%str]
    mova                    m3, [tmpq+ 1*%%str]
    VP9_IDCT8_WRITEx2        0,  3,  1,  2,  4, ROUND_REG, 6
%endif

    SWAP                     0,  4 ; zero
%undef ROUND_REG
%endif
%endmacro

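; %1/%3 name the function (vp9_%1_%3_16x16_add), %2/%4 select the 1D
; transform macro for the first and second pass, %5 the instruction set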
%macro IADST16_FN 5
INIT_XMM %5
cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tmp
    mov               cntd, 2
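    ; pass 1: two iterations, each running the 1D transform on an
    ; 8-coefficient-wide half of the block and writing it transposed to tmpq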
    mov               tmpq, rsp
.loop1_full:
    VP9_%2_1D       blockq, 1
    add             blockq, 16
    add               tmpq, 256
    dec               cntd
    jg .loop1_full
    sub             blockq, 32

    mov               cntd, 2
    mov               tmpq, rsp
    mov           dst_bakq, dstq
.loop2_full:
    VP9_%4_1D         tmpq, 2
    lea               dstq, [dst_bakq+8]
    add               tmpq, 16
    dec               cntd
    jg .loop2_full

    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK      blockq, 32, 16, m0
    RET
%endmacro

IADST16_FN idct,  IDCT16,  iadst, IADST16, sse2
IADST16_FN iadst, IADST16, idct,  IDCT16,  sse2
IADST16_FN iadst, IADST16, iadst, IADST16, sse2
IADST16_FN idct,  IDCT16,  iadst, IADST16, ssse3
IADST16_FN iadst, IADST16, idct,  IDCT16,  ssse3
IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
IADST16_FN idct,  IDCT16,  iadst, IADST16, avx
IADST16_FN iadst, IADST16, idct,  IDCT16,  avx
IADST16_FN iadst, IADST16, iadst, IADST16, avx

; in: data in m[0-15] except m0/m4, which are in [blockq+0] and [blockq+128]
; out: m[0-15] except m6, which is in [blockq+192]
; uses blockq as scratch space
%macro VP9_IADST16_YMM_1D 0
    mova          [blockq+ 32], m3
    mova          [blockq+ 64], m7
    mova          [blockq+ 96], m8

    ; first half of round 1
    VP9_UNPACK_MULSUB_2D_4X  9,  6,  0,  3, 13160,  9760    ; m9/x=t7[d], m6/x=t6[d]
    VP9_UNPACK_MULSUB_2D_4X  1, 14,  4,  7,  2404, 16207    ; m1/x=t15[d], m14/x=t14[d]
    VP9_RND_SH_SUMSUB_BA    14,  6,  7,  3,  8, [pd_8192]   ; m14=t6[w], m6=t14[w]
    VP9_RND_SH_SUMSUB_BA     1,  9,  4,  0,  8, [pd_8192]   ; m1=t7[w], m9=t15[w]

    VP9_UNPACK_MULSUB_2D_4X 13,  2,  4,  7, 15893,  3981    ; m13/x=t3[d], m2/x=t2[d]
    VP9_UNPACK_MULSUB_2D_4X  5, 10,  0,  3,  8423, 14053    ; m5/x=t11[d], m10/x=t10[d]
    VP9_RND_SH_SUMSUB_BA    10,  2,  3,  7,  8, [pd_8192]   ; m10=t2[w], m2=t10[w]
    VP9_RND_SH_SUMSUB_BA     5, 13,  0,  4,  8, [pd_8192]   ; m5=t3[w], m13=t11[w]

    ; half of round 2 t8-15
    VP9_UNPACK_MULSUB_2D_4X  2, 13,  4,  7,  9102, 13623    ; m2/x=t11[d], m13/x=t10[d]
    VP9_UNPACK_MULSUB_2D_4X  9,  6,  3,  0, 13623,  9102    ; m9/x=t14[d], m6/x=t15[d]
    VP9_RND_SH_SUMSUB_BA     9, 13,  3,  7,  8, [pd_8192]   ; m9=t10[w], m13=t14[w]
    VP9_RND_SH_SUMSUB_BA     6,  2,  0,  4,  8, [pd_8192]   ; m6=t11[w], m2=t15[w]

    SUMSUB_BA            w, 14, 10,  8                      ; m14=t2, m10=t6
    SUMSUB_BA            w,  1,  5,  8                      ; m1=t3, m5=t7

    mova                    m0, [blockq+  0]
    mova                    m4, [blockq+128]
    mova                    m3, [blockq+ 32]
    mova                    m7, [blockq+ 64]
    mova                    m8, [blockq+ 96]
    mova          [blockq+  0], m1
    mova          [blockq+128], m14
    mova          [blockq+ 32], m6
    mova          [blockq+ 64], m9
    mova          [blockq+ 96], m10

    ; second half of round 1
    VP9_UNPACK_MULSUB_2D_4X 15,  0,  1,  9, 16364,   804    ; m15/x=t1[d], m0/x=t0[d]
    VP9_UNPACK_MULSUB_2D_4X  7,  8, 10,  6, 11003, 12140    ; m7/x=t9[d], m8/x=t8[d]
    VP9_RND_SH_SUMSUB_BA     8,  0,  6,  9, 14, [pd_8192]   ; m8=t0[w], m0=t8[w]
    VP9_RND_SH_SUMSUB_BA     7, 15, 10,  1, 14, [pd_8192]   ; m7=t1[w], m15=t9[w]

    VP9_UNPACK_MULSUB_2D_4X 11,  4, 10,  6, 14811,  7005    ; m11/x=t5[d], m4/x=t4[d]
    VP9_UNPACK_MULSUB_2D_4X  3, 12,  1,  9,  5520, 15426    ; m3/x=t13[d], m12/x=t12[d]
    VP9_RND_SH_SUMSUB_BA    12,  4,  9,  6, 14, [pd_8192]   ; m12=t4[w], m4=t12[w]
    VP9_RND_SH_SUMSUB_BA     3, 11,  1, 10, 14, [pd_8192]   ; m3=t5[w], m11=t13[w]

    ; second half of round 2 t8-15
    VP9_UNPACK_MULSUB_2D_4X  0, 15,  6, 10, 16069,  3196    ; m15/x=t8[d], m0/x=t9[d]
    VP9_UNPACK_MULSUB_2D_4X 11,  4,  9,  1,  3196, 16069    ; m11/x=t12[d], m4/x=t13[d]
    VP9_RND_SH_SUMSUB_BA    11, 15,  9, 10, 14, [pd_8192]   ; m11=t8[w], m15=t12[w]
    VP9_RND_SH_SUMSUB_BA     4,  0,  1,  6, 14, [pd_8192]   ; m4=t9[w], m0=t13[w]

    SUMSUB_BA            w, 12,  8, 14                      ; m12=t0, m8=t4
    SUMSUB_BA            w,  3,  7, 14                      ; m3=t1, m7=t5

    mova                   m10, [blockq+ 96]
    mova          [blockq+ 96], m12

    ; round 3
    VP9_UNPACK_MULSUB_2D_4X 15,  0,  9, 12, 15137,  6270    ; m15/x=t13[d], m0/x=t12[d]
    VP9_UNPACK_MULSUB_2D_4X  2, 13,  1,  6,  6270, 15137    ; m2/x=t14[d], m13/x=t15[d]
    VP9_RND_SH_SUMSUB_BA     2,  0,  1, 12, 14, [pd_8192]   ; m2=out2[w], m0=t14a[w]
    VP9_RND_SH_SUMSUB_BA    13, 15,  6,  9, 14, [pd_8192]
    PSIGNW                 m13, [pw_m1]                     ; m13=out13[w], m15=t15a[w]

    VP9_UNPACK_MULSUB_2D_4X  8,  7, 12,  9, 15137,  6270    ; m8/x=t5[d], m7/x=t4[d]
    VP9_UNPACK_MULSUB_2D_4X  5, 10,  1,  6,  6270, 15137    ; m5/x=t6[d], m10/x=t7[d]
    VP9_RND_SH_SUMSUB_BA     5,  7,  1,  9, 14, [pd_8192]
    PSIGNW                  m5, [pw_m1]                     ; m5=out3[w], m7=t6[w]
    VP9_RND_SH_SUMSUB_BA    10,  8,  6, 12, 14, [pd_8192]   ; m10=out12[w], m8=t7[w]

    mova                    m1, [blockq+  0]
    mova                   m14, [blockq+128]
    mova                    m6, [blockq+ 32]
    mova                    m9, [blockq+ 64]
    mova                   m12, [blockq+ 96]
    mova          [blockq+  0], m10
    mova          [blockq+128], m5

    SUMSUB_BA            w, 14, 12,  5                      ; m14=out0, m12=t2a
    SUMSUB_BA            w,  1,  3,  5
    PSIGNW                  m1, [pw_m1]                     ; m1=out15, m3=t3a

    SUMSUB_BA            w,  9, 11,  5
    PSIGNW                  m9, [pw_m1]                     ; m9=out1, m11=t10
    SUMSUB_BA            w,  6,  4,  5                      ; m6=out14, m4=t11

    VP9_UNPACK_MULSUB_2W_4X  4, 11, 11585, 11585, [pd_8192],  5, 10 ; m4=out9, m11=out6
    mova                    m5, [blockq+128]
    mova          [blockq+192], m11
    PSIGNW                 m15, [pw_m1]
    VP9_UNPACK_MULSUB_2W_4X 15,  0, 11585, 11585, [pd_8192], 10, 11 ; m15=out5, m0=out10

    PSIGNW                  m3, [pw_m1]
    VP9_UNPACK_MULSUB_2W_4X  3, 12, 11585, 11585, [pd_8192], 10, 11 ; m3=out7,m12=out8
    VP9_UNPACK_MULSUB_2W_4X  8,  7, 11585, 11585, [pd_8192], 10, 11 ; m8=out11,m7=out4

    mova                   m10, [blockq+  0]

    SWAP                     0, 14,  6, 11,  8, 12, 10
    SWAP                     1,  9, 15,  4,  7,  3,  5
    SWAP                     5,  9, 15
%endmacro

%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
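; same scheme as IADST16_FN, minus the instruction-set argument; with
; 16-word ymm rows each pass fits entirely in registers, so there is no
; half-block loop and no stack tmp buffer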
%macro IADST16_YMM_FN 4
INIT_YMM avx2
cglobal vp9_%1_%3_16x16_add, 4, 4, 16, dst, stride, block, eob
    mova                m1, [blockq+ 32]
    mova                m2, [blockq+ 64]
    mova                m3, [blockq+ 96]
    mova                m5, [blockq+160]
    mova                m6, [blockq+192]
    mova                m7, [blockq+224]
    mova                m8, [blockq+256]
    mova                m9, [blockq+288]
    mova               m10, [blockq+320]
    mova               m11, [blockq+352]
    mova               m12, [blockq+384]
    mova               m13, [blockq+416]
    mova               m14, [blockq+448]
    mova               m15, [blockq+480]

    VP9_%2_YMM_1D
    TRANSPOSE16x16W      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
                         [blockq+192], [blockq+128], 1
    mova      [blockq+  0], m0
    VP9_%4_YMM_1D

    mova      [blockq+224], m7

    ; store
    VP9_IDCT8_WRITEx2    0,  1, 6, 7, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2    2,  3, 6, 7, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2    4,  5, 6, 7, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    mova                m6, [blockq+192]
    mova                m7, [blockq+224]
    VP9_IDCT8_WRITEx2    6,  7, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2    8,  9, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2   10, 11, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2   12, 13, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2   14, 15, 1, 2, unused, [pw_512], 6
    lea               dstq, [dstq+2*strideq]

    ; m0 is not guaranteed to be zero at this point, so clear it and use it
    ; to zero out the block coefficients
    pxor                m0, m0
    ZERO_BLOCK      blockq, 32, 16, m0
    RET
%endmacro

IADST16_YMM_FN idct,  IDCT16,  iadst, IADST16
IADST16_YMM_FN iadst, IADST16, idct,  IDCT16
IADST16_YMM_FN iadst, IADST16, iadst, IADST16
%endif

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

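; %3 (nnzc) is an upper bound on the non-zero coefficient area (derived from
; eob); for small bounds the butterflies collapse into single pmulhrsw
; multiplies and most of the coefficient loads are skipped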
%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
%if %2 == 1
%assign %%str mmsize
%else
%assign %%str 64
%endif
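    ; tmpq slot spacing: one mmsize-wide slot per row in pass 1, a full
    ; 64-byte (32-coefficient) row in pass 2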

    ; first do t0-15, this can be done identical to idct16x16
    VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str, 1

    ; store everything on stack to make space available for t16-31
    ; we store interleaved with the output of the second half (t16-31)
    ; so we don't need to allocate extra stack space
    mova    [tmpq+ 0*%%str], m0     ; t0
    mova    [tmpq+ 4*%%str], m1     ; t1
    mova    [tmpq+ 8*%%str], m2     ; t2
    mova    [tmpq+12*%%str], m3     ; t3
    mova    [tmpq+16*%%str], m4     ; t4
    mova    [tmpq+20*%%str], m5     ; t5
%if ARCH_X86_64
    mova    [tmpq+22*%%str], m10    ; t10
    mova    [tmpq+18*%%str], m11    ; t11
    mova    [tmpq+14*%%str], m12    ; t12
    mova    [tmpq+10*%%str], m13    ; t13
    mova    [tmpq+ 6*%%str], m14    ; t14
    mova    [tmpq+ 2*%%str], m15    ; t15
%endif

    mova                m0, [tmpq+ 30*%%str]
    UNSCRATCH            1,  6, tmpq+26*%%str
    UNSCRATCH            2,  8, tmpq+24*%%str
    UNSCRATCH            3,  9, tmpq+28*%%str
    SUMSUB_BA            w,  1,  3, 4       ; t6, t9
    SUMSUB_BA            w,  0,  2, 4       ; t7, t8

    mova    [tmpq+24*%%str], m1     ; t6
    mova    [tmpq+28*%%str], m0     ; t7
    mova    [tmpq+30*%%str], m2     ; t8
    mova    [tmpq+26*%%str], m3     ; t9

    ; then, secondly, do t16-31
%if %3 <= 8
    mova                 m4, [%1+ 1*64]
    mova                 m7, [%1+ 7*64]

    pmulhrsw             m1,  m4, [pw_16364x2] ;t31
    pmulhrsw             m4, [pw_804x2] ;t16

    VP9_UNPACK_MULSUB_2W_4X   5,  0,  1,  4, 16069,  3196, [pd_8192], 6,  2 ; t17, t30

    pmulhrsw             m3,  m7, [pw_m5520x2] ;t19
    pmulhrsw             m7, [pw_15426x2] ;t28

    SCRATCH               4, 13, tmpq+ 1*%%str
    SCRATCH               5, 12, tmpq+15*%%str

    VP9_UNPACK_MULSUB_2W_4X   2,  6,  7,  3, 3196, m16069, [pd_8192], 4,  5 ; t18, t29
%else
    mova                 m0, [%1+ 1*64]
    mova                 m1, [%1+15*64]
%if %3 <= 16
    pmulhrsw             m5, m0, [pw_16364x2]
    pmulhrsw             m0, [pw_804x2]
    pmulhrsw             m4, m1, [pw_m11003x2]
    pmulhrsw             m1, [pw_12140x2]
%else
    mova                 m4, [%1+17*64]
    mova                 m5, [%1+31*64]

    VP9_UNPACK_MULSUB_2W_4X   0,  5, 16364,   804, [pd_8192], 2, 3 ; t16, t31
    VP9_UNPACK_MULSUB_2W_4X   4,  1, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
%endif
    SUMSUB_BA             w,  4,  0,  2
    SUMSUB_BA             w,  1,  5,  2

    VP9_UNPACK_MULSUB_2W_4X   5,  0, 16069,  3196, [pd_8192], 2, 3 ; t17, t30

    SCRATCH               4, 13, tmpq+ 1*%%str
    SCRATCH               5, 12, tmpq+15*%%str

    mova                 m2, [%1+ 7*64]
    mova                 m3, [%1+ 9*64]
%if %3 <= 16
    pmulhrsw             m7,  m3, [pw_14811x2]
    pmulhrsw             m3, [pw_7005x2]
    pmulhrsw             m6,  m2, [pw_m5520x2]
    pmulhrsw             m2, [pw_15426x2]
%else
    mova                 m7, [%1+23*64]
    mova                 m6, [%1+25*64]

    VP9_UNPACK_MULSUB_2W_4X   3,  7, 14811,  7005, [pd_8192], 4, 5 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X   6,  2,  5520, 15426, [pd_8192], 4, 5 ; t19, t28
%endif
    SUMSUB_BA             w,  3,  6,  4
    SUMSUB_BA             w,  7,  2,  4

    VP9_UNPACK_MULSUB_2W_4X   2,  6, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
%endif

    UNSCRATCH             5, 12, tmpq+15*%%str
    SUMSUB_BA             w,  6,  0,  4
    mova    [tmpq+25*%%str], m6             ; t19
    UNSCRATCH             4, 13, tmpq+ 1*%%str
    SUMSUB_BA             w,  7,  1,  6
    SUMSUB_BA             w,  3,  4,  6
    mova    [tmpq+23*%%str], m3             ; t16
    SUMSUB_BA             w,  2,  5,  6

    VP9_UNPACK_MULSUB_2W_4X   0,  5, 15137,  6270, [pd_8192], 6, 3 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X   1,  4, 15137,  6270, [pd_8192], 6, 3 ; t19, t28

    SCRATCH               0, 10, tmpq+ 1*%%str
    SCRATCH               1, 11, tmpq+ 7*%%str
    SCRATCH               2,  9, tmpq+ 9*%%str
    SCRATCH               4, 14, tmpq+15*%%str
    SCRATCH               5, 15, tmpq+17*%%str
    SCRATCH               7, 13, tmpq+31*%%str

%if %3 <= 8
    mova                 m0, [%1+ 5*64]
    mova                 m3, [%1+ 3*64]

    pmulhrsw             m5,  m0, [pw_15893x2] ;t27
    pmulhrsw             m0, [pw_3981x2] ;t20

    VP9_UNPACK_MULSUB_2W_4X   1,  4,  5,  0,  9102, 13623, [pd_8192], 7,  2 ; t21, t26

    pmulhrsw             m6,  m3, [pw_m2404x2] ;t23
    pmulhrsw             m3, [pw_16207x2] ;t24

    SCRATCH               5,  8, tmpq+ 5*%%str
    SCRATCH               4, 12, tmpq+11*%%str

    VP9_UNPACK_MULSUB_2W_4X   7,  2,  3,  6, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
%else
    mova                 m4, [%1+ 5*64]
    mova                 m5, [%1+11*64]
%if %3 <= 16
    pmulhrsw             m1, m4, [pw_15893x2]
    pmulhrsw             m4, [pw_3981x2]
    pmulhrsw             m0, m5, [pw_m8423x2]
    pmulhrsw             m5, [pw_14053x2]
%else
    mova                 m0, [%1+21*64]
    mova                 m1, [%1+27*64]

    VP9_UNPACK_MULSUB_2W_4X   4,  1, 15893,  3981, [pd_8192], 2, 3 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X   0,  5,  8423, 14053, [pd_8192], 2, 3 ; t21, t26
%endif
    SUMSUB_BA             w,  0,  4,  2
    SUMSUB_BA             w,  5,  1,  2

    VP9_UNPACK_MULSUB_2W_4X   1,  4,  9102, 13623, [pd_8192], 2, 3 ; t21, t26

    SCRATCH               5,  8, tmpq+ 5*%%str
    SCRATCH               4, 12, tmpq+11*%%str

    mova                 m7, [%1+ 3*64]
    mova                 m6, [%1+13*64]
%if %3 <= 16
    pmulhrsw             m3, m6, [pw_13160x2]
    pmulhrsw             m6, [pw_9760x2]
    pmulhrsw             m2, m7, [pw_m2404x2]
    pmulhrsw             m7, [pw_16207x2]
%else
    mova                 m2, [%1+29*64]
    mova                 m3, [%1+19*64]
    VP9_UNPACK_MULSUB_2W_4X   6,  3, 13160,  9760, [pd_8192], 4, 5 ; t22, t25
    VP9_UNPACK_MULSUB_2W_4X   2,  7,  2404, 16207, [pd_8192], 4, 5 ; t23, t24
%endif
    SUMSUB_BA             w,  6,  2,  4
    SUMSUB_BA             w,  3,  7,  4

    VP9_UNPACK_MULSUB_2W_4X   7,  2, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
%endif

    ; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
    ; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31

    UNSCRATCH             4, 12, tmpq+11*%%str
    SUMSUB_BA             w,  0,  6, 5
    SUMSUB_BA             w,  4,  2, 5
    UNSCRATCH             5,  8, tmpq+ 5*%%str
    SCRATCH               4,  8, tmpq+11*%%str
    SUMSUB_BA             w,  1,  7, 4
    SUMSUB_BA             w,  5,  3, 4
    SCRATCH               5, 12, tmpq+ 5*%%str

    VP9_UNPACK_MULSUB_2W_4X   3,  6, 6270, m15137, [pd_8192], 4, 5 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X   2,  7, 6270, m15137, [pd_8192], 4, 5 ; t21, t26

    ; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
    ; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31

    UNSCRATCH             5,  9, tmpq+ 9*%%str
    mova                 m4, [tmpq+23*%%str] ; t16
%if ARCH_X86_64
    SUMSUB_BA             w,  1,  5,  9
    SUMSUB_BA             w,  0,  4,  9
%else
    SUMSUB_BADC           w,  1,  5,  0,  4
%endif
    mova    [tmpq+29*%%str], m1     ; t17
    mova    [tmpq+21*%%str], m0     ; t16
    UNSCRATCH             0, 10, tmpq+ 1*%%str
    UNSCRATCH             1, 11, tmpq+ 7*%%str
%if ARCH_X86_64
    SUMSUB_BA             w,  2,  0,  9
    SUMSUB_BA             w,  3,  1,  9
%else
    SUMSUB_BADC           w,  2,  0,  3,  1
%endif
    mova    [tmpq+ 9*%%str], m2     ; t18
    mova    [tmpq+13*%%str], m3     ; t19
    SCRATCH               0, 10, tmpq+23*%%str
    SCRATCH               1, 11, tmpq+27*%%str

    UNSCRATCH             2, 14, tmpq+15*%%str
    UNSCRATCH             3, 15, tmpq+17*%%str
    SUMSUB_BA             w,  6,  2, 0
    SUMSUB_BA             w,  7,  3, 0
    SCRATCH               6, 14, tmpq+ 3*%%str
    SCRATCH               7, 15, tmpq+ 7*%%str

    UNSCRATCH             0,  8, tmpq+11*%%str
    mova                 m1, [tmpq+25*%%str] ; t19
    UNSCRATCH             6, 12, tmpq+ 5*%%str
    UNSCRATCH             7, 13, tmpq+31*%%str
%if ARCH_X86_64
    SUMSUB_BA             w,  0,  1,  9
    SUMSUB_BA             w,  6,  7,  9
%else
    SUMSUB_BADC           w,  0,  1,  6,  7
%endif

    ; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
    ; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31

%if 0; cpuflag(ssse3)
%if ARCH_X86_64
    SUMSUB_BA             w,  4,  7,  8
    SUMSUB_BA             w,  5,  1,  8
%else
    SUMSUB_BADC           w,  4,  7,  5,  1
%endif

    pmulhrsw             m7, [pw_11585x2]
    pmulhrsw             m4, [pw_11585x2]
    pmulhrsw             m1, [pw_11585x2]
    pmulhrsw             m5, [pw_11585x2]

    mova    [tmpq+ 5*%%str], m7     ; t23
    SCRATCH               1, 13, tmpq+25*%%str
    UNSCRATCH             7, 10, tmpq+23*%%str
    UNSCRATCH             1, 11, tmpq+27*%%str

%if ARCH_X86_64
    SUMSUB_BA             w,  7,  3, 10
    SUMSUB_BA             w,  1,  2, 10
%else
    SUMSUB_BADC           w,  7,  3,  1,  2
%endif

    pmulhrsw             m3, [pw_11585x2]
    pmulhrsw             m7, [pw_11585x2]
    pmulhrsw             m2, [pw_11585x2]
    pmulhrsw             m1, [pw_11585x2]
%else
    SCRATCH               0,  8, tmpq+15*%%str
    SCRATCH               6,  9, tmpq+17*%%str
    VP9_UNPACK_MULSUB_2W_4X  7,  4, 11585, 11585, [pd_8192], 0, 6
    mova    [tmpq+ 5*%%str], m7     ; t23
    UNSCRATCH             7, 10, tmpq+23*%%str
    VP9_UNPACK_MULSUB_2W_4X  1,  5, 11585, 11585, [pd_8192], 0, 6
    SCRATCH               1, 13, tmpq+25*%%str
    UNSCRATCH             1, 11, tmpq+27*%%str
    VP9_UNPACK_MULSUB_2W_4X  3,  7, 11585, 11585, [pd_8192], 0, 6
    VP9_UNPACK_MULSUB_2W_4X  2,  1, 11585, 11585, [pd_8192], 0, 6
    UNSCRATCH             0,  8, tmpq+15*%%str
    UNSCRATCH             6,  9, tmpq+17*%%str
%endif

    ; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
    ; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31

    ; then do final pass to sumsub+store the two halves
%if %2 == 1
    mova    [tmpq+17*%%str], m2     ; t20
    mova    [tmpq+ 1*%%str], m3     ; t21
%if ARCH_X86_64
    mova    [tmpq+25*%%str], m13    ; t22

    mova                 m8, [tmpq+ 0*%%str] ; t0
    mova                 m9, [tmpq+ 4*%%str] ; t1
    mova                m12, [tmpq+ 8*%%str] ; t2
    mova                m11, [tmpq+12*%%str] ; t3
    mova                 m2, [tmpq+16*%%str] ; t4
    mova                 m3, [tmpq+20*%%str] ; t5
    mova                m13, [tmpq+24*%%str] ; t6

    SUMSUB_BA             w,  6,  8, 10
    mova    [tmpq+ 3*%%str], m8              ; t15
    SUMSUB_BA             w,  0,  9,  8
    SUMSUB_BA             w, 15, 12,  8
    SUMSUB_BA             w, 14, 11,  8
    SUMSUB_BA             w,  1,  2,  8
    SUMSUB_BA             w,  7,  3,  8
    SUMSUB_BA             w,  5, 13,  8
    mova                m10, [tmpq+28*%%str] ; t7
    SUMSUB_BA             w,  4, 10,  8
%if cpuflag(avx2)
    ; the awkward part of this idct is that the final pass does the outermost
    ; interleave sumsubs (t0/31, t1/30, etc) but the tN for the 16x16 need
    ; to be sequential, which means I need to load/store half of the sumsub
    ; intermediates back to/from memory to get a 16x16 transpose going...
    ; This would be easier if we had more (e.g. 32) YMM regs here.
    mova    [tmpq+ 7*%%str], m9
    mova    [tmpq+11*%%str], m12
    mova    [tmpq+15*%%str], m11
    mova    [tmpq+19*%%str], m2
    mova    [tmpq+23*%%str], m3
    mova    [tmpq+27*%%str], m13
    mova    [tmpq+31*%%str], m10
    mova    [tmpq+12*%%str], m5

    mova                m13, [tmpq+30*%%str] ; t8
    mova                m12, [tmpq+26*%%str] ; t9
    mova                m11, [tmpq+22*%%str] ; t10
    mova                m10, [tmpq+18*%%str] ; t11
    mova                 m9, [tmpq+17*%%str] ; t20
    mova                 m8, [tmpq+ 1*%%str] ; t21
    mova                 m3, [tmpq+25*%%str] ; t22
    mova                 m2, [tmpq+ 5*%%str] ; t23

    SUMSUB_BA             w,  9, 10, 5
    SUMSUB_BA             w,  8, 11, 5
    SUMSUB_BA             w,  3, 12, 5
    SUMSUB_BA             w,  2, 13, 5
    mova    [tmpq+ 1*%%str], m10
    mova    [tmpq+ 5*%%str], m11
    mova    [tmpq+17*%%str], m12
    mova    [tmpq+25*%%str], m13

    mova                m13, [tmpq+14*%%str] ; t12
    mova                m12, [tmpq+10*%%str] ; t13
    mova                m11, [tmpq+ 9*%%str] ; t18
    mova                m10, [tmpq+13*%%str] ; t19

    SUMSUB_BA             w, 11, 12, 5
    SUMSUB_BA             w, 10, 13, 5
    mova    [tmpq+ 9*%%str], m13
    mova    [tmpq+13*%%str], m12
    mova    [tmpq+10*%%str], m10
    mova    [tmpq+14*%%str], m11

    mova                m13, [tmpq+ 6*%%str] ; t14
    mova                m12, [tmpq+ 2*%%str] ; t15
    mova                m11, [tmpq+21*%%str] ; t16
    mova                m10, [tmpq+29*%%str] ; t17
    SUMSUB_BA             w, 11, 12, 5
    SUMSUB_BA             w, 10, 13, 5
    mova    [tmpq+21*%%str], m12
    mova    [tmpq+29*%%str], m13
    mova                m12, [tmpq+10*%%str]
    mova                m13, [tmpq+14*%%str]

    TRANSPOSE16x16W       6,  0, 15, 14,  1,  7,  5,  4, \
                          2,  3,  8,  9, 12, 13, 10, 11, \
            [tmpq+12*%%str], [tmpq+ 8*%%str], 1
    mova    [tmpq+ 0*%%str], m6
    mova    [tmpq+ 2*%%str], m0
    mova    [tmpq+ 4*%%str], m15
    mova    [tmpq+ 6*%%str], m14
    mova    [tmpq+10*%%str], m7
    mova    [tmpq+12*%%str], m5
    mova    [tmpq+14*%%str], m4
    mova    [tmpq+16*%%str], m2
    mova    [tmpq+18*%%str], m3
    mova    [tmpq+20*%%str], m8
    mova    [tmpq+22*%%str], m9
    mova    [tmpq+24*%%str], m12
    mova    [tmpq+26*%%str], m13
    mova    [tmpq+28*%%str], m10
    mova    [tmpq+30*%%str], m11

    mova                 m0, [tmpq+21*%%str]
    mova                 m1, [tmpq+29*%%str]
    mova                 m2, [tmpq+13*%%str]
    mova                 m3, [tmpq+ 9*%%str]
    mova                 m4, [tmpq+ 1*%%str]
    mova                 m5, [tmpq+ 5*%%str]
    mova                 m7, [tmpq+25*%%str]
    mova                 m8, [tmpq+31*%%str]
    mova                 m9, [tmpq+27*%%str]
    mova                m10, [tmpq+23*%%str]
    mova                m11, [tmpq+19*%%str]
    mova                m12, [tmpq+15*%%str]
    mova                m13, [tmpq+11*%%str]
    mova                m14, [tmpq+ 7*%%str]
    mova                m15, [tmpq+ 3*%%str]
    TRANSPOSE16x16W       0,  1,  2,  3,  4,  5,  6,  7, \
                          8,  9, 10, 11, 12, 13, 14, 15, \
            [tmpq+17*%%str], [tmpq+ 9*%%str], 1
    mova    [tmpq+ 1*%%str], m0
    mova    [tmpq+ 3*%%str], m1
    mova    [tmpq+ 5*%%str], m2
    mova    [tmpq+ 7*%%str], m3
    mova    [tmpq+11*%%str], m5
    mova    [tmpq+13*%%str], m6
    mova    [tmpq+15*%%str], m7
    mova    [tmpq+17*%%str], m8
    mova    [tmpq+19*%%str], m9
    mova    [tmpq+21*%%str], m10
    mova    [tmpq+23*%%str], m11
    mova    [tmpq+25*%%str], m12
    mova    [tmpq+27*%%str], m13
    mova    [tmpq+29*%%str], m14
    mova    [tmpq+31*%%str], m15
%else ; !avx2
    TRANSPOSE8x8W         6, 0, 15, 14, 1, 7, 5, 4, 8
    mova    [tmpq+ 0*%%str], m6
    mova    [tmpq+ 4*%%str], m0
    mova    [tmpq+ 8*%%str], m15
    mova    [tmpq+12*%%str], m14
    mova    [tmpq+16*%%str], m1
    mova    [tmpq+20*%%str], m7
    mova    [tmpq+24*%%str], m5
    mova    [tmpq+28*%%str], m4

    mova                  m8, [tmpq+ 3*%%str] ; t15
    TRANSPOSE8x8W         10, 13, 3, 2, 11, 12, 9, 8, 0
    mova    [tmpq+ 3*%%str], m10
    mova    [tmpq+ 7*%%str], m13
    mova    [tmpq+11*%%str], m3
    mova    [tmpq+15*%%str], m2
    mova    [tmpq+19*%%str], m11
    mova    [tmpq+23*%%str], m12
    mova    [tmpq+27*%%str], m9
    mova    [tmpq+31*%%str], m8

    mova                m15, [tmpq+30*%%str] ; t8
    mova                m14, [tmpq+26*%%str] ; t9
    mova                m13, [tmpq+22*%%str] ; t10
    mova                m12, [tmpq+18*%%str] ; t11
    mova                m11, [tmpq+14*%%str] ; t12
    mova                m10, [tmpq+10*%%str] ; t13
    mova                 m9, [tmpq+ 6*%%str] ; t14
    mova                 m8, [tmpq+ 2*%%str] ; t15
    mova                 m7, [tmpq+21*%%str] ; t16
    mova                 m6, [tmpq+29*%%str] ; t17
    mova                 m5, [tmpq+ 9*%%str] ; t18
    mova                 m4, [tmpq+13*%%str] ; t19
    mova                 m3, [tmpq+17*%%str] ; t20
    mova                 m2, [tmpq+ 1*%%str] ; t21
    mova                 m1, [tmpq+25*%%str] ; t22

    SUMSUB_BA             w,  7,  8, 0
    mova    [tmpq+ 2*%%str], m8
    mova                 m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BA             w,  6,  9, 8
    SUMSUB_BA             w,  5, 10, 8
    SUMSUB_BA             w,  4, 11, 8
    SUMSUB_BA             w,  3, 12, 8
    SUMSUB_BA             w,  2, 13, 8
    SUMSUB_BA             w,  1, 14, 8
    SUMSUB_BA             w,  0, 15, 8

    TRANSPOSE8x8W         0, 1, 2, 3, 4, 5, 6, 7, 8
    mova    [tmpq+ 1*%%str], m0
    mova    [tmpq+ 5*%%str], m1
    mova    [tmpq+ 9*%%str], m2
    mova    [tmpq+13*%%str], m3
    mova    [tmpq+17*%%str], m4
    mova    [tmpq+21*%%str], m5
    mova    [tmpq+25*%%str], m6
    mova    [tmpq+29*%%str], m7

    mova                 m8, [tmpq+ 2*%%str]
    TRANSPOSE8x8W         8, 9, 10, 11, 12, 13, 14, 15, 0
    mova    [tmpq+ 2*%%str], m8
    mova    [tmpq+ 6*%%str], m9
    mova    [tmpq+10*%%str], m10
    mova    [tmpq+14*%%str], m11
    mova    [tmpq+18*%%str], m12
    mova    [tmpq+22*%%str], m13
    mova    [tmpq+26*%%str], m14
    mova    [tmpq+30*%%str], m15
%endif ; avx2
%else
    mova                 m2, [tmpq+24*%%str] ; t6
    mova                 m3, [tmpq+28*%%str] ; t7
    SUMSUB_BADC           w,  5,  2,  4,  3
    mova    [tmpq+24*%%str], m5
    mova    [tmpq+23*%%str], m2
    mova    [tmpq+28*%%str], m4
    mova    [tmpq+19*%%str], m3

    mova                 m2, [tmpq+16*%%str] ; t4
    mova                 m3, [tmpq+20*%%str] ; t5
    SUMSUB_BA             w,  1,  2,  5
    SUMSUB_BA             w,  7,  3,  5
    mova    [tmpq+15*%%str], m2
    mova    [tmpq+11*%%str], m3

    mova                 m2, [tmpq+ 0*%%str] ; t0
    mova                 m3, [tmpq+ 4*%%str] ; t1
    SUMSUB_BA             w,  6,  2,  5
    SUMSUB_BA             w,  0,  3,  5
    mova    [tmpq+31*%%str], m2
    mova    [tmpq+27*%%str], m3

    mova                 m2, [tmpq+ 8*%%str] ; t2
    mova                 m3, [tmpq+12*%%str] ; t3
    mova                 m5, [tmpq+ 7*%%str]
    mova                 m4, [tmpq+ 3*%%str]
    SUMSUB_BADC           w,  5,  2,  4,  3
    mova    [tmpq+ 7*%%str], m2
    mova    [tmpq+ 3*%%str], m3

    mova                 m3, [tmpq+28*%%str]
    TRANSPOSE8x8W         6, 0, 5, 4, 1, 7, 2, 3, [tmpq+24*%%str], [tmpq+16*%%str], 1
    mova    [tmpq+ 0*%%str], m6
    mova    [tmpq+ 4*%%str], m0
    mova    [tmpq+ 8*%%str], m5
    mova    [tmpq+12*%%str], m4
    mova    [tmpq+20*%%str], m7
    mova    [tmpq+24*%%str], m2
    mova    [tmpq+28*%%str], m3

    mova                 m6, [tmpq+19*%%str]
    mova                 m0, [tmpq+23*%%str]
    mova                 m5, [tmpq+11*%%str]
    mova                 m4, [tmpq+15*%%str]
    mova                 m1, [tmpq+ 3*%%str]
    mova                 m7, [tmpq+ 7*%%str]
    mova                 m3, [tmpq+31*%%str]
    TRANSPOSE8x8W         6, 0, 5, 4, 1, 7, 2, 3, [tmpq+27*%%str], [tmpq+19*%%str], 1
    mova    [tmpq+ 3*%%str], m6
    mova    [tmpq+ 7*%%str], m0
    mova    [tmpq+11*%%str], m5
    mova    [tmpq+15*%%str], m4
    mova    [tmpq+23*%%str], m7
    mova    [tmpq+27*%%str], m2
    mova    [tmpq+31*%%str], m3

    mova                 m1, [tmpq+ 6*%%str] ; t14
    mova                 m0, [tmpq+ 2*%%str] ; t15
    mova                 m7, [tmpq+21*%%str] ; t16
    mova                 m6, [tmpq+29*%%str] ; t17
    SUMSUB_BA             w,  7,  0,  2
    SUMSUB_BA             w,  6,  1,  2
    mova    [tmpq+29*%%str], m7
    mova    [tmpq+ 2*%%str], m0
    mova    [tmpq+21*%%str], m6
    mova    [tmpq+ 6*%%str], m1

    mova                 m1, [tmpq+14*%%str] ; t12
    mova                 m0, [tmpq+10*%%str] ; t13
    mova                 m5, [tmpq+ 9*%%str] ; t18
    mova                 m4, [tmpq+13*%%str] ; t19
    SUMSUB_BA             w,  5,  0,  2
    SUMSUB_BA             w,  4,  1,  2
    mova     [tmpq+10*%%str], m0
    mova     [tmpq+14*%%str], m1

    mova                 m1, [tmpq+22*%%str] ; t10
    mova                 m0, [tmpq+18*%%str] ; t11
    mova                 m3, [tmpq+17*%%str] ; t20
    mova                 m2, [tmpq+ 1*%%str] ; t21
    SUMSUB_BA             w,  3,  0,  6
    SUMSUB_BA             w,  2,  1,  6
    mova     [tmpq+18*%%str], m0
    mova     [tmpq+22*%%str], m1

    mova                 m7, [tmpq+30*%%str] ; t8
    mova                 m6, [tmpq+26*%%str] ; t9
    mova                 m1, [tmpq+25*%%str] ; t22
    mova                 m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BADC           w,  1,  6,  0,  7
    mova     [tmpq+26*%%str], m6
    mova     [tmpq+30*%%str], m7

    mova                 m7, [tmpq+29*%%str]
    TRANSPOSE8x8W         0, 1, 2, 3, 4, 5, 6, 7, [tmpq+21*%%str], [tmpq+17*%%str], 1
    mova    [tmpq+ 1*%%str], m0
    mova    [tmpq+ 5*%%str], m1
    mova    [tmpq+ 9*%%str], m2
    mova    [tmpq+13*%%str], m3
    mova    [tmpq+21*%%str], m5
    mova    [tmpq+25*%%str], m6
    mova    [tmpq+29*%%str], m7

    mova                 m0, [tmpq+ 2*%%str]
    mova                 m1, [tmpq+ 6*%%str]
    mova                 m2, [tmpq+10*%%str]
    mova                 m3, [tmpq+14*%%str]
    mova                 m4, [tmpq+18*%%str]
    mova                 m5, [tmpq+22*%%str]
    mova                 m7, [tmpq+30*%%str]
    TRANSPOSE8x8W         0, 1, 2, 3, 4, 5, 6, 7, [tmpq+26*%%str], [tmpq+18*%%str], 1
    mova    [tmpq+ 2*%%str], m0
    mova    [tmpq+ 6*%%str], m1
    mova    [tmpq+10*%%str], m2
    mova    [tmpq+14*%%str], m3
    mova    [tmpq+22*%%str], m5
    mova    [tmpq+26*%%str], m6
    mova    [tmpq+30*%%str], m7
%endif
%else
    ; t0-7 is in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
    ; t8-15 is in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
    ; t16-19 and t23 is in [tmpq+{1,5,9,13,29}*%%str]
    ; t20-22 is in m4-6
    ; t24-31 is in m8-15

%if cpuflag(ssse3)
%define ROUND_REG [pw_512]
%else
%define ROUND_REG [pw_32]
%endif

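; sum/difference one tN/t(31-N) register pair and write two rows at the top
; of the destination (dstq) and two at the bottom (dst_endq), walking both
; pointers toward the middle; pass 0 as the last arg to leave them untouched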
%macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
    SUMSUB_BA            w, %4, %1, %5
    SUMSUB_BA            w, %3, %2, %5
    VP9_IDCT8_WRITEx2   %4, %3, %5, %6, %7, ROUND_REG, 6
%if %8 == 1
    add               dstq, stride2q
%endif
    VP9_IDCT8_WRITEx2   %2, %1, %5, %6, %7, ROUND_REG, 6, dst_endq
%if %8 == 1
    sub           dst_endq, stride2q
%endif
%endmacro

%if ARCH_X86_64
    pxor               m10, m10

    ; store t0-1 and t30-31
    mova                m8, [tmpq+ 0*%%str]
    mova                m9, [tmpq+ 4*%%str]
    %%STORE_2X2          8,  9,  0,  6, 12, 11, 10

    ; store t2-3 and t28-29
    mova                m8, [tmpq+ 8*%%str]
    mova                m9, [tmpq+12*%%str]
    %%STORE_2X2          8,  9, 14, 15, 12, 11, 10

    ; store t4-5 and t26-27
    mova                m8, [tmpq+16*%%str]
    mova                m9, [tmpq+20*%%str]
    %%STORE_2X2          8,  9,  7,  1, 12, 11, 10

    ; store t6-7 and t24-25
    mova                m8, [tmpq+24*%%str]
    mova                m9, [tmpq+28*%%str]
    %%STORE_2X2          8,  9,  4,  5, 12, 11, 10

    ; store t8-9 and t22-23
    mova                m8, [tmpq+30*%%str]
    mova                m9, [tmpq+26*%%str]
    mova                m0, [tmpq+ 5*%%str]
    %%STORE_2X2          8,  9, 13,  0, 12, 11, 10

    ; store t10-11 and t20-21
    mova                m8, [tmpq+22*%%str]
    mova                m9, [tmpq+18*%%str]
    %%STORE_2X2          8,  9,  2,  3, 12, 11, 10

    ; store t12-13 and t18-19
    mova                m8, [tmpq+14*%%str]
    mova                m9, [tmpq+10*%%str]
    mova                m5, [tmpq+13*%%str]
    mova                m4, [tmpq+ 9*%%str]
    %%STORE_2X2          8,  9,  4,  5, 12, 11, 10

    ; store t14-17
    mova                m8, [tmpq+ 6*%%str]
    mova                m9, [tmpq+ 2*%%str]
    mova                m5, [tmpq+29*%%str]
    mova                m4, [tmpq+21*%%str]
    %%STORE_2X2          8,  9,  4,  5, 12, 11, 10, 0

    SWAP                 1, 10 ; zero
%else
    mova   [tmpq+ 1*%%str], m1
    mova   [tmpq+11*%%str], m2
    mova   [tmpq+15*%%str], m3
    mova   [tmpq+17*%%str], m4
    mova   [tmpq+19*%%str], m5
    pxor                m1, m1

    ; store t0-1 and t30-31
    mova                m2, [tmpq+ 0*%%str]
    mova                m3, [tmpq+ 4*%%str]
    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1

    ; store t2-3 and t28-29
    mova                m2, [tmpq+ 8*%%str]
    mova                m3, [tmpq+12*%%str]
    mova                m0, [tmpq+ 3*%%str]
    mova                m6, [tmpq+ 7*%%str]
    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1

    ; store t4-5 and t26-27
    mova                m2, [tmpq+16*%%str]
    mova                m3, [tmpq+20*%%str]
    mova                m0, [tmpq+ 1*%%str]
    %%STORE_2X2          2,  3,  7,  0, 4, 5, 1

    ; store t6-7 and t24-25
    mova                m2, [tmpq+24*%%str]
    mova                m3, [tmpq+28*%%str]
    mova                m0, [tmpq+17*%%str]
    mova                m6, [tmpq+19*%%str]
    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1

    ; store t8-9 and t22-23
    mova                m2, [tmpq+30*%%str]
    mova                m3, [tmpq+26*%%str]
    mova                m0, [tmpq+25*%%str]
    mova                m6, [tmpq+ 5*%%str]
    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1

    ; store t10-11 and t20-21
    mova                m2, [tmpq+22*%%str]
    mova                m3, [tmpq+18*%%str]
    mova                m0, [tmpq+11*%%str]
    mova                m6, [tmpq+15*%%str]
    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1

    ; store t12-13 and t18-19
    mova                m2, [tmpq+14*%%str]
    mova                m3, [tmpq+10*%%str]
    mova                m6, [tmpq+13*%%str]
    mova                m0, [tmpq+ 9*%%str]
    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1

    ; store t14-17
    mova                m2, [tmpq+ 6*%%str]
    mova                m3, [tmpq+ 2*%%str]
    mova                m6, [tmpq+29*%%str]
    mova                m0, [tmpq+21*%%str]
    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1, 0
%endif
%undef ROUND_REG
%endif
%endmacro

%macro VP9_IDCT_IDCT_32x32_ADD_XMM 1
INIT_XMM %1
cglobal vp9_idct_idct_32x32_add, 0, 6 + ARCH_X86_64 * 3, 16, 2048, dst, stride, block, eob
    movifnidn         eobd, dword eobm
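    ; dispatch on the position of the last nonzero coefficient (eob):
    ; eob <= 1 is dc-only; on ssse3, eob <= 34 means all nonzero
    ; coefficients fall inside the top-left 8x8 and eob <= 135 inside
    ; the top-left 16x16, so a pruned transform can be used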
%if cpuflag(ssse3)
    cmp eobd, 135
    jg .idctfull
    cmp eobd, 34
    jg .idct16x16
    cmp eobd, 1
    jg .idct8x8
%else
    cmp eobd, 1
    jg .idctfull
%endif

    ; dc-only case
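    ; with only the dc coefficient set, both 1-D passes collapse to a
    ; multiply by 11585 with a rounding shift by 14; the result then gets
    ; the usual +32 >> 6 store rounding and is splatted over the block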
    movifnidn       blockq, blockmp
    movifnidn         dstq, dstmp
    movifnidn      strideq, stridemp
%if cpuflag(ssse3)
    movd                m0, [blockq]
    mova                m1, [pw_11585x2]
    pmulhrsw            m0, m1
    pmulhrsw            m0, m1
%else
    DEFINE_ARGS dst, stride, block, coef
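    ; scalar equivalent of the two pmulhrsw by pw_11585x2 above, with the
    ; final +32 and >> 6 folded into the second rounding shift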
    movsx            coefd, word [blockq]
    imul             coefd, 11585
    add              coefd, 8192
    sar              coefd, 14
    imul             coefd, 11585
    add              coefd, (32 << 14) + 8192
    sar              coefd, 14 + 6
    movd                m0, coefd
%endif
    SPLATW              m0, m0, q0000
%if cpuflag(ssse3)
    pmulhrsw            m0, [pw_512]
%endif
    pxor                m5, m5
    movd          [blockq], m5
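    ; add the splatted dc value to all 32 rows; the last row is stored
    ; outside the %rep so dstq is not advanced past the block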
%rep 31
    VP9_STORE_2XFULL    0, 1, 2, 3, 4, 5, mmsize
    add               dstq, strideq
%endrep
    VP9_STORE_2XFULL    0, 1, 2, 3, 4, 5, mmsize
    RET

%if ARCH_X86_64
    DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
%else
%define dst_bakq r0mp
%endif
%if cpuflag(ssse3)
.idct8x8:
%if ARCH_X86_32
    DEFINE_ARGS block, u1, u2, u3, u4, tmp
    mov             blockq, r2mp
%endif
    mov               tmpq, rsp
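    ; pass 1 over the top-left 8x8 nonzero area; intermediate results go
    ; to the stack scratch buffer at tmpq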
    VP9_IDCT32_1D   blockq, 1, 8

%if ARCH_X86_32
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
    mov            strideq, r1mp
%define cntd dword r3m
%endif
    mov          stride30q, strideq         ; stride
    lea           stride2q, [strideq*2]     ; stride*2
    shl          stride30q, 5               ; stride*32
    mov               cntd, 4
    sub          stride30q, stride2q        ; stride*30
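    ; pass 2: each iteration transforms an 8-pixel-wide strip and adds
    ; the residual to dst, writing rows from both ends (dstq from the
    ; top, dst_endq from row 30 upwards)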
.loop2_8x8:
    mov               dstq, dst_bakq
    lea           dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D     tmpq, 2, 8
    add           dst_bakq, 8
    add               tmpq, 16
    dec               cntd
    jg .loop2_8x8

    ; at the end of the loop, m1 should still be zero
    ; use that to zero out block coefficients
%if ARCH_X86_32
    DEFINE_ARGS block
    mov             blockq, r2mp
%endif
    ZERO_BLOCK      blockq, 64,  8, m1
    RET

.idct16x16:
%if ARCH_X86_32
    DEFINE_ARGS block, tmp, cnt
    mov             blockq, r2mp
%endif
    mov               cntd, 2
    mov               tmpq, rsp
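    ; pass 1 over the top-left 16x16 nonzero area, one xmm-wide group of
    ; 8 coefficient columns per iteration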
.loop1_16x16:
    VP9_IDCT32_1D   blockq, 1, 16
    add             blockq, 16
    add               tmpq, 512
    dec               cntd
    jg .loop1_16x16

%if ARCH_X86_64
    sub             blockq, 32
%else
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
    mov            strideq, r1mp
%define cntd dword r3m
%endif

    mov          stride30q, strideq         ; stride
    lea           stride2q, [strideq*2]     ; stride*2
    shl          stride30q, 5               ; stride*32
    mov               cntd, 4
    mov               tmpq, rsp
    sub          stride30q, stride2q        ; stride*30
.loop2_16x16:
    mov               dstq, dst_bakq
    lea           dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D     tmpq, 2, 16
    add           dst_bakq, 8
    add               tmpq, 16
    dec               cntd
    jg .loop2_16x16

    ; at the end of the loop, m1 should still be zero
    ; use that to zero out block coefficients
%if ARCH_X86_32
    DEFINE_ARGS block
    mov             blockq, r2mp
%endif
    ZERO_BLOCK      blockq, 64, 16, m1
    RET
%endif

.idctfull:
%if ARCH_X86_32
    DEFINE_ARGS block, tmp, cnt
    mov             blockq, r2mp
%endif
    mov               cntd, 4
    mov               tmpq, rsp
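    ; full 32x32: four pass-1 iterations of 8 coefficient columns each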
.loop1_full:
    VP9_IDCT32_1D   blockq, 1
    add             blockq, 16
    add               tmpq, 512
    dec               cntd
    jg .loop1_full

%if ARCH_X86_64
    sub             blockq, 64
%else
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
    mov            strideq, r1mp
%define cntd dword r3m
%endif

    mov          stride30q, strideq         ; stride
    lea           stride2q, [strideq*2]     ; stride*2
    shl          stride30q, 5               ; stride*32
    mov               cntd, 4
    mov               tmpq, rsp
    sub          stride30q, stride2q        ; stride*30
.loop2_full:
    mov               dstq, dst_bakq
    lea           dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D     tmpq, 2
    add           dst_bakq, 8
    add               tmpq, 16
    dec               cntd
    jg .loop2_full

    ; at the end of the loop, m1 should still be zero
    ; use that to zero out block coefficients
%if ARCH_X86_32
    DEFINE_ARGS block
    mov             blockq, r2mp
%endif
    ZERO_BLOCK      blockq, 64, 32, m1
    RET
%endmacro

VP9_IDCT_IDCT_32x32_ADD_XMM sse2
VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx

; this is almost identical to VP9_STORE_2X, but it does two rows
; for slightly improved interleaving, and it omits vpermq since the
; input is DC so all values are identical
%macro VP9_STORE_YMM_DC_2X2 6 ; reg, tmp1, tmp2, tmp3, tmp4, zero
    mova               m%2, [dstq]
    mova               m%4, [dstq+strideq]
    punpckhbw          m%3, m%2, m%6
    punpcklbw          m%2, m%6
    punpckhbw          m%5, m%4, m%6
    punpcklbw          m%4, m%6
    paddw              m%3, m%1
    paddw              m%2, m%1
    paddw              m%5, m%1
    paddw              m%4, m%1
    packuswb           m%2, m%3
    packuswb           m%4, m%5
    mova  [dstq+strideq*0], m%2
    mova  [dstq+strideq*1], m%4
%endmacro

%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
INIT_YMM avx2
cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
    cmp eobd, 135
    jg .idctfull
    cmp eobd, 1
    jg .idct16x16

    ; dc-only case
    mova                m1, [pw_11585x2]
    vpbroadcastw        m0, [blockq]
    pmulhrsw            m0, m1
    pmulhrsw            m0, m1
    pxor                m5, m5
    pmulhrsw            m0, [pw_512]
    movd          [blockq], xm5

    DEFINE_ARGS dst, stride, cnt
    mov               cntd, 16
.loop_dc:
    VP9_STORE_YMM_DC_2X2 0, 1, 2, 3, 4, 5
    lea               dstq, [dstq+2*strideq]
    dec               cntd
    jg .loop_dc
    RET

    DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
.idct16x16:
    mov               tmpq, rsp
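    ; a single ymm-wide pass-1 call covers all 16 nonzero coefficient
    ; columns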
    VP9_IDCT32_1D   blockq, 1, 16

    mov          stride30q, strideq         ; stride
    lea           stride2q, [strideq*2]     ; stride*2
    shl          stride30q, 5               ; stride*32
    mov               cntd, 2
    sub          stride30q, stride2q        ; stride*30
.loop2_16x16:
    mov               dstq, dst_bakq
    lea           dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D     tmpq, 2, 16
    add           dst_bakq, 16
    add               tmpq, 32
    dec               cntd
    jg .loop2_16x16

    ; at the end of the loop, m1 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK      blockq, 64, 16, m1
    RET

.idctfull:
    mov               cntd, 2
    mov               tmpq, rsp
.loop1_full:
    VP9_IDCT32_1D   blockq, 1
    add             blockq, 32
    add               tmpq, 1024
    dec               cntd
    jg .loop1_full

    sub             blockq, 64

    mov          stride30q, strideq         ; stride
    lea           stride2q, [strideq*2]     ; stride*2
    shl          stride30q, 5               ; stride*32
    mov               cntd, 2
    mov               tmpq, rsp
    sub          stride30q, stride2q        ; stride*30
.loop2_full:
    mov               dstq, dst_bakq
    lea           dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D     tmpq, 2
    add           dst_bakq, 16
    add               tmpq, 32
    dec               cntd
    jg .loop2_full

    ; at the end of the loop, m1 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK      blockq, 64, 32, m1
    RET
%endif