;******************************************************************************
;* FFT transform with SSE/3DNow optimizations
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2011 Vitor Sessak
;*
;* This algorithm (though not any of the implementation details) is
;* based on libdjbfft by D. J. Bernstein.
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

; These functions are not individually interchangeable with the C versions.
; While C takes arrays of FFTComplex, SSE/3DNow leave intermediate results
; in blocks as convenient to the vector size,
; i.e. {4x real, 4x imaginary, 4x real, ...} (or 2x respectively).
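;
; For example (illustrative, SSE with 4-float vectors): an 8-point buffer
; that C stores as {r0,i0,r1,i1,...,r7,i7} is kept between passes as
; {r0,r1,r2,r3} {i0,i1,i2,i3} {r4,r5,r6,r7} {i4,i5,i6,i7}.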

%include "libavutil/x86/x86util.asm"

%if ARCH_X86_64
%define pointer resq
%else
%define pointer resd
%endif

SECTION_RODATA

struc FFTContext
    .nbits:    resd 1
    .reverse:  resd 1
    .revtab:   pointer 1
    .tmpbuf:   pointer 1
    .mdctsize: resd 1
    .mdctbits: resd 1
    .tcos:     pointer 1
    .tsin:     pointer 1
    .fftperm:  pointer 1
    .fftcalc:  pointer 1
    .imdctcalc:pointer 1
    .imdcthalf:pointer 1
endstruc
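
; Note: this struc is assumed to mirror the layout of the C-side FFTContext;
; the offsets above are what the code below uses to read its fields, so the
; two declarations must be kept in sync.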

%define M_SQRT1_2 0.70710678118654752440
%define M_COS_PI_1_8 0.923879532511287
%define M_COS_PI_3_8 0.38268343236509

align 32
ps_cos16_1: dd 1.0, M_COS_PI_1_8, M_SQRT1_2, M_COS_PI_3_8, 1.0, M_COS_PI_1_8, M_SQRT1_2, M_COS_PI_3_8
ps_cos16_2: dd 0, M_COS_PI_3_8, M_SQRT1_2, M_COS_PI_1_8, 0, -M_COS_PI_3_8, -M_SQRT1_2, -M_COS_PI_1_8

ps_root2: times 8 dd M_SQRT1_2
ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2, -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
ps_p1p1m1p1: dd 0, 0, 1<<31, 0, 0, 0, 1<<31, 0
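; (1<<31 is the IEEE 754 sign bit of a float; XORing a lane with it negates
; that lane, which is how these ps_* masks implement per-lane sign flips)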

perm1: dd 0x00, 0x02, 0x03, 0x01, 0x03, 0x00, 0x02, 0x01
perm2: dd 0x00, 0x01, 0x02, 0x03, 0x01, 0x00, 0x02, 0x03
ps_p1p1m1p1root2: dd 1.0, 1.0, -1.0, 1.0, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2
ps_m1m1p1m1p1m1m1m1: dd 1<<31, 1<<31, 0, 1<<31, 0, 1<<31, 1<<31, 1<<31
ps_m1m1m1m1: times 4 dd 1<<31
ps_m1p1: dd 1<<31, 0

%assign i 16
%rep 13
cextern cos_ %+ i
%assign i i<<1
%endrep

%if ARCH_X86_64
    %define pointer dq
%else
    %define pointer dd
%endif

%macro IF0 1+
%endmacro
%macro IF1 1+
    %1
%endmacro

SECTION_TEXT

%macro T2_3DNOW 4 ; z0, z1, mem0, mem1
    mova     %1, %3
    mova     %2, %1
    pfadd    %1, %4
    pfsub    %2, %4
%endmacro

%macro T4_3DNOW 6 ; z0, z1, z2, z3, tmp0, tmp1
    mova     %5, %3
    pfsub    %3, %4
    pfadd    %5, %4 ; {t6,t5}
    pxor     %3, [ps_m1p1] ; {t8,t7}
    mova     %6, %1
    movd [r0+12], %3
    punpckhdq %3, [r0+8]
    pfadd    %1, %5 ; {r0,i0}
    pfsub    %6, %5 ; {r2,i2}
    mova     %4, %2
    pfadd    %2, %3 ; {r1,i1}
    pfsub    %4, %3 ; {r3,i3}
    SWAP     %3, %6
%endmacro

;  in: %1 = {r0,i0,r2,i2,r4,i4,r6,i6}
;      %2 = {r1,i1,r3,i3,r5,i5,r7,i7}
;      %3, %4, %5 tmp
; out: %1 = {r0,r1,r2,r3,i0,i1,i2,i3}
;      %2 = {r4,r5,r6,r7,i4,i5,i6,i7}
%macro T8_AVX 5
    vsubps     %5, %1, %2       ; v  = %1 - %2
    vaddps     %3, %1, %2       ; w  = %1 + %2
    vmulps     %2, %5, [ps_p1p1m1p1root2]  ; v *= vals1
    vpermilps  %2, %2, [perm1]
    vblendps   %1, %2, %3, 0x33 ; q = {w1,w2,v4,v2,w5,w6,v7,v6}
    vshufps    %5, %3, %2, 0x4e ; r = {w3,w4,v1,v3,w7,w8,v8,v5}
    vsubps     %4, %5, %1       ; s = r - q
    vaddps     %1, %5, %1       ; u = r + q
    vpermilps  %1, %1, [perm2]  ; k  = {u1,u2,u3,u4,u6,u5,u7,u8}
    vshufps    %5, %4, %1, 0xbb
    vshufps    %3, %4, %1, 0xee
    vperm2f128 %3, %3, %5, 0x13
    vxorps     %4, %4, [ps_m1m1p1m1p1m1m1m1]  ; s *= {1,1,-1,-1,1,-1,-1,-1}
    vshufps    %2, %1, %4, 0xdd
    vshufps    %1, %1, %4, 0x88
    vperm2f128 %4, %2, %1, 0x02 ; v  = {k1,k3,s1,s3,k2,k4,s2,s4}
    vperm2f128 %1, %1, %2, 0x13 ; w  = {k6,k8,s6,s8,k5,k7,s5,s7}
    vsubps     %5, %1, %3
    vblendps   %1, %5, %1, 0x55 ; w -= {0,s7,0,k7,0,s8,0,k8}
    vsubps     %2, %4, %1       ; %2 = v - w
    vaddps     %1, %4, %1       ; %1 = v + w
%endmacro

; In SSE mode do one fft4 transform
; in:  %1={r0,i0,r2,i2} %2={r1,i1,r3,i3}
; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3}
;
; In AVX mode do two fft4 transforms
; in:  %1={r0,i0,r2,i2,r4,i4,r6,i6} %2={r1,i1,r3,i3,r5,i5,r7,i7}
; out: %1={r0,r1,r2,r3,r4,r5,r6,r7} %2={i0,i1,i2,i3,i4,i5,i6,i7}
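;
; As a sketch of the arithmetic (using the t-names annotated below):
; t1=r0+r1 t2=i0+i1 t3=r0-r1 t4=i0-i1 t5=i2+i3 t6=r2+r3 t7=i2-i3 t8=r3-r2,
; then {r0',i0',r1',i1'} = {t1+t6, t2+t5, t3+t7, t4+t8}
; and  {r2',i2',r3',i3'} = {t1-t6, t2-t5, t3-t7, t4-t8}.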
%macro T4_SSE 3
    subps    %3, %1, %2       ; {t3,t4,-t8,t7}
    addps    %1, %1, %2       ; {t1,t2,t6,t5}
    xorps    %3, %3, [ps_p1p1m1p1]
    shufps   %2, %1, %3, 0xbe ; {t6,t5,t7,t8}
    shufps   %1, %1, %3, 0x44 ; {t1,t2,t3,t4}
    subps    %3, %1, %2       ; {r2,i2,r3,i3}
    addps    %1, %1, %2       ; {r0,i0,r1,i1}
    shufps   %2, %1, %3, 0xdd ; {i0,i1,i2,i3}
    shufps   %1, %1, %3, 0x88 ; {r0,r1,r2,r3}
%endmacro

; In SSE mode do one fft8 transform
; in:  %1={r0,r1,r2,r3} %2={i0,i1,i2,i3} %3={r4,i4,r6,i6} %4={r5,i5,r7,i7}
; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3} %3={r4,r5,r6,r7} %4={i4,i5,i6,i7}
;
; In AVX mode do two fft8 transforms
; in:  %1={r0,i0,r2,i2,r8, i8, r10,i10} %2={r1,i1,r3,i3,r9, i9, r11,i11}
;      %3={r4,i4,r6,i6,r12,i12,r14,i14} %4={r5,i5,r7,i7,r13,i13,r15,i15}
; out: %1={r0,r1,r2,r3,r8, r9, r10,r11} %2={i0,i1,i2,i3,i8, i9, i10,i11}
;      %3={r4,r5,r6,r7,r12,r13,r14,r15} %4={i4,i5,i6,i7,i12,i13,i14,i15}
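;
; The M_SQRT1_2 factors in ps_root2/ps_root2mppm implement the
; (+/-1 +/- i)/sqrt(2) eighth-root-of-unity twiddles of the 8-point
; transform.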
%macro T8_SSE 6
    addps    %6, %3, %4       ; {t1,t2,t3,t4}
    subps    %3, %3, %4       ; {r5,i5,r7,i7}
    shufps   %4, %3, %3, 0xb1 ; {i5,r5,i7,r7}
    mulps    %3, %3, [ps_root2mppm] ; {-r5,i5,r7,-i7}
    mulps    %4, %4, [ps_root2]
    addps    %3, %3, %4       ; {t8,t7,ta,t9}
    shufps   %4, %6, %3, 0x9c ; {t1,t4,t7,ta}
    shufps   %6, %6, %3, 0x36 ; {t3,t2,t9,t8}
    subps    %3, %6, %4       ; {t6,t5,tc,tb}
    addps    %6, %6, %4       ; {t1,t2,t9,ta}
    shufps   %5, %6, %3, 0x8d ; {t2,ta,t6,tc}
    shufps   %6, %6, %3, 0xd8 ; {t1,t9,t5,tb}
    subps    %3, %1, %6       ; {r4,r5,r6,r7}
    addps    %1, %1, %6       ; {r0,r1,r2,r3}
    subps    %4, %2, %5       ; {i4,i5,i6,i7}
    addps    %2, %2, %5       ; {i0,i1,i2,i3}
%endmacro

; scheduled for cpu-bound sizes
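; (a pass combines the even-half results in Z(0..3) with the two odd
; quarters in Z(4..5)/Z2(6..7): the odd parts are rotated by the twiddles,
; effectively (r2,i2)*(wre,-wim) and (r3,i3)*(wre,wim) per the annotations
; below, then added to and subtracted from the even half)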
%macro PASS_SMALL 3 ; (to load m4-m7), wre, wim
IF%1 mova    m4, Z(4)
IF%1 mova    m5, Z(5)
    mova     m0, %2 ; wre
    mova     m1, %3 ; wim
    mulps    m2, m4, m0 ; r2*wre
IF%1 mova    m6, Z2(6)
    mulps    m3, m5, m1 ; i2*wim
IF%1 mova    m7, Z2(7)
    mulps    m4, m4, m1 ; r2*wim
    mulps    m5, m5, m0 ; i2*wre
    addps    m2, m2, m3 ; r2*wre + i2*wim
    mulps    m3, m1, m7 ; i3*wim
    subps    m5, m5, m4 ; i2*wre - r2*wim
    mulps    m1, m1, m6 ; r3*wim
    mulps    m4, m0, m6 ; r3*wre
    mulps    m0, m0, m7 ; i3*wre
    subps    m4, m4, m3 ; r3*wre - i3*wim
    mova     m3, Z(0)
    addps    m0, m0, m1 ; i3*wre + r3*wim
    subps    m1, m4, m2 ; t3
    addps    m4, m4, m2 ; t5
    subps    m3, m3, m4 ; r2
    addps    m4, m4, Z(0) ; r0
    mova     m6, Z(2)
    mova   Z(4), m3
    mova   Z(0), m4
    subps    m3, m5, m0 ; t4
    subps    m4, m6, m3 ; r3
    addps    m3, m3, m6 ; r1
    mova  Z2(6), m4
    mova   Z(2), m3
    mova     m2, Z(3)
    addps    m3, m5, m0 ; t6
    subps    m2, m2, m1 ; i3
    mova     m7, Z(1)
    addps    m1, m1, Z(3) ; i1
    mova  Z2(7), m2
    mova   Z(3), m1
    subps    m4, m7, m3 ; i2
    addps    m3, m3, m7 ; i0
    mova   Z(5), m4
    mova   Z(1), m3
%endmacro

; scheduled to avoid store->load aliasing
%macro PASS_BIG 1 ; (!interleave)
    mova     m4, Z(4) ; r2
    mova     m5, Z(5) ; i2
    mova     m0, [wq] ; wre
    mova     m1, [wq+o1q] ; wim
    mulps    m2, m4, m0 ; r2*wre
    mova     m6, Z2(6) ; r3
    mulps    m3, m5, m1 ; i2*wim
    mova     m7, Z2(7) ; i3
    mulps    m4, m4, m1 ; r2*wim
    mulps    m5, m5, m0 ; i2*wre
    addps    m2, m2, m3 ; r2*wre + i2*wim
    mulps    m3, m1, m7 ; i3*wim
    mulps    m1, m1, m6 ; r3*wim
    subps    m5, m5, m4 ; i2*wre - r2*wim
    mulps    m4, m0, m6 ; r3*wre
    mulps    m0, m0, m7 ; i3*wre
    subps    m4, m4, m3 ; r3*wre - i3*wim
    mova     m3, Z(0)
    addps    m0, m0, m1 ; i3*wre + r3*wim
    subps    m1, m4, m2 ; t3
    addps    m4, m4, m2 ; t5
    subps    m3, m3, m4 ; r2
    addps    m4, m4, Z(0) ; r0
    mova     m6, Z(2)
    mova   Z(4), m3
    mova   Z(0), m4
    subps    m3, m5, m0 ; t4
    subps    m4, m6, m3 ; r3
    addps    m3, m3, m6 ; r1
IF%1 mova Z2(6), m4
IF%1 mova  Z(2), m3
    mova     m2, Z(3)
    addps    m5, m5, m0 ; t6
    subps    m2, m2, m1 ; i3
    mova     m7, Z(1)
    addps    m1, m1, Z(3) ; i1
IF%1 mova Z2(7), m2
IF%1 mova  Z(3), m1
    subps    m6, m7, m5 ; i2
    addps    m5, m5, m7 ; i0
IF%1 mova  Z(5), m6
IF%1 mova  Z(1), m5
%if %1==0
    INTERL m1, m3, m7, Z, 2
    INTERL m2, m4, m0, Z2, 6

    mova     m1, Z(0)
    mova     m2, Z(4)

    INTERL m5, m1, m3, Z, 0
    INTERL m6, m2, m7, Z, 4
%endif
%endmacro

%macro PUNPCK 3
    mova      %3, %1
    punpckldq %1, %2
    punpckhdq %3, %2
%endmacro

%define Z(x) [r0+mmsize*x]
%define Z2(x) [r0+mmsize*x]
%define ZH(x) [r0+mmsize*x+mmsize/2]

INIT_YMM avx

%if HAVE_AVX_EXTERNAL
align 16
fft8_avx:
    mova      m0, Z(0)
    mova      m1, Z(1)
    T8_AVX    m0, m1, m2, m3, m4
    mova      Z(0), m0
    mova      Z(1), m1
    ret


align 16
fft16_avx:
    mova       m2, Z(2)
    mova       m3, Z(3)
    T4_SSE     m2, m3, m7

    mova       m0, Z(0)
    mova       m1, Z(1)
    T8_AVX     m0, m1, m4, m5, m7

    mova       m4, [ps_cos16_1]
    mova       m5, [ps_cos16_2]
    vmulps     m6, m2, m4
    vmulps     m7, m3, m5
    vaddps     m7, m7, m6
    vmulps     m2, m2, m5
    vmulps     m3, m3, m4
    vsubps     m3, m3, m2
    vblendps   m2, m7, m3, 0xf0
    vperm2f128 m3, m7, m3, 0x21
    vaddps     m4, m2, m3
    vsubps     m2, m3, m2
    vperm2f128 m2, m2, m2, 0x01
    vsubps     m3, m1, m2
    vaddps     m1, m1, m2
    vsubps     m5, m0, m4
    vaddps     m0, m0, m4
    vextractf128   Z(0), m0, 0
    vextractf128  ZH(0), m1, 0
    vextractf128   Z(1), m0, 1
    vextractf128  ZH(1), m1, 1
    vextractf128   Z(2), m5, 0
    vextractf128  ZH(2), m3, 0
    vextractf128   Z(3), m5, 1
    vextractf128  ZH(3), m3, 1
    ret

align 16
fft32_avx:
    call fft16_avx

    mova m0, Z(4)
    mova m1, Z(5)

    T4_SSE      m0, m1, m4

    mova m2, Z(6)
    mova m3, Z(7)

    T8_SSE      m0, m1, m2, m3, m4, m6
    ; m0={r0,r1,r2,r3,r8, r9, r10,r11} m1={i0,i1,i2,i3,i8, i9, i10,i11}
    ; m2={r4,r5,r6,r7,r12,r13,r14,r15} m3={i4,i5,i6,i7,i12,i13,i14,i15}

    vperm2f128  m4, m0, m2, 0x20
    vperm2f128  m5, m1, m3, 0x20
    vperm2f128  m6, m0, m2, 0x31
    vperm2f128  m7, m1, m3, 0x31

    PASS_SMALL 0, [cos_32], [cos_32+32]

    ret

fft32_interleave_avx:
    call fft32_avx
    mov r2d, 32
.deint_loop:
    mova     m2, Z(0)
    mova     m3, Z(1)
    vunpcklps      m0, m2, m3
    vunpckhps      m1, m2, m3
    vextractf128   Z(0), m0, 0
    vextractf128  ZH(0), m1, 0
    vextractf128   Z(1), m0, 1
    vextractf128  ZH(1), m1, 1
    add r0, mmsize*2
    sub r2d, mmsize/4
    jg .deint_loop
    ret

%endif

INIT_XMM sse

align 16
fft4_avx:
fft4_sse:
    mova     m0, Z(0)
    mova     m1, Z(1)
    T4_SSE   m0, m1, m2
    mova   Z(0), m0
    mova   Z(1), m1
    ret

align 16
fft8_sse:
    mova     m0, Z(0)
    mova     m1, Z(1)
    T4_SSE   m0, m1, m2
    mova     m2, Z(2)
    mova     m3, Z(3)
    T8_SSE   m0, m1, m2, m3, m4, m5
    mova   Z(0), m0
    mova   Z(1), m1
    mova   Z(2), m2
    mova   Z(3), m3
    ret

align 16
fft16_sse:
    mova     m0, Z(0)
    mova     m1, Z(1)
    T4_SSE   m0, m1, m2
    mova     m2, Z(2)
    mova     m3, Z(3)
    T8_SSE   m0, m1, m2, m3, m4, m5
    mova     m4, Z(4)
    mova     m5, Z(5)
    mova   Z(0), m0
    mova   Z(1), m1
    mova   Z(2), m2
    mova   Z(3), m3
    T4_SSE   m4, m5, m6
    mova     m6, Z2(6)
    mova     m7, Z2(7)
    T4_SSE   m6, m7, m0
    PASS_SMALL 0, [cos_16], [cos_16+16]
    ret


%macro FFT48_3DNOW 0
align 16
fft4 %+ SUFFIX:
    T2_3DNOW m0, m1, Z(0), Z(1)
    mova     m2, Z(2)
    mova     m3, Z(3)
    T4_3DNOW m0, m1, m2, m3, m4, m5
    PUNPCK   m0, m1, m4
    PUNPCK   m2, m3, m5
    mova   Z(0), m0
    mova   Z(1), m4
    mova   Z(2), m2
    mova   Z(3), m5
    ret

align 16
fft8 %+ SUFFIX:
    T2_3DNOW m0, m1, Z(0), Z(1)
    mova     m2, Z(2)
    mova     m3, Z(3)
    T4_3DNOW m0, m1, m2, m3, m4, m5
    mova   Z(0), m0
    mova   Z(2), m2
    T2_3DNOW m4, m5,  Z(4),  Z(5)
    T2_3DNOW m6, m7, Z2(6), Z2(7)
    PSWAPD   m0, m5
    PSWAPD   m2, m7
    pxor     m0, [ps_m1p1]
    pxor     m2, [ps_m1p1]
    pfsub    m5, m0
    pfadd    m7, m2
    pfmul    m5, [ps_root2]
    pfmul    m7, [ps_root2]
    T4_3DNOW m1, m3, m5, m7, m0, m2
    mova   Z(5), m5
    mova  Z2(7), m7
    mova     m0, Z(0)
    mova     m2, Z(2)
    T4_3DNOW m0, m2, m4, m6, m5, m7
    PUNPCK   m0, m1, m5
    PUNPCK   m2, m3, m7
    mova   Z(0), m0
    mova   Z(1), m5
    mova   Z(2), m2
    mova   Z(3), m7
    PUNPCK   m4,  Z(5), m5
    PUNPCK   m6, Z2(7), m7
    mova   Z(4), m4
    mova   Z(5), m5
    mova  Z2(6), m6
    mova  Z2(7), m7
    ret
%endmacro

%if ARCH_X86_32
INIT_MMX 3dnowext
FFT48_3DNOW

INIT_MMX 3dnow
FFT48_3DNOW
%endif

%define Z(x) [zcq + o1q*(x&6) + mmsize*(x&1)]
%define Z2(x) [zcq + o3q + mmsize*(x&1)]
%define ZH(x) [zcq + o1q*(x&6) + mmsize*(x&1) + mmsize/2]
%define Z2H(x) [zcq + o3q + mmsize*(x&1) + mmsize/2]

%macro DECL_PASS 2+ ; name, payload
align 16
%1:
DEFINE_ARGS zc, w, n, o1, o3
    lea o3q, [nq*3]
    lea o1q, [nq*8]
    shl o3q, 4
.loop:
    %2
    add zcq, mmsize*2
    add  wq, mmsize
    sub  nd, mmsize/8
    jg .loop
    rep ret
%endmacro

%macro FFT_DISPATCH 2; clobbers 5 GPRs, 8 XMMs
    lea r2, [dispatch_tab%1]
    mov r2, [r2 + (%2q-2)*gprsize]
%ifdef PIC
    lea r3, [$$]
    add r2, r3
%endif
    call r2
%endmacro ; FFT_DISPATCH
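
; Example: with %2q holding log2(n), entry (%2q - 2) of the dispatch table
; is taken, so nbits=2 calls fft4 and nbits=5 calls fft32.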

INIT_YMM avx

%if HAVE_AVX_EXTERNAL
%macro INTERL_AVX 5
    vunpckhps      %3, %2, %1
    vunpcklps      %2, %2, %1
    vextractf128   %4(%5), %2, 0
    vextractf128  %4 %+ H(%5), %3, 0
    vextractf128   %4(%5 + 1), %2, 1
    vextractf128  %4 %+ H(%5 + 1), %3, 1
%endmacro

%define INTERL INTERL_AVX

DECL_PASS pass_avx, PASS_BIG 1
DECL_PASS pass_interleave_avx, PASS_BIG 0

cglobal fft_calc, 2,5,8
    mov     r3d, [r0 + FFTContext.nbits]
    mov     r0, r1
    mov     r1, r3
    FFT_DISPATCH _interleave %+ SUFFIX, r1
    REP_RET

%endif

INIT_XMM sse

%macro INTERL_SSE 5
    mova     %3, %2
    unpcklps %2, %1
    unpckhps %3, %1
    mova  %4(%5), %2
    mova  %4(%5+1), %3
%endmacro

%define INTERL INTERL_SSE

DECL_PASS pass_sse, PASS_BIG 1
DECL_PASS pass_interleave_sse, PASS_BIG 0

582
%macro FFT_CALC_FUNC 0
583 584 585 586 587 588 589 590 591
cglobal fft_calc, 2,5,8
    mov     r3d, [r0 + FFTContext.nbits]
    PUSH    r1
    PUSH    r3
    mov     r0, r1
    mov     r1, r3
    FFT_DISPATCH _interleave %+ SUFFIX, r1
    POP     rcx
    POP     r4
    cmp     rcx, 3+(mmsize/16)
    jg      .end
    mov     r2, -1
    add     rcx, 3
    shl     r2, cl
    sub     r4, r2
.loop:
%if mmsize == 8
    PSWAPD  m0, [r4 + r2 + 4]
    mova [r4 + r2 + 4], m0
%else
    movaps   xmm0, [r4 + r2]
    movaps   xmm1, xmm0
    unpcklps xmm0, [r4 + r2 + 16]
    unpckhps xmm1, [r4 + r2 + 16]
    movaps   [r4 + r2],      xmm0
    movaps   [r4 + r2 + 16], xmm1
%endif
    add      r2, mmsize*2
    jl       .loop
.end:
%if cpuflag(3dnow)
    femms
    RET
%else
    REP_RET
%endif
%endmacro

%if ARCH_X86_32
INIT_MMX 3dnow
FFT_CALC_FUNC
INIT_MMX 3dnowext
FFT_CALC_FUNC
%endif
INIT_XMM sse
FFT_CALC_FUNC

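; fft_permute: scatter the input through revtab into tmpbuf, two complex
; values per iteration, then copy the permuted buffer back over the input
; in 32-byte blocks.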
cglobal fft_permute, 2,7,1
    mov     r4,  [r0 + FFTContext.revtab]
    mov     r5,  [r0 + FFTContext.tmpbuf]
    mov     ecx, [r0 + FFTContext.nbits]
    mov     r2, 1
    shl     r2, cl
    xor     r0, r0
%if ARCH_X86_32
    mov     r1, r1m
%endif
.loop:
    movaps  xmm0, [r1 + 8*r0]
    movzx   r6, word [r4 + 2*r0]
    movzx   r3, word [r4 + 2*r0 + 2]
    movlps  [r5 + 8*r6], xmm0
    movhps  [r5 + 8*r3], xmm0
    add     r0, 2
    cmp     r0, r2
    jl      .loop
    shl     r2, 3
    add     r1, r2
    add     r5, r2
    neg     r2
; nbits >= 2 (FFT4) and sizeof(FFTComplex)=8 => at least 32B
.loopcopy:
    movaps  xmm0, [r5 + r2]
    movaps  xmm1, [r5 + r2 + 16]
    movaps  [r1 + r2], xmm0
    movaps  [r1 + r2 + 16], xmm1
    add     r2, 32
    jl      .loopcopy
    REP_RET

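; imdct_calc: run imdct_half at an offset into the output buffer, then
; reconstruct the full window by mirroring: each vector is reversed
; (shufps 0x1b, or PSWAPD for 3DNow) and one copy is sign-flipped via
; ps_m1m1m1m1.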
%macro IMDCT_CALC_FUNC 0
cglobal imdct_calc, 3,5,3
    mov     r3d, [r0 + FFTContext.mdctsize]
    mov     r4,  [r0 + FFTContext.imdcthalf]
    add     r1,  r3
    PUSH    r3
    PUSH    r1
%if ARCH_X86_32
    push    r2
    push    r1
    push    r0
%else
    sub     rsp, 8
%endif
    call    r4
%if ARCH_X86_32
    add     esp, 12
%else
    add     rsp, 8
%endif
    POP     r1
    POP     r3
    lea     r0, [r1 + 2*r3]
    mov     r2, r3
    sub     r3, mmsize
    neg     r2
    mova    m2, [ps_m1m1m1m1]
.loop:
%if mmsize == 8
    PSWAPD  m0, [r1 + r3]
    PSWAPD  m1, [r0 + r2]
    pxor    m0, m2
%else
    mova    m0, [r1 + r3]
    mova    m1, [r0 + r2]
    shufps  m0, m0, 0x1b
    shufps  m1, m1, 0x1b
    xorps   m0, m2
%endif
    mova [r0 + r3], m1
    mova [r1 + r2], m0
    sub     r3, mmsize
    add     r2, mmsize
    jl      .loop
%if cpuflag(3dnow)
    femms
    RET
%else
    REP_RET
%endif
%endmacro

%if ARCH_X86_32
INIT_MMX 3dnow
IMDCT_CALC_FUNC
INIT_MMX 3dnowext
IMDCT_CALC_FUNC
%endif

INIT_XMM sse
IMDCT_CALC_FUNC

%if ARCH_X86_32
INIT_MMX 3dnow
%define mulps pfmul
%define addps pfadd
%define subps pfsub
%define unpcklps punpckldq
%define unpckhps punpckhdq
DECL_PASS pass_3dnow, PASS_SMALL 1, [wq], [wq+o1q]
DECL_PASS pass_interleave_3dnow, PASS_BIG 0
%define pass_3dnowext pass_3dnow
%define pass_interleave_3dnowext pass_interleave_3dnow
%endif

%ifdef PIC
%define SECTION_REL - $$
%else
%define SECTION_REL
%endif

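; DECL_FFT: instantiate fft(2^nbits) routines up to 2^16 points using the
; split-radix recursion: fftN runs fft(N/2) on the even half and fft(N/4)
; on each odd quarter, then tail-jumps into the pass routine with the
; cos_N twiddle table to combine the three parts.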
%macro DECL_FFT 1-2 ; nbits, suffix
%ifidn %0, 1
%xdefine fullsuffix SUFFIX
%else
%xdefine fullsuffix %2 %+ SUFFIX
%endif
%xdefine list_of_fft fft4 %+ SUFFIX SECTION_REL, fft8 %+ SUFFIX SECTION_REL
%if %1>=5
%xdefine list_of_fft list_of_fft, fft16 %+ SUFFIX SECTION_REL
%endif
%if %1>=6
%xdefine list_of_fft list_of_fft, fft32 %+ fullsuffix SECTION_REL
%endif

%assign n 1<<%1
%rep 17-%1
%assign n2 n/2
%assign n4 n/4
%xdefine list_of_fft list_of_fft, fft %+ n %+ fullsuffix SECTION_REL

align 16
fft %+ n %+ fullsuffix:
    call fft %+ n2 %+ SUFFIX
    add r0, n*4 - (n&(-2<<%1))
    call fft %+ n4 %+ SUFFIX
    add r0, n*2 - (n2&(-2<<%1))
    call fft %+ n4 %+ SUFFIX
    sub r0, n*6 + (n2&(-2<<%1))
    lea r1, [cos_ %+ n]
    mov r2d, n4/2
    jmp pass %+ fullsuffix

%assign n n*2
%endrep
%undef n

align 8
dispatch_tab %+ fullsuffix: pointer list_of_fft
%endmacro ; DECL_FFT

%if HAVE_AVX_EXTERNAL
INIT_YMM avx
DECL_FFT 6
DECL_FFT 6, _interleave
%endif
INIT_XMM sse
DECL_FFT 5
DECL_FFT 5, _interleave
%if ARCH_X86_32
INIT_MMX 3dnow
DECL_FFT 4
DECL_FFT 4, _interleave
INIT_MMX 3dnowext
DECL_FFT 4
DECL_FFT 4, _interleave
%endif

INIT_XMM sse
%undef mulps
%undef addps
%undef subps
%undef unpcklps
%undef unpckhps

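; PREROTATER: the IMDCT pre-rotation. Pairs of samples taken from the two
; ends of the input are rotated by the (tcos, tsin) twiddles; per pair
; this computes (a sketch, with deinterleaved samples e and o):
; re = o*tcos[j] - e*tsin[j], im = e*tcos[j] + o*tsin[j].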
%macro PREROTATER 5 ;-2*k, 2*k, input+n4, tcos+n8, tsin+n8
%if mmsize == 8 ; j*2+2-n4, n4-2-j*2, input+n4, tcos+n8, tsin+n8
    PSWAPD     m0, [%3+%2*4]
    movq       m2, [%3+%1*4-8]
    movq       m3, m0
    punpckldq  m0, m2
    punpckhdq  m2, m3
    movd       m1, [%4+%1*2-4] ; tcos[j]
    movd       m3, [%4+%2*2]   ; tcos[n4-j-1]
    punpckldq  m1, [%5+%1*2-4] ; tsin[j]
    punpckldq  m3, [%5+%2*2]   ; tsin[n4-j-1]

    mova       m4, m0
    PSWAPD     m5, m1
    pfmul      m0, m1
    pfmul      m4, m5
    mova       m6, m2
    PSWAPD     m5, m3
    pfmul      m2, m3
    pfmul      m6, m5
%if cpuflag(3dnowext)
    pfpnacc    m0, m4
    pfpnacc    m2, m6
%else
    SBUTTERFLY dq, 0, 4, 1
    SBUTTERFLY dq, 2, 6, 3
    pxor       m4, m7
    pxor       m6, m7
    pfadd      m0, m4
    pfadd      m2, m6
%endif
%else
840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859
    movaps   xmm0, [%3+%2*4]
    movaps   xmm1, [%3+%1*4-0x10]
    movaps   xmm2, xmm0
    shufps   xmm0, xmm1, 0x88
    shufps   xmm1, xmm2, 0x77
    movlps   xmm4, [%4+%2*2]
    movlps   xmm5, [%5+%2*2+0x0]
    movhps   xmm4, [%4+%1*2-0x8]
    movhps   xmm5, [%5+%1*2-0x8]
    movaps   xmm2, xmm0
    movaps   xmm3, xmm1
    mulps    xmm0, xmm5
    mulps    xmm1, xmm4
    mulps    xmm2, xmm4
    mulps    xmm3, xmm5
    subps    xmm1, xmm0
    addps    xmm2, xmm3
    movaps   xmm0, xmm1
    unpcklps xmm1, xmm2
    unpckhps xmm0, xmm2
%endif
%endmacro

%macro CMUL 6 ; j, reg0, reg1, z, tcos, tsin
    mulps      m6, %3, [%5+%1]
    mulps      m7, %2, [%5+%1]
    mulps      %2, %2, [%6+%1]
    mulps      %3, %3, [%6+%1]
    subps      %2, %2, m6
    addps      %3, %3, m7
%endmacro
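
; Per lane this is a complex rotation by the post-twiddle:
; %2' = %2*tsin[j] - %3*tcos[j] and %3' = %3*tsin[j] + %2*tcos[j],
; with %5 = tcos base and %6 = tsin base (%4 is unused here).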

%macro POSROTATESHUF_AVX 5 ;j, k, z+n8, tcos+n8, tsin+n8
.post:
    vmovaps      ymm1,   [%3+%1*2]
    vmovaps      ymm0,   [%3+%1*2+0x20]
    vmovaps      ymm3,   [%3+%2*2]
    vmovaps      ymm2,   [%3+%2*2+0x20]

    CMUL         %1, ymm0, ymm1, %3, %4, %5
    CMUL         %2, ymm2, ymm3, %3, %4, %5
    vshufps      ymm1, ymm1, ymm1, 0x1b
    vshufps      ymm3, ymm3, ymm3, 0x1b
    vperm2f128   ymm1, ymm1, ymm1, 0x01
    vperm2f128   ymm3, ymm3, ymm3, 0x01
    vunpcklps    ymm6, ymm2, ymm1
    vunpckhps    ymm4, ymm2, ymm1
    vunpcklps    ymm7, ymm0, ymm3
    vunpckhps    ymm5, ymm0, ymm3

    vextractf128 [%3+%1*2],      ymm7, 0
    vextractf128 [%3+%1*2+0x10], ymm5, 0
    vextractf128 [%3+%1*2+0x20], ymm7, 1
    vextractf128 [%3+%1*2+0x30], ymm5, 1

    vextractf128 [%3+%2*2],      ymm6, 0
    vextractf128 [%3+%2*2+0x10], ymm4, 0
    vextractf128 [%3+%2*2+0x20], ymm6, 1
    vextractf128 [%3+%2*2+0x30], ymm4, 1
    sub      %2,   0x20
    add      %1,   0x20
    jl       .post
%endmacro

%macro POSROTATESHUF 5 ;j, k, z+n8, tcos+n8, tsin+n8
.post:
    movaps   xmm1, [%3+%1*2]
    movaps   xmm0, [%3+%1*2+0x10]
    CMUL     %1,   xmm0, xmm1, %3, %4, %5
    movaps   xmm5, [%3+%2*2]
    movaps   xmm4, [%3+%2*2+0x10]
    CMUL     %2,   xmm4, xmm5, %3, %4, %5
    shufps   xmm1, xmm1, 0x1b
    shufps   xmm5, xmm5, 0x1b
    movaps   xmm6, xmm4
    unpckhps xmm4, xmm1
    unpcklps xmm6, xmm1
    movaps   xmm2, xmm0
    unpcklps xmm0, xmm5
    unpckhps xmm2, xmm5
    movaps   [%3+%2*2],      xmm6
    movaps   [%3+%2*2+0x10], xmm4
    movaps   [%3+%1*2],      xmm0
    movaps   [%3+%1*2+0x10], xmm2
    sub      %2,   0x10
    add      %1,   0x10
    jl       .post
%endmacro

%macro CMUL_3DNOW 6
    mova       m6, [%1+%2*2]
    mova       %3, [%1+%2*2+8]
    mova       %4, m6
    mova       m7, %3
    pfmul      m6, [%5+%2]
    pfmul      %3, [%6+%2]
    pfmul      %4, [%6+%2]
    pfmul      m7, [%5+%2]
    pfsub      %3, m6
    pfadd      %4, m7
%endmacro

%macro POSROTATESHUF_3DNOW 5 ;j, k, z+n8, tcos+n8, tsin+n8
.post:
    CMUL_3DNOW %3, %1, m0, m1, %4, %5
    CMUL_3DNOW %3, %2, m2, m3, %4, %5
    movd  [%3+%1*2+ 0], m0
    movd  [%3+%2*2+12], m1
    movd  [%3+%2*2+ 0], m2
    movd  [%3+%1*2+12], m3
    psrlq      m0, 32
    psrlq      m1, 32
    psrlq      m2, 32
    psrlq      m3, 32
    movd  [%3+%1*2+ 8], m0
    movd  [%3+%2*2+ 4], m1
    movd  [%3+%2*2+ 8], m2
    movd  [%3+%1*2+ 4], m3
    sub        %2, 8
    add        %1, 8
    jl         .post
%endmacro

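; imdct_half: pre-rotate pairs of input samples (PREROTATER), scatter the
; results into the output according to revtab, run the in-place FFT via
; FFT_DISPATCH, then post-rotate and reorder with the POSROTATESHUF
; variant passed as %1.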
%macro DECL_IMDCT 1
cglobal imdct_half, 3,12,8; FFTContext *s, FFTSample *output, const FFTSample *input
%if ARCH_X86_64
%define rrevtab r7
%define rtcos   r8
%define rtsin   r9
%else
%define rrevtab r6
%define rtsin   r6
%define rtcos   r5
%endif
    mov   r3d, [r0+FFTContext.mdctsize]
    add   r2, r3
    shr   r3, 1
    mov   rtcos, [r0+FFTContext.tcos]
    mov   rtsin, [r0+FFTContext.tsin]
    add   rtcos, r3
    add   rtsin, r3
%if ARCH_X86_64 == 0
    push  rtcos
    push  rtsin
%endif
    shr   r3, 1
    mov   rrevtab, [r0+FFTContext.revtab]
    add   rrevtab, r3
%if ARCH_X86_64 == 0
    push  rrevtab
%endif

%if mmsize == 8
    sub   r3, 2
%else
    sub   r3, 4
%endif
%if ARCH_X86_64 || mmsize == 8
    xor   r4, r4
    sub   r4, r3
%endif
%if notcpuflag(3dnowext) && mmsize == 8
    movd  m7, [ps_m1m1m1m1]
%endif
.pre:
%if ARCH_X86_64 == 0
;unspill
%if mmsize != 8
    xor   r4, r4
    sub   r4, r3
%endif
    mov   rtcos, [esp+8]
    mov   rtsin, [esp+4]
%endif

    PREROTATER r4, r3, r2, rtcos, rtsin
%if mmsize == 8
    mov    r6, [esp]                ; rrevtab = ptr+n8
    movzx  r5,  word [rrevtab+r4-2] ; rrevtab[j]
    movzx  r6,  word [rrevtab+r3]   ; rrevtab[n4-j-1]
    mova [r1+r5*8], m0
    mova [r1+r6*8], m2
    add    r4, 2
    sub    r3, 2
%else
%if ARCH_X86_64
    movzx  r5,  word [rrevtab+r4-4]
    movzx  r6,  word [rrevtab+r4-2]
    movzx  r10, word [rrevtab+r3]
    movzx  r11, word [rrevtab+r3+2]
    movlps [r1+r5 *8], xmm0
    movhps [r1+r6 *8], xmm0
    movlps [r1+r10*8], xmm1
    movhps [r1+r11*8], xmm1
    add    r4, 4
%else
    mov    r6, [esp]
    movzx  r5, word [r6+r4-4]
    movzx  r4, word [r6+r4-2]
    movlps [r1+r5*8], xmm0
    movhps [r1+r4*8], xmm0
    movzx  r5, word [r6+r3]
    movzx  r4, word [r6+r3+2]
    movlps [r1+r5*8], xmm1
    movhps [r1+r4*8], xmm1
%endif
    sub    r3, 4
%endif
    jns    .pre

    mov  r5, r0
    mov  r6, r1
    mov  r0, r1
    mov  r1d, [r5+FFTContext.nbits]

    FFT_DISPATCH SUFFIX, r1

    mov  r0d, [r5+FFTContext.mdctsize]
    add  r6, r0
    shr  r0, 1
%if ARCH_X86_64 == 0
%define rtcos r2
%define rtsin r3
    mov  rtcos, [esp+8]
    mov  rtsin, [esp+4]
%endif
    neg  r0
    mov  r1, -mmsize
    sub  r1, r0
    %1 r0, r1, r6, rtcos, rtsin
%if ARCH_X86_64 == 0
    add esp, 12
%endif
%if mmsize == 8
    femms
%endif
    RET
%endmacro

DECL_IMDCT POSROTATESHUF

%if ARCH_X86_32
INIT_MMX 3dnow
DECL_IMDCT POSROTATESHUF_3DNOW

INIT_MMX 3dnowext
DECL_IMDCT POSROTATESHUF_3DNOW
%endif

INIT_YMM avx

%if HAVE_AVX_EXTERNAL
DECL_IMDCT POSROTATESHUF_AVX
%endif