;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2012 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*          Henrik Gramner <hengar-6@student.ltu.se>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************

; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible.  Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well.  Send patches or ideas
; to x264-devel@videolan.org .

%ifndef private_prefix
    %define private_prefix x264
%endif

%ifndef public_prefix
    %define public_prefix private_prefix
%endif

%define WIN64  0
%define UNIX64 0
%if ARCH_X86_64
    %ifidn __OUTPUT_FORMAT__,win32
        %define WIN64  1
    %elifidn __OUTPUT_FORMAT__,win64
        %define WIN64  1
    %else
        %define UNIX64 1
    %endif
%endif

%ifdef PREFIX
    %define mangle(x) _ %+ x
%else
    %define mangle(x) x
%endif

; Name of the .rodata section.
%macro SECTION_RODATA 0-1 16
    ; Kludge: Something on OS X fails to align .rodata even given an align
    ; attribute, so use a different read-only section. This has been fixed in
    ; yasm 0.8.0 and nasm 2.6.
    %ifdef __YASM_VERSION_ID__
        %if __YASM_VERSION_ID__ < 00080000h
            %define NEED_MACHO_RODATA_KLUDGE
        %endif
    %elifdef __NASM_VERSION_ID__
        %if __NASM_VERSION_ID__ < 02060000h
            %define NEED_MACHO_RODATA_KLUDGE
        %endif
    %endif

    %ifidn __OUTPUT_FORMAT__,aout
        section .text
    %else
        %ifndef NEED_MACHO_RODATA_KLUDGE
            SECTION .rodata align=%1
        %else
            %ifidn __OUTPUT_FORMAT__,macho64
                SECTION .text align=%1
            %elifidn __OUTPUT_FORMAT__,macho
                SECTION .text align=%1
                fakegot:
            %else
                SECTION .rodata align=%1
            %endif
        %endif
    %endif

    %undef NEED_MACHO_RODATA_KLUDGE
%endmacro

; aout does not support align=
%macro SECTION_TEXT 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        SECTION .text
    %else
        SECTION .text align=%1
    %endif
%endmacro

%if WIN64
    %define PIC
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
    %undef PIC
%endif
%ifdef PIC
    default rel
%endif

%macro CPUNOP 1
    %if HAVE_CPUNOP
        CPU %1
    %endif
%endmacro

; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
CPUNOP amdnop

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. If not aligned (x86-32 ICC 10.x,
;      MSVC or YMM), the stack will be manually aligned (to 16 or 32 bytes),
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)

; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.

; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
; which are slow when a normal ret follows a branch.

; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
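;
; A hedged usage sketch (the function name and body below are hypothetical,
; for illustration only):
;   cglobal scale_u32, 2,3,0, dst, src, tmp  ; 2 args, 3 gprs, 0 xmm regs
;       mov   tmpd, [srcq]   ; tmpd/tmpq etc. come from the arg-name defines
;       add   tmpd, tmpd
;       mov   [dstq], tmpd
;       RET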

%macro DECLARE_REG 2-3
    %define r%1q %2
    %define r%1d %2d
    %define r%1w %2w
    %define r%1b %2b
    %define r%1h %2h
    %define %2q %2
    %if %0 == 2
        %define r%1m  %2d
        %define r%1mp %2
    %elif ARCH_X86_64 ; memory
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp qword r %+ %1 %+ m
    %else
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp dword r %+ %1 %+ m
    %endif
    %define r%1  %2
%endmacro

%macro DECLARE_REG_SIZE 3
    %define r%1q r%1
    %define e%1q r%1
    %define r%1d e%1
    %define e%1d e%1
    %define r%1w %1
    %define e%1w %1
    %define r%1h %3
    %define e%1h %3
    %define r%1b %2
    %define e%1b %2
%if ARCH_X86_64 == 0
    %define r%1  e%1
%endif
%endmacro

DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null

; t# defines for when per-arch register allocation is more complex than just function arguments
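; For example (hypothetical mapping), "DECLARE_REG_TMP 2,0,1" makes t0 an alias
; for r2, t1 for r0 and t2 for r1 in the functions that follow.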

%macro DECLARE_REG_TMP 1-*
    %assign %%i 0
    %rep %0
        CAT_XDEFINE t, %%i, r%1
        %assign %%i %%i+1
        %rotate 1
    %endrep
%endmacro

%macro DECLARE_REG_TMP_SIZE 0-*
    %rep %0
        %define t%1q t%1 %+ q
        %define t%1d t%1 %+ d
        %define t%1w t%1 %+ w
        %define t%1h t%1 %+ h
        %define t%1b t%1 %+ b
        %rotate 1
    %endrep
%endmacro

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14

%if ARCH_X86_64
    %define gprsize 8
%else
    %define gprsize 4
%endif

%macro PUSH 1
    push %1
    %ifidn rstk, rsp
        %assign stack_offset stack_offset+gprsize
    %endif
%endmacro

%macro POP 1
    pop %1
    %ifidn rstk, rsp
        %assign stack_offset stack_offset-gprsize
    %endif
%endmacro

%macro PUSH_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            PUSH r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro POP_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            pop r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro LOAD_IF_USED 1-*
    %rep %0
        %if %1 < num_args
            mov r%1, r %+ %1 %+ mp
        %endif
        %rotate 1
    %endrep
%endmacro

%macro SUB 2
    sub %1, %2
    %ifidn %1, rstk
        %assign stack_offset stack_offset+(%2)
    %endif
%endmacro

%macro ADD 2
    add %1, %2
    %ifidn %1, rstk
        %assign stack_offset stack_offset-(%2)
    %endif
%endmacro

%macro movifnidn 2
    %ifnidn %1, %2
        mov %1, %2
    %endif
%endmacro

%macro movsxdifnidn 2
    %ifnidn %1, %2
        movsxd %1, %2
    %endif
%endmacro

%macro ASSERT 1
    %if (%1) == 0
        %error assert failed
    %endif
%endmacro

%macro DEFINE_ARGS 0-*
    %ifdef n_arg_names
        %assign %%i 0
        %rep n_arg_names
            CAT_UNDEF arg_name %+ %%i, q
            CAT_UNDEF arg_name %+ %%i, d
            CAT_UNDEF arg_name %+ %%i, w
            CAT_UNDEF arg_name %+ %%i, h
            CAT_UNDEF arg_name %+ %%i, b
            CAT_UNDEF arg_name %+ %%i, m
            CAT_UNDEF arg_name %+ %%i, mp
            CAT_UNDEF arg_name, %%i
            %assign %%i %%i+1
        %endrep
    %endif

    %xdefine %%stack_offset stack_offset
    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
    %assign %%i 0
    %rep %0
        %xdefine %1q r %+ %%i %+ q
        %xdefine %1d r %+ %%i %+ d
        %xdefine %1w r %+ %%i %+ w
        %xdefine %1h r %+ %%i %+ h
        %xdefine %1b r %+ %%i %+ b
        %xdefine %1m r %+ %%i %+ m
        %xdefine %1mp r %+ %%i %+ mp
        CAT_XDEFINE arg_name, %%i, %1
        %assign %%i %%i+1
        %rotate 1
    %endrep
    %xdefine stack_offset %%stack_offset
    %assign n_arg_names %0
%endmacro

%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
    %ifnum %1
        %if %1 != 0
            %assign %%stack_alignment ((mmsize + 15) & ~15)
            %assign stack_size %1
            %if stack_size < 0
                %assign stack_size -stack_size
            %endif
            %if mmsize != 8
                %assign xmm_regs_used %2
            %endif
            %if mmsize <= 16 && HAVE_ALIGNED_STACK
                %assign stack_size_padded stack_size + %%stack_alignment - gprsize - (stack_offset & (%%stack_alignment - 1))
                %if xmm_regs_used > 6
                    %assign stack_size_padded stack_size_padded + (xmm_regs_used - 6) * 16
                %endif
                SUB rsp, stack_size_padded
            %else
                %assign %%reg_num (regs_used - 1)
                %xdefine rstk r %+ %%reg_num
                ; align stack, and save original stack location directly above
                ; it, i.e. in [rsp+stack_size_padded], so we can restore the
                ; stack in a single instruction (i.e. mov rsp, rstk or mov
                ; rsp, [rsp+stack_size_padded])
                mov  rstk, rsp
                %assign stack_size_padded stack_size
                %if xmm_regs_used > 6
                    %assign stack_size_padded stack_size_padded + (xmm_regs_used - 6) * 16
                    %if mmsize == 32 && xmm_regs_used & 1
                        ; re-align to 32 bytes
                        %assign stack_size_padded (stack_size_padded + 16)
                    %endif
                %endif
                %if %1 < 0 ; need to store rsp on stack
                    sub  rsp, gprsize+stack_size_padded
                    and  rsp, ~(%%stack_alignment-1)
                    %xdefine rstkm [rsp+stack_size_padded]
                    mov rstkm, rstk
                %else ; can keep rsp in rstk during whole function
                    sub  rsp, stack_size_padded
                    and  rsp, ~(%%stack_alignment-1)
                    %xdefine rstkm rstk
                %endif
            %endif
            %if xmm_regs_used > 6
                WIN64_PUSH_XMM
            %endif
        %endif
    %endif
%endmacro

%macro SETUP_STACK_POINTER 1
    %ifnum %1
        %if %1 != 0 && (HAVE_ALIGNED_STACK == 0 || mmsize == 32)
            %if %1 > 0
                %assign regs_used (regs_used + 1)
            %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
                %warning "Stack pointer will overwrite register argument"
            %endif
        %endif
    %endif
%endmacro

%macro DEFINE_ARGS_INTERNAL 3+
    %ifnum %2
        DEFINE_ARGS %3
    %elif %1 == 4
        DEFINE_ARGS %2
    %elif %1 > 4
        DEFINE_ARGS %2, %3
    %endif
%endmacro

%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 0,  rcx
DECLARE_REG 1,  rdx
DECLARE_REG 2,  R8
DECLARE_REG 3,  R9
DECLARE_REG 4,  R10, 40
DECLARE_REG 5,  R11, 48
DECLARE_REG 6,  rax, 56
DECLARE_REG 7,  rdi, 64
DECLARE_REG 8,  rsi, 72
DECLARE_REG 9,  rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R12, 96
DECLARE_REG 12, R13, 104
DECLARE_REG 13, R14, 112
DECLARE_REG 14, R15, 120

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
    ALLOC_STACK %4, %3
    %if mmsize != 8 && stack_size == 0
        WIN64_SPILL_XMM %3
    %endif
    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro

%macro WIN64_PUSH_XMM 0
    %assign %%i xmm_regs_used
    %rep (xmm_regs_used-6)
        %assign %%i %%i-1
        movdqa [rsp + (%%i-6)*16 + stack_size + (~stack_offset&8)], xmm %+ %%i
    %endrep
%endmacro

%macro WIN64_SPILL_XMM 1
    %assign xmm_regs_used %1
    ASSERT xmm_regs_used <= 16
    %if xmm_regs_used > 6
        SUB rsp, (xmm_regs_used-6)*16+16
        WIN64_PUSH_XMM
    %endif
%endmacro

%macro WIN64_RESTORE_XMM_INTERNAL 1
    %if xmm_regs_used > 6
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            %assign %%i %%i-1
            movdqa xmm %+ %%i, [%1 + (%%i-6)*16+stack_size+(~stack_offset&8)]
        %endrep
        %if stack_size_padded == 0
            add %1, (xmm_regs_used-6)*16+16
        %endif
    %endif
    %if stack_size_padded > 0
        %if stack_size > 0 && (mmsize == 32 || HAVE_ALIGNED_STACK == 0)
            mov rsp, rstkm
        %else
            add %1, stack_size_padded
        %endif
    %endif
%endmacro

%macro WIN64_RESTORE_XMM 1
    WIN64_RESTORE_XMM_INTERNAL %1
    %assign stack_offset (stack_offset-stack_size_padded)
    %assign xmm_regs_used 0
%endmacro

%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0

%macro RET 0
    WIN64_RESTORE_XMM_INTERNAL rsp
    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
%if mmsize == 32
    vzeroupper
%endif
    ret
%endmacro

%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 0,  rdi
DECLARE_REG 1,  rsi
DECLARE_REG 2,  rdx
DECLARE_REG 3,  rcx
DECLARE_REG 4,  R8
DECLARE_REG 5,  R9
DECLARE_REG 6,  rax, 8
DECLARE_REG 7,  R10, 16
DECLARE_REG 8,  R11, 24
DECLARE_REG 9,  rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R12, 48
DECLARE_REG 12, R13, 56
DECLARE_REG 13, R14, 64
DECLARE_REG 14, R15, 72

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 9, 10, 11, 12, 13, 14
    ALLOC_STACK %4
    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro

%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0

%macro RET 0
%if stack_size_padded > 0
%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
    mov rsp, rstkm
%else
    add rsp, stack_size_padded
%endif
%endif
    POP_IF_USED 14, 13, 12, 11, 10, 9
%if mmsize == 32
    vzeroupper
%endif
    ret
%endmacro

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28
%define rsp esp

%macro DECLARE_ARG 1-*
    %rep %0
        %define r%1m [rstk + stack_offset + 4*%1 + 4]
        %define r%1mp dword r%1m
        %rotate 1
    %endrep
%endmacro

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    %if num_args > 7
        %assign num_args 7
    %endif
    %if regs_used > 7
        %assign regs_used 7
    %endif
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 7
    PUSH_IF_USED 3, 4, 5, 6
    ALLOC_STACK %4
    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
    DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro

%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0

%macro RET 0
%if stack_size_padded > 0
%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
    mov rsp, rstkm
%else
    add rsp, stack_size_padded
%endif
%endif
    POP_IF_USED 6, 5, 4, 3
%if mmsize == 32
    vzeroupper
%endif
    ret
%endmacro

%endif ;======================================================================

%if WIN64 == 0
%macro WIN64_SPILL_XMM 1
%endmacro
%macro WIN64_RESTORE_XMM 1
%endmacro
%macro WIN64_PUSH_XMM 0
%endmacro
%endif

%macro REP_RET 0
    %if has_epilogue
        RET
    %else
        rep ret
    %endif
%endmacro

%macro TAIL_CALL 2 ; callee, is_nonadjacent
    %if has_epilogue
        call %1
        RET
    %elif %2
        jmp %1
    %endif
%endmacro

;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 1, %1 %+ SUFFIX, %2
%endmacro
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 0, %1 %+ SUFFIX, %2
%endmacro
%macro cglobal_internal 2-3+
    %if %1
        %xdefine %%FUNCTION_PREFIX private_prefix
        %xdefine %%VISIBILITY hidden
    %else
        %xdefine %%FUNCTION_PREFIX public_prefix
        %xdefine %%VISIBILITY
    %endif
    %ifndef cglobaled_%2
        %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
        %xdefine %2.skip_prologue %2 %+ .skip_prologue
        CAT_XDEFINE cglobaled_, %2, 1
    %endif
    %xdefine current_function %2
    %ifidn __OUTPUT_FORMAT__,elf
        global %2:function %%VISIBILITY
    %else
        global %2
    %endif
    align function_align
    %2:
    RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
    %xdefine rstk rsp
    %assign stack_offset 0
    %assign stack_size 0
    %assign stack_size_padded 0
    %assign xmm_regs_used 0
    %ifnidn %3, ""
        PROLOGUE %3
    %endif
%endmacro

%macro cextern 1
    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

; like cextern, but without the prefix
%macro cextern_naked 1
    %xdefine %1 mangle(%1)
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

%macro const 2+
    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    global %1
    %1: %2
%endmacro

; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif

; cpuflags

%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
%assign cpuflags_avx      (1<<11)| cpuflags_sse42
%assign cpuflags_xop      (1<<12)| cpuflags_avx
%assign cpuflags_fma4     (1<<13)| cpuflags_avx
%assign cpuflags_avx2     (1<<14)| cpuflags_avx
%assign cpuflags_fma3     (1<<15)| cpuflags_avx

%assign cpuflags_cache32  (1<<16)
%assign cpuflags_cache64  (1<<17)
%assign cpuflags_slowctz  (1<<18)
%assign cpuflags_lzcnt    (1<<19)
%assign cpuflags_misalign (1<<20)
%assign cpuflags_aligned  (1<<21) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<22)
%assign cpuflags_bmi1     (1<<23)
%assign cpuflags_bmi2     (1<<24)|cpuflags_bmi1
%assign cpuflags_tbm      (1<<25)|cpuflags_bmi1

%define    cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
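; e.g. "%if cpuflag(sse2)" is true not only for sse2 itself but for every flag
; that implies it (ssse3, sse4, avx, ...), since the flag values are cumulative.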

; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
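; e.g. "INIT_XMM ssse3" sets cpuflags to cpuflags_ssse3 and SUFFIX to _ssse3,
; so a following "cglobal foo" emits a function named foo_ssse3 (plus the usual
; private_prefix mangling applied by cglobal).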
%macro INIT_CPUFLAGS 0-2
    CPUNOP amdnop
    %if %0 >= 1
        %xdefine cpuname %1
        %assign cpuflags cpuflags_%1
        %if %0 >= 2
            %xdefine cpuname %1_%2
            %assign cpuflags cpuflags | cpuflags_%2
        %endif
        %xdefine SUFFIX _ %+ cpuname
        %if cpuflag(avx)
            %assign avx_enabled 1
        %endif
        %if mmsize == 16 && notcpuflag(sse2)
            %define mova movaps
            %define movu movups
            %define movnta movntps
        %endif
        %if cpuflag(aligned)
            %define movu mova
        %elifidn %1, sse3
            %define movu lddqu
        %endif
        %if notcpuflag(sse2)
            CPUNOP basicnop
        %endif
    %else
        %xdefine SUFFIX
        %undef cpuname
        %undef cpuflags
    %endif
%endmacro

; merge mmx and sse*

%macro CAT_XDEFINE 3
    %xdefine %1%2 %3
%endmacro

%macro CAT_UNDEF 2
    %undef %1%2
%endmacro

%macro INIT_MMX 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_MMX %1
    %define mmsize 8
    %define num_mmregs 8
    %define mova movq
    %define movu movq
    %define movh movd
    %define movnta movntq
    %assign %%i 0
    %rep 8
    CAT_XDEFINE m, %%i, mm %+ %%i
    CAT_XDEFINE nmm, %%i, %%i
    %assign %%i %%i+1
    %endrep
    %rep 8
    CAT_UNDEF m, %%i
    CAT_UNDEF nmm, %%i
    %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro

%macro INIT_XMM 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_XMM %1
    %define mmsize 16
    %define num_mmregs 8
    %if ARCH_X86_64
    %define num_mmregs 16
    %endif
    %define mova movdqa
    %define movu movdqu
    %define movh movq
    %define movnta movntdq
    %assign %%i 0
    %rep num_mmregs
    CAT_XDEFINE m, %%i, xmm %+ %%i
    CAT_XDEFINE nxmm, %%i, %%i
    %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro

; FIXME: INIT_AVX can be replaced by INIT_XMM avx
%macro INIT_AVX 0
    INIT_XMM
    %assign avx_enabled 1
    %define PALIGNR PALIGNR_SSSE3
    %define RESET_MM_PERMUTATION INIT_AVX
%endmacro

%macro INIT_YMM 0-1+
    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_YMM %1
    %define mmsize 32
    %define num_mmregs 8
    %if ARCH_X86_64
    %define num_mmregs 16
    %endif
    %define mova vmovaps
    %define movu vmovups
    %undef movh
    %define movnta vmovntps
    %assign %%i 0
    %rep num_mmregs
    CAT_XDEFINE m, %%i, ymm %+ %%i
    CAT_XDEFINE nymm, %%i, %%i
    %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro

INIT_XMM

; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
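; A small sketch: after
;   paddw m0, m2
;   SWAP  0, 2
; any later reference to m0 names the register that used to be m2 (and vice
; versa); the SWAP itself emits no instructions.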

%macro PERMUTE 2-* ; takes a list of pairs to swap
%rep %0/2
    %xdefine tmp%2 m%2
    %xdefine ntmp%2 nm%2
    %rotate 2
%endrep
%rep %0/2
    %xdefine m%1 tmp%2
    %xdefine nm%1 ntmp%2
    %undef tmp%2
    %undef ntmp%2
    %rotate 2
%endrep
%endmacro

%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
%rep %0-1
%ifdef m%1
    %xdefine tmp m%1
    %xdefine m%1 m%2
    %xdefine m%2 tmp
    CAT_XDEFINE n, m%1, %1
    CAT_XDEFINE n, m%2, %2
%else
    ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here.
    ; Be careful using this mode in nested macros though, as in some cases there may be
    ; other copies of m# that have already been dereferenced and don't get updated correctly.
    %xdefine %%n1 n %+ %1
    %xdefine %%n2 n %+ %2
    %xdefine tmp m %+ %%n1
    CAT_XDEFINE m, %%n1, m %+ %%n2
    CAT_XDEFINE m, %%n2, tmp
    CAT_XDEFINE n, m %+ %%n1, %%n1
    CAT_XDEFINE n, m %+ %%n2, %%n2
%endif
    %undef tmp
    %rotate 1
%endrep
%endmacro

; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
%macro SAVE_MM_PERMUTATION 0-1
    %if %0
        %xdefine %%f %1_m
    %else
        %xdefine %%f current_function %+ _m
    %endif
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE %%f, %%i, m %+ %%i
    %assign %%i %%i+1
    %endrep
%endmacro

%macro LOAD_MM_PERMUTATION 1 ; name to load from
    %ifdef %1_m0
        %assign %%i 0
        %rep num_mmregs
            CAT_XDEFINE m, %%i, %1_m %+ %%i
            CAT_XDEFINE n, m %+ %%i, %%i
        %assign %%i %%i+1
        %endrep
    %endif
%endmacro

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
    call_internal %1 %+ SUFFIX, %1
%endmacro
%macro call_internal 2
    %xdefine %%i %2
    %ifndef cglobaled_%2
        %ifdef cglobaled_%1
            %xdefine %%i %1
        %endif
    %endif
    call %%i
    LOAD_MM_PERMUTATION %%i
%endmacro

; Substitutions that reduce instruction size but are functionally equivalent
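; e.g. the add macro below turns "add r0, 128" into "sub r0, -128", which fits
; in a sign-extended imm8 instead of an imm32.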
%macro add 2
    %ifnum %2
        %if %2==128
            sub %1, -128
        %else
            add %1, %2
        %endif
    %else
        add %1, %2
    %endif
%endmacro

%macro sub 2
    %ifnum %2
        %if %2==128
            add %1, -128
        %else
            sub %1, %2
        %endif
    %else
        sub %1, %2
    %endif
%endmacro

;=============================================================================
; AVX abstraction layer
;=============================================================================

%assign i 0
%rep 16
    %if i < 8
        CAT_XDEFINE sizeofmm, i, 8
    %endif
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32
%assign i i+1
%endrep
%undef i

%macro CHECK_AVX_INSTR_EMU 3-*
    %xdefine %%opcode %1
    %xdefine %%dst %2
    %rep %0-2
        %ifidn %%dst, %3
            %error non-avx emulation of ``%%opcode'' is not supported
        %endif
        %rotate 1
    %endrep
%endmacro

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == number of operands given
;%5+: operands
%macro RUN_AVX_INSTR 6-7+
    %ifid %6
        %define %%sizeofreg sizeof%6
    %elifid %5
        %define %%sizeofreg sizeof%5
    %else
        %define %%sizeofreg mmsize
    %endif
    %if %%sizeofreg==32
        %if %4>=3
            v%1 %5, %6, %7
        %else
            v%1 %5, %6
        %endif
    %else
        %if %%sizeofreg==8
            %define %%regmov movq
        %elif %2
            %define %%regmov movaps
        %else
            %define %%regmov movdqa
        %endif

        %if %4>=3+%3
            %ifnidn %5, %6
                %if avx_enabled && %%sizeofreg==16
                    v%1 %5, %6, %7
                %else
                    CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7
                    %%regmov %5, %6
                    %1 %5, %7
                %endif
            %else
                %1 %5, %7
            %endif
        %elif %4>=3
            %1 %5, %6, %7
        %else
            %1 %5, %6
        %endif
    %endif
%endmacro

; 3arg AVX ops with a memory arg can only have it in src2,
; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
; So, if the op is symmetric and the wrong one is memory, swap them.
%macro RUN_AVX_INSTR1 8
    %assign %%swap 0
    %if avx_enabled
        %ifnid %6
            %assign %%swap 1
        %endif
    %elifnidn %5, %6
        %ifnid %7
            %assign %%swap 1
        %endif
    %endif
    %if %%swap && %3 == 0 && %8 == 1
        RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
    %else
        RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
    %endif
%endmacro

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 4
    %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
        %ifidn %3, fnord
            RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
        %elifidn %4, fnord
            RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
        %elifidn %5, fnord
            RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
        %else
            RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
        %endif
    %endmacro
%endmacro
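; For example, "AVX_INSTR addps, 1, 0, 1" (declared below) lets "addps m0, m1, m2"
; assemble as "vaddps m0, m1, m2" when AVX is enabled, and as
; "movaps m0, m1" + "addps m0, m2" otherwise.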

AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 1, 0
AVX_INSTR cmpps, 1, 1, 0
AVX_INSTR cmpsd, 1, 1, 0
AVX_INSTR cmpss, 1, 1, 0
AVX_INSTR cvtdq2ps, 1, 0, 0
AVX_INSTR cvtpd2dq, 1, 0, 0
AVX_INSTR cvtps2dq, 1, 0, 0
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movss, 1, 0, 0
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR pabsb, 0, 0, 0
AVX_INSTR pabsw, 0, 0, 0
AVX_INSTR pabsd, 0, 0, 0
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pcmpestri, 0, 0, 0
AVX_INSTR pcmpestrm, 0, 0, 0
AVX_INSTR pcmpistri, 0, 0, 0
AVX_INSTR pcmpistrm, 0, 0, 0
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmovmskb, 0, 0, 0
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR pshufd, 0, 1, 0
AVX_INSTR pshufhw, 0, 1, 0
AVX_INSTR pshuflw, 0, 1, 0
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR ptest, 0, 0, 0
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1

; 3DNow instructions, for sharing code between AVX, SSE and 3DN
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1

; base-4 constants for shuffles
%assign i 0
%rep 256
    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
    %if j < 10
        CAT_XDEFINE q000, j, i
    %elif j < 100
        CAT_XDEFINE q00, j, i
    %elif j < 1000
        CAT_XDEFINE q0, j, i
    %else
        CAT_XDEFINE q, j, i
    %endif
%assign i i+1
%endrep
%undef i
%undef j
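
; e.g. q3120 expands to 0xD8, so "pshufd m0, m1, q3120" places source dwords
; 0,2,1,3 into destination dwords 0..3.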

%macro FMA_INSTR 3
    %macro %1 5-8 %1, %2, %3
        %if cpuflag(xop) || cpuflag(fma4)
            v%6 %1, %2, %3, %4
        %else
            %ifidn %1, %4
                %7 %5, %2, %3
                %8 %1, %4, %5
            %else
                %7 %1, %2, %3
                %8 %1, %4
            %endif
        %endif
    %endmacro
%endmacro

FMA_INSTR  fmaddps,   mulps, addps
FMA_INSTR  pmacsdd,  pmulld, paddd
FMA_INSTR  pmacsww,  pmullw, paddw
FMA_INSTR pmadcswd, pmaddwd, paddd
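; e.g. "fmaddps m0, m1, m2, m3, m4" computes m0 = m1*m2 + m3, as one vfmaddps
; on XOP/FMA4 and as a mulps+addps pair otherwise (m4 is only used as scratch
; by the fallback when the destination aliases the addend).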

; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf