;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2018 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Henrik Gramner <henrik@gramner.com>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Fiona Glaser <fiona@x264.com>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************

; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible.  Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well.  Send patches or ideas
; to x264-devel@videolan.org .

%ifndef private_prefix
    %define private_prefix x264
%endif

%ifndef public_prefix
    %define public_prefix private_prefix
%endif

%if HAVE_ALIGNED_STACK
    %define STACK_ALIGNMENT 16
%endif
%ifndef STACK_ALIGNMENT
    %if ARCH_X86_64
        %define STACK_ALIGNMENT 16
    %else
        %define STACK_ALIGNMENT 4
    %endif
%endif

%define WIN64  0
%define UNIX64 0
%if ARCH_X86_64
    %ifidn __OUTPUT_FORMAT__,win32
        %define WIN64  1
    %elifidn __OUTPUT_FORMAT__,win64
        %define WIN64  1
    %elifidn __OUTPUT_FORMAT__,x64
        %define WIN64  1
    %else
        %define UNIX64 1
    %endif
%endif

%define FORMAT_ELF 0
%ifidn __OUTPUT_FORMAT__,elf
    %define FORMAT_ELF 1
%elifidn __OUTPUT_FORMAT__,elf32
    %define FORMAT_ELF 1
%elifidn __OUTPUT_FORMAT__,elf64
    %define FORMAT_ELF 1
%endif

%ifdef PREFIX
    %define mangle(x) _ %+ x
%else
    %define mangle(x) x
%endif

; aout does not support align=
; NOTE: This section is out of sync with x264, in order to
; keep supporting OS/2.
%macro SECTION_RODATA 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        SECTION .text
    %elifidn __OUTPUT_FORMAT__,coff
        SECTION .text
    %elifidn __OUTPUT_FORMAT__,win32
        SECTION .rdata align=%1
    %elif WIN64
        SECTION .rdata align=%1
    %else
        SECTION .rodata align=%1
    %endif
%endmacro
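; e.g. (illustrative sketch; the label and data below are hypothetical, not part
; of this file): place read-only constants in a properly aligned rodata section.
;     SECTION_RODATA 32
;     example_const: times 16 dw 1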

%if WIN64
    %define PIC
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
    %undef PIC
%endif
%ifdef PIC
    default rel
%endif

%macro CPUNOP 1
    %if HAVE_CPUNOP
        CPU %1
    %endif
%endmacro

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. The stack will be aligned before
;      allocating the specified stack size. If the required stack alignment is
;      larger than the known stack alignment the stack will be manually aligned
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,7,0x40, dst, src, tmp
; declares a function (foo) that automatically loads two arguments (dst and
; src) into registers, uses one additional register (tmp) plus 7 vector
; registers (m0-m6) and allocates 0x40 bytes of stack space.
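;
; A slightly fuller sketch (the function below is hypothetical, shown only to
; illustrate the conventions): add 8 words from src into dst.
;
; INIT_XMM sse2
; cglobal add_words, 2,2,2, dst, src
;     movu  m0, [dstq]
;     movu  m1, [srcq]
;     paddw m0, m1
;     movu  [dstq], m0
;     RET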

; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.

; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Use this instead of RET if it's a branch target.

; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
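;
; e.g. (illustrative): inside a function declared with PROLOGUE/cglobal,
;     mov   r0d, r1d   ; 32-bit move between the first two arguments
;     movzx r2d, r0w   ; zero-extend the low word of r0
;     mov   r3, r3mp   ; (re)load arg 3 from its original location, native size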

%macro DECLARE_REG 2-3
    %define r%1q %2
    %define r%1d %2d
    %define r%1w %2w
    %define r%1b %2b
    %define r%1h %2h
    %define %2q %2
    %if %0 == 2
        %define r%1m  %2d
        %define r%1mp %2
    %elif ARCH_X86_64 ; memory
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp qword r %+ %1 %+ m
    %else
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp dword r %+ %1 %+ m
    %endif
    %define r%1  %2
%endmacro

%macro DECLARE_REG_SIZE 3
    %define r%1q r%1
    %define e%1q r%1
    %define r%1d e%1
    %define e%1d e%1
    %define r%1w %1
    %define e%1w %1
    %define r%1h %3
    %define e%1h %3
    %define r%1b %2
    %define e%1b %2
    %if ARCH_X86_64 == 0
        %define r%1 e%1
    %endif
%endmacro

DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null

; t# defines for when per-arch register allocation is more complex than just function arguments

%macro DECLARE_REG_TMP 1-*
    %assign %%i 0
    %rep %0
        CAT_XDEFINE t, %%i, r%1
        %assign %%i %%i+1
        %rotate 1
    %endrep
%endmacro

%macro DECLARE_REG_TMP_SIZE 0-*
    %rep %0
        %define t%1q t%1 %+ q
        %define t%1d t%1 %+ d
        %define t%1w t%1 %+ w
        %define t%1h t%1 %+ h
        %define t%1b t%1 %+ b
        %rotate 1
    %endrep
%endmacro

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
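; e.g. (illustrative): pick different scratch registers per architecture, then
; refer to them uniformly as t0/t1 (with t0d, t0w, ... for sub-register sizes):
;     %if ARCH_X86_64
;         DECLARE_REG_TMP 4, 5
;     %else
;         DECLARE_REG_TMP 1, 2
;     %endif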

%if ARCH_X86_64
    %define gprsize 8
%else
    %define gprsize 4
%endif

%macro PUSH 1
    push %1
    %ifidn rstk, rsp
        %assign stack_offset stack_offset+gprsize
    %endif
%endmacro

%macro POP 1
    pop %1
    %ifidn rstk, rsp
        %assign stack_offset stack_offset-gprsize
    %endif
%endmacro

%macro PUSH_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            PUSH r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro POP_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            pop r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro LOAD_IF_USED 1-*
    %rep %0
        %if %1 < num_args
            mov r%1, r %+ %1 %+ mp
        %endif
        %rotate 1
    %endrep
%endmacro

%macro SUB 2
    sub %1, %2
    %ifidn %1, rstk
        %assign stack_offset stack_offset+(%2)
    %endif
%endmacro

%macro ADD 2
    add %1, %2
    %ifidn %1, rstk
        %assign stack_offset stack_offset-(%2)
    %endif
%endmacro

%macro movifnidn 2
    %ifnidn %1, %2
        mov %1, %2
    %endif
%endmacro

%macro movsxdifnidn 2
    %ifnidn %1, %2
        movsxd %1, %2
    %endif
%endmacro

%macro ASSERT 1
    %if (%1) == 0
        %error assertion ``%1'' failed
    %endif
%endmacro

%macro DEFINE_ARGS 0-*
    %ifdef n_arg_names
        %assign %%i 0
        %rep n_arg_names
            CAT_UNDEF arg_name %+ %%i, q
            CAT_UNDEF arg_name %+ %%i, d
            CAT_UNDEF arg_name %+ %%i, w
            CAT_UNDEF arg_name %+ %%i, h
            CAT_UNDEF arg_name %+ %%i, b
            CAT_UNDEF arg_name %+ %%i, m
            CAT_UNDEF arg_name %+ %%i, mp
            CAT_UNDEF arg_name, %%i
            %assign %%i %%i+1
        %endrep
    %endif

    %xdefine %%stack_offset stack_offset
    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
    %assign %%i 0
    %rep %0
        %xdefine %1q r %+ %%i %+ q
        %xdefine %1d r %+ %%i %+ d
        %xdefine %1w r %+ %%i %+ w
        %xdefine %1h r %+ %%i %+ h
        %xdefine %1b r %+ %%i %+ b
        %xdefine %1m r %+ %%i %+ m
        %xdefine %1mp r %+ %%i %+ mp
        CAT_XDEFINE arg_name, %%i, %1
        %assign %%i %%i+1
        %rotate 1
    %endrep
    %xdefine stack_offset %%stack_offset
    %assign n_arg_names %0
%endmacro
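; e.g. (illustrative): rename the argument registers mid-function once the
; original argument names are no longer needed:
;     DEFINE_ARGS dst, stride, height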

%define required_stack_alignment ((mmsize + 15) & ~15)
%define vzeroupper_required (mmsize > 16 && (ARCH_X86_64 == 0 || xmm_regs_used > 16 || notcpuflag(avx512)))
%define high_mm_regs (16*cpuflag(avx512))

%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
    %ifnum %1
        %if %1 != 0
            %assign %%pad 0
            %assign stack_size %1
            %if stack_size < 0
                %assign stack_size -stack_size
            %endif
            %if WIN64
                %assign %%pad %%pad + 32 ; shadow space
                %if mmsize != 8
                    %assign xmm_regs_used %2
                    %if xmm_regs_used > 8
                        %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
                    %endif
                %endif
            %endif
            %if required_stack_alignment <= STACK_ALIGNMENT
                ; maintain the current stack alignment
                %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
                SUB rsp, stack_size_padded
            %else
                %assign %%reg_num (regs_used - 1)
                %xdefine rstk r %+ %%reg_num
                ; align stack, and save original stack location directly above
                ; it, i.e. in [rsp+stack_size_padded], so we can restore the
                ; stack in a single instruction (i.e. mov rsp, rstk or mov
                ; rsp, [rsp+stack_size_padded])
                %if %1 < 0 ; need to store rsp on stack
                    %xdefine rstkm [rsp + stack_size + %%pad]
                    %assign %%pad %%pad + gprsize
                %else ; can keep rsp in rstk during whole function
                    %xdefine rstkm rstk
                %endif
                %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
                mov rstk, rsp
                and rsp, ~(required_stack_alignment-1)
                sub rsp, stack_size_padded
                movifnidn rstkm, rstk
            %endif
            WIN64_PUSH_XMM
        %endif
    %endif
%endmacro

%macro SETUP_STACK_POINTER 1
    %ifnum %1
        %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
            %if %1 > 0
                ; Reserve an additional register for storing the original stack pointer, but avoid using
                ; eax/rax for this purpose since it can potentially get overwritten as a return value.
                %assign regs_used (regs_used + 1)
                %if ARCH_X86_64 && regs_used == 7
                    %assign regs_used 8
                %elif ARCH_X86_64 == 0 && regs_used == 1
                    %assign regs_used 2
                %endif
            %endif
            %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
                ; Ensure that we don't clobber any registers containing arguments. For UNIX64 we also preserve r6 (rax)
                ; since it's used as a hidden argument in vararg functions to specify the number of vector registers used.
                %assign regs_used 5 + UNIX64 * 3
            %endif
        %endif
    %endif
%endmacro

%macro DEFINE_ARGS_INTERNAL 3+
    %ifnum %2
        DEFINE_ARGS %3
    %elif %1 == 4
        DEFINE_ARGS %2
    %elif %1 > 4
        DEFINE_ARGS %2, %3
    %endif
%endmacro

%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 0,  rcx
DECLARE_REG 1,  rdx
DECLARE_REG 2,  R8
DECLARE_REG 3,  R9
DECLARE_REG 4,  R10, 40
DECLARE_REG 5,  R11, 48
DECLARE_REG 6,  rax, 56
DECLARE_REG 7,  rdi, 64
DECLARE_REG 8,  rsi, 72
DECLARE_REG 9,  rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R14, 96
DECLARE_REG 12, R15, 104
DECLARE_REG 13, R12, 112
DECLARE_REG 14, R13, 120

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
    ALLOC_STACK %4, %3
    %if mmsize != 8 && stack_size == 0
        WIN64_SPILL_XMM %3
    %endif
    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro

%macro WIN64_PUSH_XMM 0
    ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
    %if xmm_regs_used > 6 + high_mm_regs
        movaps [rstk + stack_offset +  8], xmm6
    %endif
    %if xmm_regs_used > 7 + high_mm_regs
        movaps [rstk + stack_offset + 24], xmm7
    %endif
    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
    %if %%xmm_regs_on_stack > 0
        %assign %%i 8
        %rep %%xmm_regs_on_stack
            movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
            %assign %%i %%i+1
        %endrep
    %endif
%endmacro

%macro WIN64_SPILL_XMM 1
    %assign xmm_regs_used %1
    ASSERT xmm_regs_used <= 16 + high_mm_regs
    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
    %if %%xmm_regs_on_stack > 0
        ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
        %assign %%pad %%xmm_regs_on_stack*16 + 32
        %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
        SUB rsp, stack_size_padded
    %endif
    WIN64_PUSH_XMM
%endmacro

%macro WIN64_RESTORE_XMM_INTERNAL 0
    %assign %%pad_size 0
    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
    %if %%xmm_regs_on_stack > 0
        %assign %%i xmm_regs_used - high_mm_regs
        %rep %%xmm_regs_on_stack
            %assign %%i %%i-1
            movaps xmm %+ %%i, [rsp + (%%i-8)*16 + stack_size + 32]
        %endrep
    %endif
    %if stack_size_padded > 0
        %if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
            mov rsp, rstkm
        %else
            add rsp, stack_size_padded
            %assign %%pad_size stack_size_padded
        %endif
    %endif
    %if xmm_regs_used > 7 + high_mm_regs
        movaps xmm7, [rsp + stack_offset - %%pad_size + 24]
    %endif
    %if xmm_regs_used > 6 + high_mm_regs
        movaps xmm6, [rsp + stack_offset - %%pad_size +  8]
    %endif
%endmacro

%macro WIN64_RESTORE_XMM 0
    WIN64_RESTORE_XMM_INTERNAL
    %assign stack_offset (stack_offset-stack_size_padded)
    %assign stack_size_padded 0
    %assign xmm_regs_used 0
%endmacro

%define has_epilogue regs_used > 7 || stack_size > 0 || vzeroupper_required || xmm_regs_used > 6+high_mm_regs

%macro RET 0
    WIN64_RESTORE_XMM_INTERNAL
    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
    %if vzeroupper_required
        vzeroupper
    %endif
    AUTO_REP_RET
%endmacro

%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 0,  rdi
DECLARE_REG 1,  rsi
DECLARE_REG 2,  rdx
DECLARE_REG 3,  rcx
DECLARE_REG 4,  R8
DECLARE_REG 5,  R9
DECLARE_REG 6,  rax, 8
DECLARE_REG 7,  R10, 16
DECLARE_REG 8,  R11, 24
DECLARE_REG 9,  rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R14, 48
DECLARE_REG 12, R15, 56
DECLARE_REG 13, R12, 64
DECLARE_REG 14, R13, 72

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    %assign xmm_regs_used %3
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 9, 10, 11, 12, 13, 14
    ALLOC_STACK %4
    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro

%define has_epilogue regs_used > 9 || stack_size > 0 || vzeroupper_required

%macro RET 0
    %if stack_size_padded > 0
        %if required_stack_alignment > STACK_ALIGNMENT
            mov rsp, rstkm
        %else
            add rsp, stack_size_padded
        %endif
    %endif
    POP_IF_USED 14, 13, 12, 11, 10, 9
    %if vzeroupper_required
        vzeroupper
    %endif
    AUTO_REP_RET
%endmacro

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28
%define rsp esp

%macro DECLARE_ARG 1-*
    %rep %0
        %define r%1m [rstk + stack_offset + 4*%1 + 4]
        %define r%1mp dword r%1m
        %rotate 1
    %endrep
%endmacro

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    %if num_args > 7
        %assign num_args 7
    %endif
    %if regs_used > 7
        %assign regs_used 7
    %endif
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 7
    PUSH_IF_USED 3, 4, 5, 6
    ALLOC_STACK %4
    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
    DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro

%define has_epilogue regs_used > 3 || stack_size > 0 || vzeroupper_required

%macro RET 0
    %if stack_size_padded > 0
        %if required_stack_alignment > STACK_ALIGNMENT
            mov rsp, rstkm
        %else
            add rsp, stack_size_padded
        %endif
    %endif
    POP_IF_USED 6, 5, 4, 3
    %if vzeroupper_required
        vzeroupper
    %endif
    AUTO_REP_RET
%endmacro

%endif ;======================================================================

%if WIN64 == 0
    %macro WIN64_SPILL_XMM 1
    %endmacro
    %macro WIN64_RESTORE_XMM 0
    %endmacro
    %macro WIN64_PUSH_XMM 0
    %endmacro
%endif

; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
; a branch or a branch target. So switch to a 2-byte form of ret in that case.
; We can automatically detect "follows a branch", but not a branch target.
; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
%macro REP_RET 0
    %if has_epilogue || cpuflag(ssse3)
        RET
    %else
        rep ret
    %endif
    annotate_function_size
%endmacro

%define last_branch_adr $$
%macro AUTO_REP_RET 0
    %if notcpuflag(ssse3)
        times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
    %endif
    ret
    annotate_function_size
%endmacro

%macro BRANCH_INSTR 0-*
    %rep %0
        %macro %1 1-2 %1
            %2 %1
            %if notcpuflag(ssse3)
                %%branch_instr equ $
                %xdefine last_branch_adr %%branch_instr
            %endif
        %endmacro
        %rotate 1
    %endrep
%endmacro

BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp

%macro TAIL_CALL 2 ; callee, is_nonadjacent
    %if has_epilogue
        call %1
        RET
    %elif %2
        jmp %1
    %endif
    annotate_function_size
%endmacro

;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 1, %1 %+ SUFFIX, %2
%endmacro
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 0, %1 %+ SUFFIX, %2
%endmacro
%macro cglobal_internal 2-3+
    annotate_function_size
    %if %1
        %xdefine %%FUNCTION_PREFIX private_prefix
        %xdefine %%VISIBILITY hidden
    %else
        %xdefine %%FUNCTION_PREFIX public_prefix
        %xdefine %%VISIBILITY
    %endif
    %ifndef cglobaled_%2
        %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
        %xdefine %2.skip_prologue %2 %+ .skip_prologue
        CAT_XDEFINE cglobaled_, %2, 1
    %endif
    %xdefine current_function %2
    %xdefine current_function_section __SECT__
    %if FORMAT_ELF
        global %2:function %%VISIBILITY
    %else
        global %2
    %endif
    align function_align
    %2:
    RESET_MM_PERMUTATION        ; needed for x86-64, also makes disassembly somewhat nicer
    %xdefine rstk rsp           ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
    %assign stack_offset 0      ; stack pointer offset relative to the return address
    %assign stack_size 0        ; amount of stack space that can be freely used inside a function
    %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
    %assign xmm_regs_used 0     ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64 and vzeroupper
    %ifnidn %3, ""
        PROLOGUE %3
    %endif
%endmacro

; Create a global symbol from a local label with the correct name mangling and type
%macro cglobal_label 1
    %if FORMAT_ELF
        global current_function %+ %1:function hidden
    %else
        global current_function %+ %1
    %endif
    %1:
%endmacro
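; e.g. (illustrative): export an entry point in the middle of the current
; function as <current_function>.loop with the same mangling and visibility:
;     cglobal_label .loop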

%macro cextern 1
    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

; like cextern, but without the prefix
%macro cextern_naked 1
    %ifdef PREFIX
        %xdefine %1 mangle(%1)
    %endif
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

%macro const 1-2+
    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    %if FORMAT_ELF
        global %1:data hidden
    %else
        global %1
    %endif
    %1: %2
%endmacro
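; e.g. (illustrative; the constant name is hypothetical):
;     SECTION_RODATA
;     const pw_example_1, times 8 dw 1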

; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
%if FORMAT_ELF
    [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
%endif

; Tell debuggers how large the function was.
; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
; This is invoked by RET and similar macros, and also cglobal does it for the previous function,
; but if the last function in a source file doesn't use any of the standard macros for its epilogue,
; then its size might be unspecified.
%macro annotate_function_size 0
    %ifdef __YASM_VER__
        %ifdef current_function
            %if FORMAT_ELF
                current_function_section
                %%ecf equ $
                size current_function %%ecf - current_function
                __SECT__
            %endif
        %endif
    %endif
%endmacro

; cpuflags

%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_lzcnt    (1<<7) | cpuflags_sse2
%assign cpuflags_sse3     (1<<8) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<9) | cpuflags_sse3
%assign cpuflags_sse4     (1<<10)| cpuflags_ssse3
%assign cpuflags_sse42    (1<<11)| cpuflags_sse4
%assign cpuflags_aesni    (1<<12)| cpuflags_sse42
%assign cpuflags_avx      (1<<13)| cpuflags_sse42
%assign cpuflags_xop      (1<<14)| cpuflags_avx
%assign cpuflags_fma4     (1<<15)| cpuflags_avx
%assign cpuflags_fma3     (1<<16)| cpuflags_avx
%assign cpuflags_bmi1     (1<<17)| cpuflags_avx|cpuflags_lzcnt
%assign cpuflags_bmi2     (1<<18)| cpuflags_bmi1
%assign cpuflags_avx2     (1<<19)| cpuflags_fma3|cpuflags_bmi2
%assign cpuflags_avx512   (1<<20)| cpuflags_avx2 ; F, CD, BW, DQ, VL

%assign cpuflags_cache32  (1<<21)
%assign cpuflags_cache64  (1<<22)
%assign cpuflags_aligned  (1<<23) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<24)

; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
%define    cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
%define notcpuflag(x) (cpuflag(x) ^ 1)
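; e.g. (illustrative): select code at assembly time depending on the cpu the
; current function is being built for:
;     %if cpuflag(ssse3)
;         pshufb m0, m1
;     %else
;         ; pre-SSSE3 fallback goes here
;     %endif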

; Takes an arbitrary number of cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-*
    %xdefine SUFFIX
    %undef cpuname
    %assign cpuflags 0

    %if %0 >= 1
        %rep %0
            %ifdef cpuname
                %xdefine cpuname cpuname %+ _%1
            %else
                %xdefine cpuname %1
            %endif
            %assign cpuflags cpuflags | cpuflags_%1
            %rotate 1
        %endrep
        %xdefine SUFFIX _ %+ cpuname

        %if cpuflag(avx)
            %assign avx_enabled 1
        %endif
        %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
            %define mova movaps
            %define movu movups
            %define movnta movntps
        %endif
        %if cpuflag(aligned)
            %define movu mova
        %elif cpuflag(sse3) && notcpuflag(ssse3)
            %define movu lddqu
        %endif
    %endif

    %if ARCH_X86_64 || cpuflag(sse2)
        CPUNOP amdnop
    %else
        CPUNOP basicnop
    %endif
%endmacro

; Merge mmx, sse*, and avx*
; m# is a simd register of the currently selected size
; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
; zm# is the corresponding zmm register if mmsize >= 64, otherwise the same as m#
; (All 4 remain in sync through SWAP.)
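;
; e.g. (illustrative; bufq is a hypothetical argument name): after INIT_YMM avx2,
; m0 is ymm0 and xm0 is its xmm view:
;     INIT_YMM avx2
;     mova  m0, [bufq]   ; 32-byte load
;     paddw xm0, xm1     ; operates on the low 128 bits of the same registers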

%macro CAT_XDEFINE 3
    %xdefine %1%2 %3
%endmacro

%macro CAT_UNDEF 2
    %undef %1%2
%endmacro

%macro DEFINE_MMREGS 1 ; mmtype
    %assign %%prev_mmregs 0
    %ifdef num_mmregs
        %assign %%prev_mmregs num_mmregs
    %endif

    %assign num_mmregs 8
    %if ARCH_X86_64 && mmsize >= 16
        %assign num_mmregs 16
        %if cpuflag(avx512) || mmsize == 64
            %assign num_mmregs 32
        %endif
    %endif

    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE m, %%i, %1 %+ %%i
        CAT_XDEFINE nn%1, %%i, %%i
        %assign %%i %%i+1
    %endrep
    %if %%prev_mmregs > num_mmregs
        %rep %%prev_mmregs - num_mmregs
            CAT_UNDEF m, %%i
            CAT_UNDEF nn %+ mmtype, %%i
            %assign %%i %%i+1
        %endrep
    %endif
    %xdefine mmtype %1
%endmacro

; Prefer registers 16-31 over 0-15 to avoid having to use vzeroupper
%macro AVX512_MM_PERMUTATION 0-1 0 ; start_reg
    %if ARCH_X86_64 && cpuflag(avx512)
        %assign %%i %1
        %rep 16-%1
            %assign %%i_high %%i+16
            SWAP %%i, %%i_high
            %assign %%i %%i+1
        %endrep
    %endif
%endmacro

%macro INIT_MMX 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_MMX %1
    %define mmsize 8
    %define mova movq
    %define movu movq
    %define movh movd
    %define movnta movntq
    INIT_CPUFLAGS %1
    DEFINE_MMREGS mm
%endmacro

%macro INIT_XMM 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_XMM %1
    %define mmsize 16
    %define mova movdqa
    %define movu movdqu
    %define movh movq
    %define movnta movntdq
    INIT_CPUFLAGS %1
    DEFINE_MMREGS xmm
    %if WIN64
        AVX512_MM_PERMUTATION 6 ; Swap callee-saved registers with volatile registers
    %endif
%endmacro

%macro INIT_YMM 0-1+
    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_YMM %1
    %define mmsize 32
    %define mova movdqa
    %define movu movdqu
    %undef movh
    %define movnta movntdq
    INIT_CPUFLAGS %1
    DEFINE_MMREGS ymm
    AVX512_MM_PERMUTATION
%endmacro

%macro INIT_ZMM 0-1+
    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_ZMM %1
    %define mmsize 64
    %define mova movdqa
    %define movu movdqu
    %undef movh
    %define movnta movntdq
    INIT_CPUFLAGS %1
    DEFINE_MMREGS zmm
    AVX512_MM_PERMUTATION
%endmacro

INIT_XMM

%macro DECLARE_MMCAST 1
    %define  mmmm%1   mm%1
    %define  mmxmm%1  mm%1
    %define  mmymm%1  mm%1
    %define  mmzmm%1  mm%1
    %define xmmmm%1   mm%1
    %define xmmxmm%1 xmm%1
    %define xmmymm%1 xmm%1
    %define xmmzmm%1 xmm%1
    %define ymmmm%1   mm%1
    %define ymmxmm%1 xmm%1
    %define ymmymm%1 ymm%1
    %define ymmzmm%1 ymm%1
    %define zmmmm%1   mm%1
    %define zmmxmm%1 xmm%1
    %define zmmymm%1 ymm%1
    %define zmmzmm%1 zmm%1
    %define xm%1 xmm %+ m%1
    %define ym%1 ymm %+ m%1
    %define zm%1 zmm %+ m%1
%endmacro

%assign i 0
%rep 32
    DECLARE_MMCAST i
    %assign i i+1
%endrep

; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
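;
; e.g. (illustrative): after
;     SWAP 0, 2
; the name m0 refers to the register previously called m2 and vice versa;
; no instructions are emitted.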

%macro PERMUTE 2-* ; takes a list of pairs to swap
    %rep %0/2
        %xdefine %%tmp%2 m%2
        %rotate 2
    %endrep
    %rep %0/2
        %xdefine m%1 %%tmp%2
        CAT_XDEFINE nn, m%1, %1
        %rotate 2
    %endrep
%endmacro

%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
    %ifnum %1 ; SWAP 0, 1, ...
        SWAP_INTERNAL_NUM %1, %2
    %else ; SWAP m0, m1, ...
        SWAP_INTERNAL_NAME %1, %2
    %endif
%endmacro

%macro SWAP_INTERNAL_NUM 2-*
    %rep %0-1
        %xdefine %%tmp m%1
        %xdefine m%1 m%2
        %xdefine m%2 %%tmp
        CAT_XDEFINE nn, m%1, %1
        CAT_XDEFINE nn, m%2, %2
        %rotate 1
    %endrep
%endmacro

%macro SWAP_INTERNAL_NAME 2-*
    %xdefine %%args nn %+ %1
    %rep %0-1
        %xdefine %%args %%args, nn %+ %2
        %rotate 1
    %endrep
    SWAP_INTERNAL_NUM %%args
%endmacro

; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
%macro SAVE_MM_PERMUTATION 0-1
    %if %0
        %xdefine %%f %1_m
    %else
        %xdefine %%f current_function %+ _m
    %endif
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE %%f, %%i, m %+ %%i
        %assign %%i %%i+1
    %endrep
%endmacro
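; e.g. (illustrative, hypothetical helper): record the permutation at the end of
; a function that returns results in permuted mmregs, so that callers using the
; ``call'' macro below pick it up automatically:
;     cglobal example_helper
;         SWAP 0, 1
;         SAVE_MM_PERMUTATION
;         ret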

%macro LOAD_MM_PERMUTATION 1 ; name to load from
    %ifdef %1_m0
        %assign %%i 0
        %rep num_mmregs
            CAT_XDEFINE m, %%i, %1_m %+ %%i
            CAT_XDEFINE nn, m %+ %%i, %%i
            %assign %%i %%i+1
        %endrep
    %endif
%endmacro

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
    %ifid %1
        call_internal %1 %+ SUFFIX, %1
    %else
        call %1
    %endif
%endmacro
%macro call_internal 2
    %xdefine %%i %2
    %ifndef cglobaled_%2
        %ifdef cglobaled_%1
            %xdefine %%i %1
        %endif
    %endif
    call %%i
    LOAD_MM_PERMUTATION %%i
%endmacro

; Substitutions that reduce instruction size but are functionally equivalent
%macro add 2
    %ifnum %2
        %if %2==128
            sub %1, -128
        %else
            add %1, %2
        %endif
    %else
        add %1, %2
    %endif
%endmacro

%macro sub 2
    %ifnum %2
        %if %2==128
            add %1, -128
        %else
            sub %1, %2
        %endif
    %else
        sub %1, %2
    %endif
%endmacro
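; e.g. ``add r0, 128'' is emitted as ``sub r0, -128'', which fits in a
; sign-extended 8-bit immediate instead of a 32-bit one.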

;=============================================================================
; AVX abstraction layer
;=============================================================================

%assign i 0
%rep 32
    %if i < 8
        CAT_XDEFINE sizeofmm, i, 8
        CAT_XDEFINE regnumofmm, i, i
    %endif
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32
    CAT_XDEFINE sizeofzmm, i, 64
    CAT_XDEFINE regnumofxmm, i, i
    CAT_XDEFINE regnumofymm, i, i
    CAT_XDEFINE regnumofzmm, i, i
    %assign i i+1
%endrep
%undef i

%macro CHECK_AVX_INSTR_EMU 3-*
    %xdefine %%opcode %1
    %xdefine %%dst %2
    %rep %0-2
        %ifidn %%dst, %3
            %error non-avx emulation of ``%%opcode'' is not supported
        %endif
        %rotate 1
    %endrep
%endmacro

;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
;%6+: operands
%macro RUN_AVX_INSTR 6-9+
    %ifnum sizeof%7
        %assign __sizeofreg sizeof%7
    %elifnum sizeof%6
        %assign __sizeofreg sizeof%6
    %else
        %assign __sizeofreg mmsize
    %endif
    %assign __emulate_avx 0
    %if avx_enabled && __sizeofreg >= 16
        %xdefine __instr v%1
    %else
        %xdefine __instr %1
        %if %0 >= 8+%4
            %assign __emulate_avx 1
        %endif
    %endif
    %ifnidn %2, fnord
        %ifdef cpuname
            %if notcpuflag(%2)
                %error use of ``%1'' %2 instruction in cpuname function: current_function
            %elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
                %error use of ``%1'' sse2 instruction in cpuname function: current_function
            %endif
        %endif
    %endif

    %if __emulate_avx
        %xdefine __src1 %7
        %xdefine __src2 %8
        %if %5 && %4 == 0
            %ifnidn %6, %7
                %ifidn %6, %8
                    %xdefine __src1 %8
                    %xdefine __src2 %7
                %elifnnum sizeof%8
                    ; 3-operand AVX instructions with a memory arg can only have it in src2,
                    ; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
                    ; So, if the instruction is commutative with a memory arg, swap them.
                    %xdefine __src1 %8
                    %xdefine __src2 %7
                %endif
            %endif
        %endif
        %ifnidn %6, __src1
            %if %0 >= 9
                CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, __src2, %9
            %else
                CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, __src2
            %endif
            %if __sizeofreg == 8
                MOVQ %6, __src1
            %elif %3
                MOVAPS %6, __src1
            %else
                MOVDQA %6, __src1
            %endif
        %endif
        %if %0 >= 9
            %1 %6, __src2, %9
        %else
            %1 %6, __src2
        %endif
    %elif %0 >= 9
        __instr %6, %7, %8, %9
    %elif %0 == 8
        __instr %6, %7, %8
    %elif %0 == 7
        __instr %6, %7
    %else
        __instr %6
    %endif
%endmacro

;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 1-5 fnord, 0, 255, 0
    %macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
        %ifidn %2, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
        %elifidn %3, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
        %elifidn %4, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
        %elifidn %5, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
        %else
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
        %endif
    %endmacro
%endmacro
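; e.g. (illustrative): given ``AVX_INSTR addps, sse, 1, 0, 1'' below, writing
;     addps m0, m1, m2
; assembles to ``vaddps m0, m1, m2'' when AVX is enabled, and is otherwise
; emulated as ``movaps m0, m1'' followed by ``addps m0, m2''.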

; Instructions with both VEX/EVEX and legacy encodings
; Non-destructive instructions are written without parameters
AVX_INSTR addpd, sse2, 1, 0, 1
AVX_INSTR addps, sse, 1, 0, 1
AVX_INSTR addsd, sse2, 1, 0, 0
AVX_INSTR addss, sse, 1, 0, 0
AVX_INSTR addsubpd, sse3, 1, 0, 0
AVX_INSTR addsubps, sse3, 1, 0, 0
AVX_INSTR aesdec, aesni, 0, 0, 0
AVX_INSTR aesdeclast, aesni, 0, 0, 0
AVX_INSTR aesenc, aesni, 0, 0, 0
AVX_INSTR aesenclast, aesni, 0, 0, 0
AVX_INSTR aesimc, aesni
AVX_INSTR aeskeygenassist, aesni
AVX_INSTR andnpd, sse2, 1, 0, 0
AVX_INSTR andnps, sse, 1, 0, 0
AVX_INSTR andpd, sse2, 1, 0, 1
AVX_INSTR andps, sse, 1, 0, 1
AVX_INSTR blendpd, sse4, 1, 1, 0
AVX_INSTR blendps, sse4, 1, 1, 0
AVX_INSTR blendvpd, sse4 ; can't be emulated
AVX_INSTR blendvps, sse4 ; can't be emulated
AVX_INSTR cmpeqpd, sse2, 1, 0, 1
AVX_INSTR cmpeqps, sse, 1, 0, 1
AVX_INSTR cmpeqsd, sse2, 1, 0, 0
AVX_INSTR cmpeqss, sse, 1, 0, 0
AVX_INSTR cmplepd, sse2, 1, 0, 0
AVX_INSTR cmpleps, sse, 1, 0, 0
AVX_INSTR cmplesd, sse2, 1, 0, 0
AVX_INSTR cmpless, sse, 1, 0, 0
AVX_INSTR cmpltpd, sse2, 1, 0, 0
AVX_INSTR cmpltps, sse, 1, 0, 0
AVX_INSTR cmpltsd, sse2, 1, 0, 0
AVX_INSTR cmpltss, sse, 1, 0, 0
AVX_INSTR cmpneqpd, sse2, 1, 0, 1
AVX_INSTR cmpneqps, sse, 1, 0, 1
AVX_INSTR cmpneqsd, sse2, 1, 0, 0
AVX_INSTR cmpneqss, sse, 1, 0, 0
AVX_INSTR cmpnlepd, sse2, 1, 0, 0
AVX_INSTR cmpnleps, sse, 1, 0, 0
AVX_INSTR cmpnlesd, sse2, 1, 0, 0
AVX_INSTR cmpnless, sse, 1, 0, 0
AVX_INSTR cmpnltpd, sse2, 1, 0, 0
AVX_INSTR cmpnltps, sse, 1, 0, 0
AVX_INSTR cmpnltsd, sse2, 1, 0, 0
AVX_INSTR cmpnltss, sse, 1, 0, 0
AVX_INSTR cmpordpd, sse2, 1, 0, 1
AVX_INSTR cmpordps, sse, 1, 0, 1
AVX_INSTR cmpordsd, sse2, 1, 0, 0
AVX_INSTR cmpordss, sse, 1, 0, 0
AVX_INSTR cmppd, sse2, 1, 1, 0
AVX_INSTR cmpps, sse, 1, 1, 0
AVX_INSTR cmpsd, sse2, 1, 1, 0
AVX_INSTR cmpss, sse, 1, 1, 0
AVX_INSTR cmpunordpd, sse2, 1, 0, 1
AVX_INSTR cmpunordps, sse, 1, 0, 1
AVX_INSTR cmpunordsd, sse2, 1, 0, 0
AVX_INSTR cmpunordss, sse, 1, 0, 0
AVX_INSTR comisd, sse2
AVX_INSTR comiss, sse
AVX_INSTR cvtdq2pd, sse2
AVX_INSTR cvtdq2ps, sse2
AVX_INSTR cvtpd2dq, sse2
AVX_INSTR cvtpd2ps, sse2
AVX_INSTR cvtps2dq, sse2
AVX_INSTR cvtps2pd, sse2
AVX_INSTR cvtsd2si, sse2
AVX_INSTR cvtsd2ss, sse2, 1, 0, 0
AVX_INSTR cvtsi2sd, sse2, 1, 0, 0
AVX_INSTR cvtsi2ss, sse, 1, 0, 0
AVX_INSTR cvtss2sd, sse2, 1, 0, 0
AVX_INSTR cvtss2si, sse
AVX_INSTR cvttpd2dq, sse2
AVX_INSTR cvttps2dq, sse2
AVX_INSTR cvttsd2si, sse2
AVX_INSTR cvttss2si, sse
AVX_INSTR divpd, sse2, 1, 0, 0
AVX_INSTR divps, sse, 1, 0, 0
AVX_INSTR divsd, sse2, 1, 0, 0
AVX_INSTR divss, sse, 1, 0, 0
AVX_INSTR dppd, sse4, 1, 1, 0
AVX_INSTR dpps, sse4, 1, 1, 0
AVX_INSTR extractps, sse4
AVX_INSTR haddpd, sse3, 1, 0, 0
AVX_INSTR haddps, sse3, 1, 0, 0
AVX_INSTR hsubpd, sse3, 1, 0, 0
AVX_INSTR hsubps, sse3, 1, 0, 0
AVX_INSTR insertps, sse4, 1, 1, 0
AVX_INSTR lddqu, sse3
AVX_INSTR ldmxcsr, sse
AVX_INSTR maskmovdqu, sse2
AVX_INSTR maxpd, sse2, 1, 0, 1
AVX_INSTR maxps, sse, 1, 0, 1
AVX_INSTR maxsd, sse2, 1, 0, 0
AVX_INSTR maxss, sse, 1, 0, 0
AVX_INSTR minpd, sse2, 1, 0, 1
AVX_INSTR minps, sse, 1, 0, 1
AVX_INSTR minsd, sse2, 1, 0, 0
AVX_INSTR minss, sse, 1, 0, 0
AVX_INSTR movapd, sse2
AVX_INSTR movaps, sse
AVX_INSTR movd, mmx
AVX_INSTR movddup, sse3
AVX_INSTR movdqa, sse2
AVX_INSTR movdqu, sse2
AVX_INSTR movhlps, sse, 1, 0, 0
AVX_INSTR movhpd, sse2, 1, 0, 0
AVX_INSTR movhps, sse, 1, 0, 0
AVX_INSTR movlhps, sse, 1, 0, 0
AVX_INSTR movlpd, sse2, 1, 0, 0
AVX_INSTR movlps, sse, 1, 0, 0
AVX_INSTR movmskpd, sse2
AVX_INSTR movmskps, sse
AVX_INSTR movntdq, sse2
AVX_INSTR movntdqa, sse4
AVX_INSTR movntpd, sse2
AVX_INSTR movntps, sse
AVX_INSTR movq, mmx
AVX_INSTR movsd, sse2, 1, 0, 0
AVX_INSTR movshdup, sse3
AVX_INSTR movsldup, sse3
AVX_INSTR movss, sse, 1, 0, 0
AVX_INSTR movupd, sse2
AVX_INSTR movups, sse
AVX_INSTR mpsadbw, sse4, 0, 1, 0
AVX_INSTR mulpd, sse2, 1, 0, 1
AVX_INSTR mulps, sse, 1, 0, 1
AVX_INSTR mulsd, sse2, 1, 0, 0
AVX_INSTR mulss, sse, 1, 0, 0
AVX_INSTR orpd, sse2, 1, 0, 1
AVX_INSTR orps, sse, 1, 0, 1
AVX_INSTR pabsb, ssse3
AVX_INSTR pabsd, ssse3
AVX_INSTR pabsw, ssse3
AVX_INSTR packsswb, mmx, 0, 0, 0
AVX_INSTR packssdw, mmx, 0, 0, 0
AVX_INSTR packuswb, mmx, 0, 0, 0
AVX_INSTR packusdw, sse4, 0, 0, 0
AVX_INSTR paddb, mmx, 0, 0, 1
AVX_INSTR paddw, mmx, 0, 0, 1
AVX_INSTR paddd, mmx, 0, 0, 1
AVX_INSTR paddq, sse2, 0, 0, 1
AVX_INSTR paddsb, mmx, 0, 0, 1
AVX_INSTR paddsw, mmx, 0, 0, 1
AVX_INSTR paddusb, mmx, 0, 0, 1
AVX_INSTR paddusw, mmx, 0, 0, 1
AVX_INSTR palignr, ssse3, 0, 1, 0
AVX_INSTR pand, mmx, 0, 0, 1
AVX_INSTR pandn, mmx, 0, 0, 0
AVX_INSTR pavgb, mmx2, 0, 0, 1
AVX_INSTR pavgw, mmx2, 0, 0, 1
AVX_INSTR pblendvb, sse4 ; can't be emulated
AVX_INSTR pblendw, sse4, 0, 1, 0
AVX_INSTR pclmulqdq, fnord, 0, 1, 0
AVX_INSTR pclmulhqhqdq, fnord, 0, 0, 0
AVX_INSTR pclmulhqlqdq, fnord, 0, 0, 0
AVX_INSTR pclmullqhqdq, fnord, 0, 0, 0
AVX_INSTR pclmullqlqdq, fnord, 0, 0, 0
AVX_INSTR pcmpestri, sse42
AVX_INSTR pcmpestrm, sse42
AVX_INSTR pcmpistri, sse42
AVX_INSTR pcmpistrm, sse42
AVX_INSTR pcmpeqb, mmx, 0, 0, 1
AVX_INSTR pcmpeqw, mmx, 0, 0, 1
AVX_INSTR pcmpeqd, mmx, 0, 0, 1
AVX_INSTR pcmpeqq, sse4, 0, 0, 1
AVX_INSTR pcmpgtb, mmx, 0, 0, 0
AVX_INSTR pcmpgtw, mmx, 0, 0, 0
AVX_INSTR pcmpgtd, mmx, 0, 0, 0
AVX_INSTR pcmpgtq, sse42, 0, 0, 0
AVX_INSTR pextrb, sse4
AVX_INSTR pextrd, sse4
AVX_INSTR pextrq, sse4
AVX_INSTR pextrw, mmx2
AVX_INSTR phaddw, ssse3, 0, 0, 0
AVX_INSTR phaddd, ssse3, 0, 0, 0
AVX_INSTR phaddsw, ssse3, 0, 0, 0
AVX_INSTR phminposuw, sse4
AVX_INSTR phsubw, ssse3, 0, 0, 0
AVX_INSTR phsubd, ssse3, 0, 0, 0
AVX_INSTR phsubsw, ssse3, 0, 0, 0
AVX_INSTR pinsrb, sse4, 0, 1, 0
AVX_INSTR pinsrd, sse4, 0, 1, 0
AVX_INSTR pinsrq, sse4, 0, 1, 0
AVX_INSTR pinsrw, mmx2, 0, 1, 0
AVX_INSTR pmaddwd, mmx, 0, 0, 1
AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
AVX_INSTR pmaxsb, sse4, 0, 0, 1
AVX_INSTR pmaxsw, mmx2, 0, 0, 1
AVX_INSTR pmaxsd, sse4, 0, 0, 1
AVX_INSTR pmaxub, mmx2, 0, 0, 1
AVX_INSTR pmaxuw, sse4, 0, 0, 1
AVX_INSTR pmaxud, sse4, 0, 0, 1
AVX_INSTR pminsb, sse4, 0, 0, 1
AVX_INSTR pminsw, mmx2, 0, 0, 1
AVX_INSTR pminsd, sse4, 0, 0, 1
AVX_INSTR pminub, mmx2, 0, 0, 1
AVX_INSTR pminuw, sse4, 0, 0, 1
AVX_INSTR pminud, sse4, 0, 0, 1
AVX_INSTR pmovmskb, mmx2
AVX_INSTR pmovsxbw, sse4
AVX_INSTR pmovsxbd, sse4
AVX_INSTR pmovsxbq, sse4
AVX_INSTR pmovsxwd, sse4
AVX_INSTR pmovsxwq, sse4
AVX_INSTR pmovsxdq, sse4
AVX_INSTR pmovzxbw, sse4
AVX_INSTR pmovzxbd, sse4
AVX_INSTR pmovzxbq, sse4
AVX_INSTR pmovzxwd, sse4
AVX_INSTR pmovzxwq, sse4
AVX_INSTR pmovzxdq, sse4
AVX_INSTR pmuldq, sse4, 0, 0, 1
AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
AVX_INSTR pmulhuw, mmx2, 0, 0, 1
AVX_INSTR pmulhw, mmx, 0, 0, 1
AVX_INSTR pmullw, mmx, 0, 0, 1
AVX_INSTR pmulld, sse4, 0, 0, 1
AVX_INSTR pmuludq, sse2, 0, 0, 1
AVX_INSTR por, mmx, 0, 0, 1
AVX_INSTR psadbw, mmx2, 0, 0, 1
AVX_INSTR pshufb, ssse3, 0, 0, 0
AVX_INSTR pshufd, sse2
AVX_INSTR pshufhw, sse2
AVX_INSTR pshuflw, sse2
AVX_INSTR psignb, ssse3, 0, 0, 0
AVX_INSTR psignw, ssse3, 0, 0, 0
AVX_INSTR psignd, ssse3, 0, 0, 0
AVX_INSTR psllw, mmx, 0, 0, 0
AVX_INSTR pslld, mmx, 0, 0, 0
AVX_INSTR psllq, mmx, 0, 0, 0
AVX_INSTR pslldq, sse2, 0, 0, 0
AVX_INSTR psraw, mmx, 0, 0, 0
AVX_INSTR psrad, mmx, 0, 0, 0
AVX_INSTR psrlw, mmx, 0, 0, 0
AVX_INSTR psrld, mmx, 0, 0, 0
AVX_INSTR psrlq, mmx, 0, 0, 0
AVX_INSTR psrldq, sse2, 0, 0, 0
AVX_INSTR psubb, mmx, 0, 0, 0
AVX_INSTR psubw, mmx, 0, 0, 0
AVX_INSTR psubd, mmx, 0, 0, 0
AVX_INSTR psubq, sse2, 0, 0, 0
AVX_INSTR psubsb, mmx, 0, 0, 0
AVX_INSTR psubsw, mmx, 0, 0, 0
AVX_INSTR psubusb, mmx, 0, 0, 0
AVX_INSTR psubusw, mmx, 0, 0, 0
AVX_INSTR ptest, sse4
AVX_INSTR punpckhbw, mmx, 0, 0, 0
AVX_INSTR punpckhwd, mmx, 0, 0, 0
AVX_INSTR punpckhdq, mmx, 0, 0, 0
AVX_INSTR punpckhqdq, sse2, 0, 0, 0
AVX_INSTR punpcklbw, mmx, 0, 0, 0
AVX_INSTR punpcklwd, mmx, 0, 0, 0
AVX_INSTR punpckldq, mmx, 0, 0, 0
AVX_INSTR punpcklqdq, sse2, 0, 0, 0
AVX_INSTR pxor, mmx, 0, 0, 1
AVX_INSTR rcpps, sse
AVX_INSTR rcpss, sse, 1, 0, 0
AVX_INSTR roundpd, sse4
AVX_INSTR roundps, sse4
AVX_INSTR roundsd, sse4, 1, 1, 0
AVX_INSTR roundss, sse4, 1, 1, 0
AVX_INSTR rsqrtps, sse
AVX_INSTR rsqrtss, sse, 1, 0, 0
AVX_INSTR shufpd, sse2, 1, 1, 0
AVX_INSTR shufps, sse, 1, 1, 0
AVX_INSTR sqrtpd, sse2
AVX_INSTR sqrtps, sse
AVX_INSTR sqrtsd, sse2, 1, 0, 0
AVX_INSTR sqrtss, sse, 1, 0, 0
AVX_INSTR stmxcsr, sse
AVX_INSTR subpd, sse2, 1, 0, 0
AVX_INSTR subps, sse, 1, 0, 0
AVX_INSTR subsd, sse2, 1, 0, 0
AVX_INSTR subss, sse, 1, 0, 0
AVX_INSTR ucomisd, sse2
AVX_INSTR ucomiss, sse
AVX_INSTR unpckhpd, sse2, 1, 0, 0
AVX_INSTR unpckhps, sse, 1, 0, 0
AVX_INSTR unpcklpd, sse2, 1, 0, 0
AVX_INSTR unpcklps, sse, 1, 0, 0
AVX_INSTR xorpd, sse2, 1, 0, 1
AVX_INSTR xorps, sse, 1, 0, 1

; 3DNow! instructions, for sharing code between AVX, SSE and 3DNow! code paths
AVX_INSTR pfadd, 3dnow, 1, 0, 1
AVX_INSTR pfsub, 3dnow, 1, 0, 0
AVX_INSTR pfmul, 3dnow, 1, 0, 1

; base-4 constants for shuffles
%assign i 0
%rep 256
    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
    %if j < 10
        CAT_XDEFINE q000, j, i
    %elif j < 100
        CAT_XDEFINE q00, j, i
    %elif j < 1000
        CAT_XDEFINE q0, j, i
    %else
        CAT_XDEFINE q, j, i
    %endif
    %assign i i+1
%endrep
%undef i
%undef j
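; e.g. q3120 expands to 0xD8 ((3<<6)|(1<<4)|(2<<2)|0), so (illustrative)
;     pshufd m0, m0, q3120
; swaps the two middle dwords of m0.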

%macro FMA_INSTR 3
    %macro %1 4-7 %1, %2, %3
        %if cpuflag(xop)
            v%5 %1, %2, %3, %4
        %elifnidn %1, %4
            %6 %1, %2, %3
            %7 %1, %4
        %else
            %error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
        %endif
    %endmacro
%endmacro

FMA_INSTR  pmacsww,  pmullw, paddw
FMA_INSTR  pmacsdd,  pmulld, paddd ; sse4 emulation
FMA_INSTR pmacsdql,  pmuldq, paddq ; sse4 emulation
FMA_INSTR pmadcswd, pmaddwd, paddd
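; e.g. (illustrative): ``pmacsww m0, m1, m2, m3'' computes m0 = m1*m2 + m3 as a
; single XOP instruction when available, and otherwise falls back to
; ``pmullw m0, m1, m2'' + ``paddw m0, m3'' (so the destination must differ from
; the addend).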

; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf
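; e.g. ``tzcnt eax, ecx'' is emitted as ``rep bsf eax, ecx'': cpus with BMI1
; decode it as tzcnt, while older ones ignore the rep prefix and execute plain bsf.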

; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
; FMA3 is only possible if dst is the same as one of the src registers.
; Either src2 or src3 can be a memory operand.
%macro FMA4_INSTR 2-*
    %push fma4_instr
    %xdefine %$prefix %1
    %rep %0 - 1
        %macro %$prefix%2 4-6 %$prefix, %2
            %if notcpuflag(fma3) && notcpuflag(fma4)
                %error use of ``%5%6'' fma instruction in cpuname function: current_function
            %elif cpuflag(fma4)
                v%5%6 %1, %2, %3, %4
            %elifidn %1, %2
                ; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
                %ifnum sizeof%3
                    v%{5}213%6 %2, %3, %4
                %else
                    v%{5}132%6 %2, %4, %3
                %endif
            %elifidn %1, %3
                v%{5}213%6 %3, %2, %4
            %elifidn %1, %4
                v%{5}231%6 %4, %2, %3
            %else
                %error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported
            %endif
        %endmacro
        %rotate 1
    %endrep
    %pop
%endmacro

FMA4_INSTR fmadd,    pd, ps, sd, ss
FMA4_INSTR fmaddsub, pd, ps
FMA4_INSTR fmsub,    pd, ps, sd, ss
FMA4_INSTR fmsubadd, pd, ps
FMA4_INSTR fnmadd,   pd, ps, sd, ss
FMA4_INSTR fnmsub,   pd, ps, sd, ss
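; e.g. (illustrative): ``fmaddps m0, m1, m2, m0'' computes m0 = m1*m2 + m0,
; emitted as vfmaddps on FMA4 or as ``vfmadd231ps m0, m1, m2'' on FMA3.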

; Macros for converting VEX instructions to equivalent EVEX ones.
%macro EVEX_INSTR 2-3 0 ; vex, evex, prefer_evex
    %macro %1 2-7 fnord, fnord, %1, %2, %3
        %ifidn %3, fnord
            %define %%args %1, %2
        %elifidn %4, fnord
            %define %%args %1, %2, %3
        %else
            %define %%args %1, %2, %3, %4
        %endif
        %assign %%evex_required cpuflag(avx512) & %7
        %ifnum regnumof%1
            %if regnumof%1 >= 16 || sizeof%1 > 32
                %assign %%evex_required 1
            %endif
        %endif
        %ifnum regnumof%2
            %if regnumof%2 >= 16 || sizeof%2 > 32
                %assign %%evex_required 1
            %endif
        %endif
        %if %%evex_required
            %6 %%args
        %else
            %5 %%args ; Prefer VEX over EVEX due to shorter instruction length
        %endif
    %endmacro
%endmacro

EVEX_INSTR vbroadcastf128, vbroadcastf32x4
EVEX_INSTR vbroadcasti128, vbroadcasti32x4
EVEX_INSTR vextractf128,   vextractf32x4
EVEX_INSTR vextracti128,   vextracti32x4
EVEX_INSTR vinsertf128,    vinsertf32x4
EVEX_INSTR vinserti128,    vinserti32x4
EVEX_INSTR vmovdqa,        vmovdqa32
EVEX_INSTR vmovdqu,        vmovdqu32
EVEX_INSTR vpand,          vpandd
EVEX_INSTR vpandn,         vpandnd
EVEX_INSTR vpor,           vpord
EVEX_INSTR vpxor,          vpxord
EVEX_INSTR vrcpps,         vrcp14ps,   1 ; EVEX versions have higher precision
EVEX_INSTR vrcpss,         vrcp14ss,   1
EVEX_INSTR vrsqrtps,       vrsqrt14ps, 1
EVEX_INSTR vrsqrtss,       vrsqrt14ss, 1
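; e.g. (illustrative): in an avx512 function, ``mova m20, [r0q]'' ends up as
; vmovdqa32 because registers 16-31 are only addressable with an EVEX encoding.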

; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0)
%ifdef __YASM_VER__
    %if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0
        %macro vpbroadcastq 2
            %if sizeof%1 == 16
                movddup %1, %2
            %else
                vbroadcastsd %1, %2
            %endif
        %endmacro
    %endif
%endif