Commit 5ca8e195 authored by Henrik Gramner, committed by Anton Khirnov

x86inc: Use more consistent indentation

Signed-off-by: Anton Khirnov <anton@khirnov.net>
parent 91ed050f
@@ -183,9 +183,9 @@
    %define e%1h %3
    %define r%1b %2
    %define e%1b %2
    %if ARCH_X86_64 == 0
        %define r%1 e%1
    %endif
%endmacro

DECLARE_REG_SIZE ax, al, ah
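For reference, this is roughly how one instantiation of DECLARE_REG_SIZE expands, derived from the macro body above (the r%1q/e%1q and word/dword defines sit in lines elided from the hunk):

; DECLARE_REG_SIZE ax, al, ah  ; => among other defines:
;   %define eaxh ah
;   %define raxb al
;   %define eaxb al
;   %if ARCH_X86_64 == 0
;       %define rax eax        ; on x86-32 the 64-bit name aliases the 32-bit reg
;   %endif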
@@ -504,9 +504,9 @@ DECLARE_REG 14, R15, 120
%macro RET 0
    WIN64_RESTORE_XMM_INTERNAL rsp
    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
    %if mmsize == 32
        vzeroupper
    %endif
    AUTO_REP_RET
%endmacro
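The mmsize == 32 test makes the epilogue emit vzeroupper only when assembling for ymm registers (INIT_YMM, below). vzeroupper zeroes the upper 128 bits of the ymm registers, which avoids the AVX-to-legacy-SSE transition penalty in callers. A minimal sketch, with a hypothetical function name:

; INIT_YMM avx2
; cglobal my_func, 1, 1, 2     ; my_func is hypothetical
;     mova m0, [r0]
;     RET                      ; expands to vzeroupper + pops + (rep) ret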
@@ -543,17 +543,17 @@ DECLARE_REG 14, R15, 72
%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0

%macro RET 0
    %if stack_size_padded > 0
        %if required_stack_alignment > STACK_ALIGNMENT
            mov rsp, rstkm
        %else
            add rsp, stack_size_padded
        %endif
    %endif
    POP_IF_USED 14, 13, 12, 11, 10, 9
    %if mmsize == 32
        vzeroupper
    %endif
    AUTO_REP_RET
%endmacro
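This RET has two stack-restore paths: when the function needed stricter alignment than the ABI guarantees (required_stack_alignment > STACK_ALIGNMENT), the prologue stashed the original rsp, so the epilogue reloads it from rstkm; otherwise undoing the padded allocation with a plain add suffices. With, say, stack_size_padded = 64 and no over-alignment, the macro should expand to something like:

;   add rsp, 64
;   pop r14                    ; POP_IF_USED pops only the regs actually used
;   ...
;   ret                        ; or "rep ret" via AUTO_REP_RET (see below)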
@@ -599,29 +599,29 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0

%macro RET 0
    %if stack_size_padded > 0
        %if required_stack_alignment > STACK_ALIGNMENT
            mov rsp, rstkm
        %else
            add rsp, stack_size_padded
        %endif
    %endif
    POP_IF_USED 6, 5, 4, 3
    %if mmsize == 32
        vzeroupper
    %endif
    AUTO_REP_RET
%endmacro

%endif ;======================================================================
%if WIN64 == 0
    %macro WIN64_SPILL_XMM 1
    %endmacro
    %macro WIN64_RESTORE_XMM 1
    %endmacro
    %macro WIN64_PUSH_XMM 0
    %endmacro
%endif
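These empty stubs exist so code can call the WIN64_* macros unconditionally: on Win64 they spill and restore the callee-saved xmm6+ registers, while on every other ABI they expand to nothing. Hypothetical usage:

; cglobal my_func, 2, 4, 10    ; my_func is hypothetical
;     WIN64_SPILL_XMM 10       ; saves xmm6-xmm9 on Win64, no-op elsewhere
;     ...
;     WIN64_RESTORE_XMM rsp    ; likewise a no-op outside Win64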
; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
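The rest of this comment falls outside the hunk, but the workaround it leads up to is the AUTO_REP_RET used by the RET macros above: prefixing the ret with rep, which the CPU ignores but which sidesteps the slow single-byte-ret case. A minimal sketch of the idea (assumed shape, not the exact x86inc definition):

; %macro REP_RET 0
;     rep ret                  ; the redundant prefix avoids the K8/K10 penalty
; %endmacro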
@@ -847,14 +847,14 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
    %define movnta movntq
    %assign %%i 0
    %rep 8
        CAT_XDEFINE m, %%i, mm %+ %%i
        CAT_XDEFINE nnmm, %%i, %%i
        %assign %%i %%i+1
    %endrep
    %rep 8
        CAT_UNDEF m, %%i
        CAT_UNDEF nnmm, %%i
        %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro
@@ -865,7 +865,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
    %define mmsize 16
    %define num_mmregs 8
    %if ARCH_X86_64
        %define num_mmregs 16
    %endif
    %define mova movdqa
    %define movu movdqu
@@ -873,9 +873,9 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
    %define movnta movntdq
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE m, %%i, xmm %+ %%i
        CAT_XDEFINE nnxmm, %%i, %%i
        %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro
@@ -886,7 +886,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
    %define mmsize 32
    %define num_mmregs 8
    %if ARCH_X86_64
        %define num_mmregs 16
    %endif
    %define mova movdqa
    %define movu movdqu
@@ -894,9 +894,9 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
    %define movnta movntdq
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE m, %%i, ymm %+ %%i
        CAT_XDEFINE nnymm, %%i, %%i
        %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro
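All three INIT_* variants build the same abstraction: m0 through m(num_mmregs-1) are redefined to the register file chosen at INIT time, so one code template can assemble to mmx, xmm, or ymm instructions. For example:

; INIT_XMM sse2
;     mova m0, [r0]            ; assembles as: movdqa xmm0, [r0]
; INIT_YMM avx2
;     mova m0, [r0]            ; same source line, now uses ymm0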
@@ -920,7 +920,7 @@ INIT_XMM
%assign i 0
%rep 16
    DECLARE_MMCAST i
    %assign i i+1
%endrep

; I often want to use macros that permute their arguments. e.g. there's no
@@ -938,23 +938,23 @@ INIT_XMM
; doesn't cost any cycles.

%macro PERMUTE 2-* ; takes a list of pairs to swap
    %rep %0/2
        %xdefine %%tmp%2 m%2
        %rotate 2
    %endrep
    %rep %0/2
        %xdefine m%1 %%tmp%2
        CAT_XDEFINE nn, m%1, %1
        %rotate 2
    %endrep
%endmacro

%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
    %ifnum %1 ; SWAP 0, 1, ...
        SWAP_INTERNAL_NUM %1, %2
    %else ; SWAP m0, m1, ...
        SWAP_INTERNAL_NAME %1, %2
    %endif
%endmacro
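As the comment above explains, PERMUTE and SWAP rename the m# aliases at assembly time rather than moving data, so they cost no instructions. A small usage sketch:

; SWAP 0, 1                    ; from here on, m0 and m1 trade meanings
; mova m0, [r0]                ; under INIT_XMM this now loads into xmm1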
%macro SWAP_INTERNAL_NUM 2-*
@@ -964,7 +964,7 @@ INIT_XMM
        %xdefine m%2 %%tmp
        CAT_XDEFINE nn, m%1, %1
        CAT_XDEFINE nn, m%2, %2
        %rotate 1
    %endrep
%endmacro
@@ -972,7 +972,7 @@ INIT_XMM
    %xdefine %%args nn %+ %1
    %rep %0-1
        %xdefine %%args %%args, nn %+ %2
        %rotate 1
    %endrep
    SWAP_INTERNAL_NUM %%args
%endmacro
@@ -989,7 +989,7 @@ INIT_XMM
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE %%f, %%i, m %+ %%i
        %assign %%i %%i+1
    %endrep
%endmacro
@@ -999,7 +999,7 @@ INIT_XMM
        %rep num_mmregs
            CAT_XDEFINE m, %%i, %1_m %+ %%i
            CAT_XDEFINE nn, m %+ %%i, %%i
            %assign %%i %%i+1
        %endrep
    %endif
%endmacro
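These two hunks appear to belong to SAVE_MM_PERMUTATION and LOAD_MM_PERMUTATION (the macro names sit outside the hunks): the first snapshots the current m# mapping under a name prefix, the second restores it, letting one function jump into a helper that was assembled with a different register permutation. Hypothetical usage:

; SAVE_MM_PERMUTATION my_helper    ; records m0..mN as my_helper_m0.. (name hypothetical)
; ...
; LOAD_MM_PERMUTATION my_helper    ; reinstates that mapping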
@@ -1055,7 +1055,7 @@ INIT_XMM
    %endif
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32
    %assign i i+1
%endrep
%undef i
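This loop attaches a width to every register name (e.g. sizeofxmm3 = 16, sizeofymm3 = 32), which other macros can presumably read back by token-pasting, along the lines of:

; %xdefine %%size sizeof %+ %1     ; with %1 = xmm3, pastes to sizeofxmm3 -> 16
;                                  ; (a sketch, not the exact x86inc lookup)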
@@ -1432,7 +1432,7 @@ AVX_INSTR pfmul, 3dnow, 1, 0, 1
    %else
        CAT_XDEFINE q, j, i
    %endif
    %assign i i+1
%endrep
%undef i
%undef j