Commit 11b36b1e authored by James Almer, committed by Michael Niedermayer

x86/float_dsp: unroll loop in vector_fmac_scalar

~6% faster SSE2 performance. AVX/FMA3 are unaffected.
Signed-off-by: James Almer <jamrial@gmail.com>
Reviewed-by: Christophe Gisquet <christophe.gisquet@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
parent 27f184ef
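
For context, ff_vector_fmac_scalar(dst, src, mul, len) multiplies each element of src by the scalar mul and accumulates the result into dst. A plain C sketch of the operation (roughly what the C reference in libavutil/float_dsp.c does; the function name below is only illustrative):

    static void vector_fmac_scalar_c(float *dst, const float *src, float mul, int len)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i] += src[i] * mul;   /* scalar multiply-accumulate */
    }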
@@ -61,9 +61,9 @@ VECTOR_FMUL
 
 %macro VECTOR_FMAC_SCALAR 0
 %if UNIX64
-cglobal vector_fmac_scalar, 3,3,3, dst, src, len
+cglobal vector_fmac_scalar, 3,3,5, dst, src, len
 %else
-cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
+cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
 %endif
 %if ARCH_X86_32
     VBROADCASTSS m0, mulm
@@ -78,23 +78,31 @@ cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
 %endif
     lea    lenq, [lend*4-64]
 .loop:
-%assign a 0
-%rep 32/mmsize
 %if cpuflag(fma3)
-    mova     m1, [dstq+lenq+(a+0)*mmsize]
-    mova     m2, [dstq+lenq+(a+1)*mmsize]
-    fmaddps  m1, m0, [srcq+lenq+(a+0)*mmsize], m1
-    fmaddps  m2, m0, [srcq+lenq+(a+1)*mmsize], m2
-%else
-    mulps    m1, m0, [srcq+lenq+(a+0)*mmsize]
-    mulps    m2, m0, [srcq+lenq+(a+1)*mmsize]
-    addps    m1, m1, [dstq+lenq+(a+0)*mmsize]
-    addps    m2, m2, [dstq+lenq+(a+1)*mmsize]
-%endif
-    mova  [dstq+lenq+(a+0)*mmsize], m1
-    mova  [dstq+lenq+(a+1)*mmsize], m2
-%assign a a+2
-%endrep
+    mova     m1,     [dstq+lenq]
+    mova     m2,     [dstq+lenq+1*mmsize]
+    fmaddps  m1, m0, [srcq+lenq], m1
+    fmaddps  m2, m0, [srcq+lenq+1*mmsize], m2
+%else ; cpuflag
+    mulps    m1, m0, [srcq+lenq]
+    mulps    m2, m0, [srcq+lenq+1*mmsize]
+%if mmsize < 32
+    mulps    m3, m0, [srcq+lenq+2*mmsize]
+    mulps    m4, m0, [srcq+lenq+3*mmsize]
+%endif ; mmsize
+    addps    m1, m1, [dstq+lenq]
+    addps    m2, m2, [dstq+lenq+1*mmsize]
+%if mmsize < 32
+    addps    m3, m3, [dstq+lenq+2*mmsize]
+    addps    m4, m4, [dstq+lenq+3*mmsize]
+%endif ; mmsize
+%endif ; cpuflag
+    mova  [dstq+lenq], m1
+    mova  [dstq+lenq+1*mmsize], m2
+%if mmsize < 32
+    mova  [dstq+lenq+2*mmsize], m3
+    mova  [dstq+lenq+3*mmsize], m4
+%endif ; mmsize
     sub    lenq, 64
     jge    .loop
 REP_RET
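
Concretely, the SSE2 loop still processes 64 bytes of dst per iteration, but it now uses four distinct xmm registers (m1-m4) instead of reusing m1/m2 twice via %rep, so the second pair of multiplies is independent of the first; the AVX/FMA3 paths already cover 64 bytes with two ymm registers, hence the mmsize < 32 guards and the register count bump from 3 to 5 in cglobal. A rough C-intrinsics sketch of one unrolled SSE iteration (an illustration only, not FFmpeg code; assumes 16-byte-aligned buffers and len a multiple of 16):

    #include <xmmintrin.h>

    static void fmac_scalar_unrolled_sketch(float *dst, const float *src, float mul, int len)
    {
        __m128 m0 = _mm_set1_ps(mul);            /* broadcast mul, like VBROADCASTSS */
        for (int i = 0; i < len; i += 16) {      /* 16 floats = 64 bytes per iteration */
            __m128 m1 = _mm_mul_ps(m0, _mm_load_ps(src + i));
            __m128 m2 = _mm_mul_ps(m0, _mm_load_ps(src + i + 4));
            __m128 m3 = _mm_mul_ps(m0, _mm_load_ps(src + i + 8));
            __m128 m4 = _mm_mul_ps(m0, _mm_load_ps(src + i + 12));
            _mm_store_ps(dst + i,      _mm_add_ps(m1, _mm_load_ps(dst + i)));
            _mm_store_ps(dst + i + 4,  _mm_add_ps(m2, _mm_load_ps(dst + i + 4)));
            _mm_store_ps(dst + i + 8,  _mm_add_ps(m3, _mm_load_ps(dst + i + 8)));
            _mm_store_ps(dst + i + 12, _mm_add_ps(m4, _mm_load_ps(dst + i + 12)));
        }
    }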