Commit baf3adc6 authored by Michael Niedermayer

Merge commit '08e3ea60'

* commit '08e3ea60':
  x86: synth filter float: implement SSE2 version

Conflicts:
	libavcodec/x86/dcadsp.asm
	libavcodec/x86/dcadsp_init.c

See: 2cdbcc00
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 2cdbcc00 08e3ea60
libavcodec/x86/dcadsp.asm
@@ -178,19 +178,19 @@ DCA_LFE_FIR 1
 INIT_XMM sse2
 %macro INNER_LOOP 1
-; reading backwards: ptr1=synth_buf+j+i ptr2=synth_big+j-i
-;~ a += window[i + j ]*(-synth_buf[15 - i + j ])
-;~ b += window[i + j + 16]*( synth_buf[ i + j ])
-pshufd m5, [ptr2 + j + (15-3)*4], q0123
+; reading backwards: ptr1 = synth_buf + j + i; ptr2 = synth_buf + j - i
+;~ a += window[i + j] * (-synth_buf[15 - i + j])
+;~ b += window[i + j + 16] * (synth_buf[i + j])
+pshufd m5, [ptr2 + j + (15 - 3) * 4], q0123
 mova m6, [ptr1 + j]
 %if ARCH_X86_64
-pshufd m11, [ptr2 + j + (15-3)*4 - mmsize], q0123
+pshufd m11, [ptr2 + j + (15 - 3) * 4 - mmsize], q0123
 mova m12, [ptr1 + j + mmsize]
 %endif
-mulps m6, [win + %1 + j + 16*4]
+mulps m6, [win + %1 + j + 16 * 4]
 mulps m5, [win + %1 + j]
 %if ARCH_X86_64
-mulps m12, [win + %1 + j + mmsize + 16*4]
+mulps m12, [win + %1 + j + mmsize + 16 * 4]
 mulps m11, [win + %1 + j + mmsize]
 %endif
 addps m2, m6
@@ -199,19 +199,19 @@ INIT_XMM sse2
 addps m8, m12
 subps m7, m11
 %endif
-;~ c += window[i + j + 32]*( synth_buf[16 + i + j ])
-;~ d += window[i + j + 48]*( synth_buf[31 - i + j ])
-pshufd m6, [ptr2 + j + (31-3)*4], q0123
-mova m5, [ptr1 + j + 16*4]
+;~ c += window[i + j + 32] * (synth_buf[16 + i + j])
+;~ d += window[i + j + 48] * (synth_buf[31 - i + j])
+pshufd m6, [ptr2 + j + (31 - 3) * 4], q0123
+mova m5, [ptr1 + j + 16 * 4]
 %if ARCH_X86_64
-pshufd m12, [ptr2 + j + (31-3)*4 - mmsize], q0123
-mova m11, [ptr1 + j + mmsize + 16*4]
+pshufd m12, [ptr2 + j + (31 - 3) * 4 - mmsize], q0123
+mova m11, [ptr1 + j + mmsize + 16 * 4]
 %endif
-mulps m5, [win + %1 + j + 32*4]
-mulps m6, [win + %1 + j + 48*4]
+mulps m5, [win + %1 + j + 32 * 4]
+mulps m6, [win + %1 + j + 48 * 4]
 %if ARCH_X86_64
-mulps m11, [win + %1 + j + mmsize + 32*4]
-mulps m12, [win + %1 + j + mmsize + 48*4]
+mulps m11, [win + %1 + j + mmsize + 32 * 4]
+mulps m12, [win + %1 + j + mmsize + 48 * 4]
 %endif
 addps m3, m5
 addps m4, m6
@@ -219,13 +219,13 @@ INIT_XMM sse2
 addps m9, m11
 addps m10, m12
 %endif
-sub j, 64*4
+sub j, 64 * 4
 %endmacro
 ; void ff_synth_filter_inner_sse2(float *synth_buf, float synth_buf2[32],
 ;                                 const float window[512], float out[32],
 ;                                 intptr_t offset, float scale)
-cglobal synth_filter_inner, 0,6+4*ARCH_X86_64,7+6*ARCH_X86_64, \
+cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
 synth_buf, synth_buf2, window, out, off, scale
 %define scale m0
 %if ARCH_X86_32 || WIN64
@@ -243,18 +243,18 @@ cglobal synth_filter_inner, 0,6+4*ARCH_X86_64,7+6*ARCH_X86_64, \
 shl r5q, 2
 mov OFFQ, r5q
 %define i r5q
-mov i, 16*4-(ARCH_X86_64+1)*mmsize ; main loop counter
+mov i, 16 * 4 - (ARCH_X86_64 + 1) * mmsize ; main loop counter
 %define buf2 synth_buf2q
 %if ARCH_X86_32
 mov buf2, synth_buf2mp
 %endif
 .mainloop
-; m1=a m2=b m3=c m4=d
+; m1 = a m2 = b m3 = c m4 = d
 pxor m3, m3
 pxor m4, m4
 mova m1, [buf2 + i]
-mova m2, [buf2 + i + 16*4]
+mova m2, [buf2 + i + 16 * 4]
 %if ARCH_X86_32
 %define ptr1 r0q
 %define ptr2 r1q
@@ -264,17 +264,15 @@ cglobal synth_filter_inner, 0,6+4*ARCH_X86_64,7+6*ARCH_X86_64, \
 mov ptr1, synth_bufm
 add win, i
 add ptr1, i
-%else
+%else ; ARCH_X86_64
 %define ptr1 r6q
 %define ptr2 r7q ; must be loaded
 %define win r8q
 %define j r9q
-%if ARCH_X86_64
 pxor m9, m9
 pxor m10, m10
 mova m7, [buf2 + i + mmsize]
-mova m8, [buf2 + i + mmsize + 16*4]
-%endif
+mova m8, [buf2 + i + mmsize + 16 * 4]
 lea win, [windowq + i]
 lea ptr1, [synth_bufq + i]
 %endif
@@ -286,15 +284,15 @@ cglobal synth_filter_inner, 0,6+4*ARCH_X86_64,7+6*ARCH_X86_64, \
 INNER_LOOP 0
 jge .loop1
-mov j, 448*4
+mov j, 448 * 4
 sub j, OFFQ
 jz .end
 sub ptr1, j
 sub ptr2, j
 add win, OFFQ ; now at j-64, so define OFFSET
-sub j, 64*4
+sub j, 64 * 4
 .loop2:
-INNER_LOOP 64*4
+INNER_LOOP 64 * 4
 jge .loop2
 .end:
@@ -302,30 +300,30 @@ cglobal synth_filter_inner, 0,6+4*ARCH_X86_64,7+6*ARCH_X86_64, \
 mov buf2, synth_buf2m ; needed for next iteration anyway
 mov outq, outmp ; j, which will be set again during it
 %endif
-;~ out[i ] = a*scale;
-;~ out[i + 16] = b*scale;
+;~ out[i] = a * scale;
+;~ out[i + 16] = b * scale;
 mulps m1, scale
 mulps m2, scale
 %if ARCH_X86_64
 mulps m7, scale
 mulps m8, scale
 %endif
-;~ synth_buf2[i ] = c;
-;~ synth_buf2[i + 16] = d;
-mova [buf2 + i + 0*4], m3
-mova [buf2 + i + 16*4], m4
+;~ synth_buf2[i] = c;
+;~ synth_buf2[i + 16] = d;
+mova [buf2 + i + 0 * 4], m3
+mova [buf2 + i + 16 * 4], m4
 %if ARCH_X86_64
-mova [buf2 + i + 0*4 + mmsize], m9
-mova [buf2 + i + 16*4 + mmsize], m10
+mova [buf2 + i + 0 * 4 + mmsize], m9
+mova [buf2 + i + 16 * 4 + mmsize], m10
 %endif
-;~ out[i ] = a;
-;~ out[i + 16] = a;
-mova [outq + i + 0*4], m1
-mova [outq + i + 16*4], m2
+;~ out[i] = a;
+;~ out[i + 16] = a;
+mova [outq + i + 0 * 4], m1
+mova [outq + i + 16 * 4], m2
 %if ARCH_X86_64
-mova [outq + i + 0*4 + mmsize], m7
-mova [outq + i + 16*4 + mmsize], m8
+mova [outq + i + 0 * 4 + mmsize], m7
+mova [outq + i + 16 * 4 + mmsize], m8
 %endif
-sub i, (ARCH_X86_64+1)*mmsize
+sub i, (ARCH_X86_64 + 1) * mmsize
 jge .mainloop
 RET
libavcodec/x86/dcadsp_init.c
@@ -66,7 +66,7 @@ static void synth_filter_sse2(FFTContext *imdct,
 ff_synth_filter_inner_sse2(synth_buf, synth_buf2, window,
                            out, *synth_buf_offset, scale);
-*synth_buf_offset= (*synth_buf_offset - 32)&511;
+*synth_buf_offset = (*synth_buf_offset - 32) & 511;
 }
 av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
...
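
The ";~" comments in the assembly above spell out the scalar arithmetic that each INNER_LOOP invocation vectorizes: for an output index i and window offset j, four accumulators are updated from the synthesis window and the synth buffer, and at the end of .mainloop two of them are scaled into out[] while the other two are written back to synth_buf2[]. A minimal C sketch of one such accumulation step follows; it is illustrative only (the helper name synth_filter_scalar_step is hypothetical, and the .loop1/.loop2 ring-buffer wraparound is left out):

/* Hypothetical scalar equivalent of one INNER_LOOP step at window offset j,
 * mirroring the ";~" comments in the SSE2 code above. */
static void synth_filter_scalar_step(const float *synth_buf, const float *window,
                                     int i, int j,
                                     float *a, float *b, float *c, float *d)
{
    *a += window[i + j]      * -synth_buf[15 - i + j]; /* -> out[i]             */
    *b += window[i + j + 16] *  synth_buf[i + j];      /* -> out[i + 16]        */
    *c += window[i + j + 32] *  synth_buf[16 + i + j]; /* -> synth_buf2[i]      */
    *d += window[i + j + 48] *  synth_buf[31 - i + j]; /* -> synth_buf2[i + 16] */
}

After the j loop, out[i] = a * scale and out[i + 16] = b * scale, while c and d are stored back into synth_buf2[i] and synth_buf2[i + 16]; the mulps/mova block at the end of .mainloop does this four floats at a time (eight on x86-64, using the extra xmm registers), and the wrapper in dcadsp_init.c then advances the ring buffer with *synth_buf_offset = (*synth_buf_offset - 32) & 511.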