Commit 35de5d24 authored by Måns Rullgård

cosmetics: fix indentation after previous commit

Originally committed as revision 20062 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 952e8721
@@ -75,19 +75,19 @@ static void vector_fmul_add_altivec(float *dst, const float *src0,
    vector unsigned char align = vec_lvsr(0,dst),
                         mask = vec_lvsl(0, dst);

    for (i=0; i<len-3; i+=4) {
        t0 = vec_ld(0, dst+i);
        t1 = vec_ld(15, dst+i);
        s0 = vec_ld(0, src0+i);
        s1 = vec_ld(0, src1+i);
        s2 = vec_ld(0, src2+i);
        edges = vec_perm(t1 ,t0, mask);
        d = vec_madd(s0,s1,s2);
        t1 = vec_perm(d, edges, align);
        t0 = vec_perm(edges, d, align);
        vec_st(t1, 15, dst+i);
        vec_st(t0, 0, dst+i);
    }
}

static void vector_fmul_window_altivec(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len)
...
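For reference (not part of this commit): vec_madd(s0, s1, s2) is an element-wise s0*s1 + s2, so vector_fmul_add_altivec computes dst[i] = src0[i]*src1[i] + src2[i] four floats at a time. A minimal scalar sketch of the same operation, with an illustrative name:

/* Scalar reference for the SIMD routines in this commit (illustrative,
 * not part of the diff): per-element multiply of src0 and src1, plus src2. */
static void vector_fmul_add_scalar(float *dst, const float *src0,
                                   const float *src1, const float *src2,
                                   int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i] + src2[i];
}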
@@ -2128,43 +2128,43 @@ static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *
static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
                                  const float *src2, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq    (%2,%0), %%mm0 \n\t"
        "movq   8(%2,%0), %%mm1 \n\t"
        "pfmul   (%3,%0), %%mm0 \n\t"
        "pfmul  8(%3,%0), %%mm1 \n\t"
        "pfadd   (%4,%0), %%mm0 \n\t"
        "pfadd  8(%4,%0), %%mm1 \n\t"
        "movq  %%mm0,   (%1,%0) \n\t"
        "movq  %%mm1,  8(%1,%0) \n\t"
        "sub  $16, %0 \n\t"
        "jge  1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
    __asm__ volatile("femms");
}

static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                                const float *src2, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps   (%2,%0), %%xmm0 \n\t"
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "mulps    (%3,%0), %%xmm0 \n\t"
        "mulps  16(%3,%0), %%xmm1 \n\t"
        "addps    (%4,%0), %%xmm0 \n\t"
        "addps  16(%4,%0), %%xmm1 \n\t"
        "movaps %%xmm0,   (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge  1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
}

static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
...
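A note on the x86 loops above (again, not part of the commit): the index register holds a byte offset that starts at the last SIMD block, (len-4)*4 for 3DNow! and (len-8)*4 for SSE, and counts down by one block per iteration until "jge 1b" falls through; len is therefore assumed to be a multiple of the block size, and the movaps loads additionally require 16-byte-aligned pointers. A plain-C mirror of that descending-block structure for the 3DNow! case, with an illustrative name:

/* Illustrative C mirror of the backwards 4-floats-per-iteration loop in
 * vector_fmul_add_3dnow; assumes len >= 4 and len % 4 == 0. */
static void vector_fmul_add_rev_sketch(float *dst, const float *src0,
                                       const float *src1, const float *src2,
                                       int len)
{
    int i = len - 4;                    /* index of the last 4-float block */
    do {
        int j;
        for (j = 0; j < 4; j++)
            dst[i + j] = src0[i + j] * src1[i + j] + src2[i + j];
        i -= 4;                         /* like "sub $16, %0" (16 bytes) */
    } while (i >= 0);                   /* like "jge 1b" */
}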