Commit e54e6f25 authored by Mans Rullgard

arm/neon: dsputil: use correct size specifiers on vld1/vst1

Change the size specifiers to match the actual element sizes
of the data.  This makes no practical difference with strict
alignment checking disabled (the default) other than somewhat
documenting the code.  With strict alignment checking on, it
avoids trapping the unaligned loads.
Signed-off-by: Mans Rullgard <mans@mansr.com>
parent 2eba6898
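
For context on the change below, a minimal sketch (not from the patch; register choices are arbitrary): on VLD1/VST1 the data-type suffix selects the element size, not the amount of data transferred, and when no explicit alignment qualifier is given, that element size is what strict alignment checking (SCTLR.A = 1) tests the address against.

        @ Both forms transfer the same 16 bytes from [r1].
        vld1.8  {q0}, [r1], r2        @ byte elements: any address is acceptable
        vld1.64 {q0}, [r1], r2        @ 64-bit elements: faults on an address that is
                                      @ not 8-byte aligned when strict checking is on
        @ An explicit alignment qualifier is checked independently of SCTLR.A:
        vld1.8  {q0}, [r1,:128], r2   @ always requires a 16-byte aligned address

Accordingly, the diff switches the byte-pixel loads/stores to .8, the int16 coefficient loads to .16 and the float loads/stores to .32, matching the data actually being moved.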
@@ -44,22 +44,22 @@ endfunc
 .if \avg
         mov r12, r0
 .endif
-1:      vld1.64 {q0}, [r1], r2
-        vld1.64 {q1}, [r1], r2
-        vld1.64 {q2}, [r1], r2
+1:      vld1.8  {q0}, [r1], r2
+        vld1.8  {q1}, [r1], r2
+        vld1.8  {q2}, [r1], r2
         pld [r1, r2, lsl #2]
-        vld1.64 {q3}, [r1], r2
+        vld1.8  {q3}, [r1], r2
         pld [r1]
         pld [r1, r2]
         pld [r1, r2, lsl #1]
 .if \avg
-        vld1.64 {q8}, [r12,:128], r2
+        vld1.8  {q8}, [r12,:128], r2
         vrhadd.u8 q0, q0, q8
-        vld1.64 {q9}, [r12,:128], r2
+        vld1.8  {q9}, [r12,:128], r2
         vrhadd.u8 q1, q1, q9
-        vld1.64 {q10}, [r12,:128], r2
+        vld1.8  {q10}, [r12,:128], r2
         vrhadd.u8 q2, q2, q10
-        vld1.64 {q11}, [r12,:128], r2
+        vld1.8  {q11}, [r12,:128], r2
         vrhadd.u8 q3, q3, q11
 .endif
         subs r3, r3, #4
@@ -72,8 +72,8 @@ endfunc
 .endm
 
 .macro pixels16_x2 rnd=1, avg=0
-1:      vld1.64 {d0-d2}, [r1], r2
-        vld1.64 {d4-d6}, [r1], r2
+1:      vld1.8  {d0-d2}, [r1], r2
+        vld1.8  {d4-d6}, [r1], r2
         pld [r1]
         pld [r1, r2]
         subs r3, r3, #2
@@ -88,21 +88,21 @@ endfunc
         vrhadd.u8 q2, q2, q3
         sub r0, r0, r2
 .endif
-        vst1.64 {q0}, [r0,:128], r2
-        vst1.64 {q2}, [r0,:128], r2
+        vst1.8  {q0}, [r0,:128], r2
+        vst1.8  {q2}, [r0,:128], r2
         bne 1b
         bx lr
 .endm
 
 .macro pixels16_y2 rnd=1, avg=0
         sub r3, r3, #2
-        vld1.64 {q0}, [r1], r2
-        vld1.64 {q1}, [r1], r2
+        vld1.8  {q0}, [r1], r2
+        vld1.8  {q1}, [r1], r2
 1:      subs r3, r3, #2
         avg q2, q0, q1
-        vld1.64 {q0}, [r1], r2
+        vld1.8  {q0}, [r1], r2
         avg q3, q0, q1
-        vld1.64 {q1}, [r1], r2
+        vld1.8  {q1}, [r1], r2
         pld [r1]
         pld [r1, r2]
 .if \avg
@@ -112,12 +112,12 @@ endfunc
         vrhadd.u8 q3, q3, q9
         sub r0, r0, r2
 .endif
-        vst1.64 {q2}, [r0,:128], r2
-        vst1.64 {q3}, [r0,:128], r2
+        vst1.8  {q2}, [r0,:128], r2
+        vst1.8  {q3}, [r0,:128], r2
         bne 1b
         avg q2, q0, q1
-        vld1.64 {q0}, [r1], r2
+        vld1.8  {q0}, [r1], r2
         avg q3, q0, q1
 .if \avg
         vld1.8 {q8}, [r0,:128], r2
@@ -126,16 +126,16 @@ endfunc
         vrhadd.u8 q3, q3, q9
         sub r0, r0, r2
 .endif
-        vst1.64 {q2}, [r0,:128], r2
-        vst1.64 {q3}, [r0,:128], r2
+        vst1.8  {q2}, [r0,:128], r2
+        vst1.8  {q3}, [r0,:128], r2
         bx lr
 .endm
 
 .macro pixels16_xy2 rnd=1, avg=0
         sub r3, r3, #2
-        vld1.64 {d0-d2}, [r1], r2
-        vld1.64 {d4-d6}, [r1], r2
+        vld1.8  {d0-d2}, [r1], r2
+        vld1.8  {d4-d6}, [r1], r2
 NRND    vmov.i16 q13, #1
         pld [r1]
         pld [r1, r2]
@@ -146,7 +146,7 @@ NRND vmov.i16 q13, #1
         vaddl.u8 q9, d4, d6
         vaddl.u8 q11, d5, d7
 1:      subs r3, r3, #2
-        vld1.64 {d0-d2}, [r1], r2
+        vld1.8  {d0-d2}, [r1], r2
         vadd.u16 q12, q8, q9
         pld [r1]
 NRND    vadd.u16 q12, q12, q13
@@ -160,9 +160,9 @@ NRND vadd.u16 q1, q1, q13
         vrhadd.u8 q14, q14, q8
 .endif
         vaddl.u8 q8, d0, d30
-        vld1.64 {d2-d4}, [r1], r2
+        vld1.8  {d2-d4}, [r1], r2
         vaddl.u8 q10, d1, d31
-        vst1.64 {q14}, [r0,:128], r2
+        vst1.8  {q14}, [r0,:128], r2
         vadd.u16 q12, q8, q9
         pld [r1, r2]
 NRND    vadd.u16 q12, q12, q13
@@ -177,10 +177,10 @@ NRND vadd.u16 q0, q0, q13
 .endif
         vaddl.u8 q9, d2, d4
         vaddl.u8 q11, d3, d5
-        vst1.64 {q15}, [r0,:128], r2
+        vst1.8  {q15}, [r0,:128], r2
         bgt 1b
-        vld1.64 {d0-d2}, [r1], r2
+        vld1.8  {d0-d2}, [r1], r2
         vadd.u16 q12, q8, q9
 NRND    vadd.u16 q12, q12, q13
         vext.8 q15, q0, q1, #1
@@ -194,7 +194,7 @@ NRND vadd.u16 q1, q1, q13
 .endif
         vaddl.u8 q8, d0, d30
         vaddl.u8 q10, d1, d31
-        vst1.64 {q14}, [r0,:128], r2
+        vst1.8  {q14}, [r0,:128], r2
         vadd.u16 q12, q8, q9
 NRND    vadd.u16 q12, q12, q13
         vadd.u16 q0, q10, q11
@@ -205,44 +205,44 @@ NRND vadd.u16 q0, q0, q13
         vld1.8 {q9}, [r0,:128]
         vrhadd.u8 q15, q15, q9
 .endif
-        vst1.64 {q15}, [r0,:128], r2
+        vst1.8  {q15}, [r0,:128], r2
         bx lr
 .endm
 
 .macro pixels8 rnd=1, avg=0
-1:      vld1.64 {d0}, [r1], r2
-        vld1.64 {d1}, [r1], r2
-        vld1.64 {d2}, [r1], r2
+1:      vld1.8  {d0}, [r1], r2
+        vld1.8  {d1}, [r1], r2
+        vld1.8  {d2}, [r1], r2
         pld [r1, r2, lsl #2]
-        vld1.64 {d3}, [r1], r2
+        vld1.8  {d3}, [r1], r2
         pld [r1]
         pld [r1, r2]
         pld [r1, r2, lsl #1]
 .if \avg
-        vld1.64 {d4}, [r0,:64], r2
+        vld1.8  {d4}, [r0,:64], r2
         vrhadd.u8 d0, d0, d4
-        vld1.64 {d5}, [r0,:64], r2
+        vld1.8  {d5}, [r0,:64], r2
         vrhadd.u8 d1, d1, d5
-        vld1.64 {d6}, [r0,:64], r2
+        vld1.8  {d6}, [r0,:64], r2
         vrhadd.u8 d2, d2, d6
-        vld1.64 {d7}, [r0,:64], r2
+        vld1.8  {d7}, [r0,:64], r2
         vrhadd.u8 d3, d3, d7
         sub r0, r0, r2, lsl #2
 .endif
         subs r3, r3, #4
-        vst1.64 {d0}, [r0,:64], r2
-        vst1.64 {d1}, [r0,:64], r2
-        vst1.64 {d2}, [r0,:64], r2
-        vst1.64 {d3}, [r0,:64], r2
+        vst1.8  {d0}, [r0,:64], r2
+        vst1.8  {d1}, [r0,:64], r2
+        vst1.8  {d2}, [r0,:64], r2
+        vst1.8  {d3}, [r0,:64], r2
         bne 1b
         bx lr
 .endm
 
 .macro pixels8_x2 rnd=1, avg=0
-1:      vld1.64 {q0}, [r1], r2
+1:      vld1.8  {q0}, [r1], r2
         vext.8 d1, d0, d1, #1
-        vld1.64 {q1}, [r1], r2
+        vld1.8  {q1}, [r1], r2
         vext.8 d3, d2, d3, #1
         pld [r1]
         pld [r1, r2]
@@ -255,21 +255,21 @@ NRND vadd.u16 q0, q0, q13
         vrhadd.u8 q0, q0, q2
         sub r0, r0, r2
 .endif
-        vst1.64 {d0}, [r0,:64], r2
-        vst1.64 {d1}, [r0,:64], r2
+        vst1.8  {d0}, [r0,:64], r2
+        vst1.8  {d1}, [r0,:64], r2
         bne 1b
         bx lr
 .endm
 
 .macro pixels8_y2 rnd=1, avg=0
         sub r3, r3, #2
-        vld1.64 {d0}, [r1], r2
-        vld1.64 {d1}, [r1], r2
+        vld1.8  {d0}, [r1], r2
+        vld1.8  {d1}, [r1], r2
 1:      subs r3, r3, #2
         avg d4, d0, d1
-        vld1.64 {d0}, [r1], r2
+        vld1.8  {d0}, [r1], r2
         avg d5, d0, d1
-        vld1.64 {d1}, [r1], r2
+        vld1.8  {d1}, [r1], r2
         pld [r1]
         pld [r1, r2]
 .if \avg
@@ -278,12 +278,12 @@ NRND vadd.u16 q0, q0, q13
         vrhadd.u8 q2, q2, q1
         sub r0, r0, r2
 .endif
-        vst1.64 {d4}, [r0,:64], r2
-        vst1.64 {d5}, [r0,:64], r2
+        vst1.8  {d4}, [r0,:64], r2
+        vst1.8  {d5}, [r0,:64], r2
         bne 1b
         avg d4, d0, d1
-        vld1.64 {d0}, [r1], r2
+        vld1.8  {d0}, [r1], r2
         avg d5, d0, d1
 .if \avg
         vld1.8 {d2}, [r0,:64], r2
@@ -291,16 +291,16 @@ NRND vadd.u16 q0, q0, q13
         vrhadd.u8 q2, q2, q1
         sub r0, r0, r2
 .endif
-        vst1.64 {d4}, [r0,:64], r2
-        vst1.64 {d5}, [r0,:64], r2
+        vst1.8  {d4}, [r0,:64], r2
+        vst1.8  {d5}, [r0,:64], r2
         bx lr
 .endm
 
 .macro pixels8_xy2 rnd=1, avg=0
         sub r3, r3, #2
-        vld1.64 {q0}, [r1], r2
-        vld1.64 {q1}, [r1], r2
+        vld1.8  {q0}, [r1], r2
+        vld1.8  {q1}, [r1], r2
 NRND    vmov.i16 q11, #1
         pld [r1]
         pld [r1, r2]
@@ -309,14 +309,14 @@ NRND vmov.i16 q11, #1
         vaddl.u8 q8, d0, d4
         vaddl.u8 q9, d2, d6
 1:      subs r3, r3, #2
-        vld1.64 {q0}, [r1], r2
+        vld1.8  {q0}, [r1], r2
         pld [r1]
         vadd.u16 q10, q8, q9
         vext.8 d4, d0, d1, #1
 NRND    vadd.u16 q10, q10, q11
         vaddl.u8 q8, d0, d4
         shrn d5, q10, #2
-        vld1.64 {q1}, [r1], r2
+        vld1.8  {q1}, [r1], r2
         vadd.u16 q10, q8, q9
         pld [r1, r2]
 .if \avg
@@ -324,7 +324,7 @@ NRND vadd.u16 q10, q10, q11
         vrhadd.u8 d5, d5, d7
 .endif
 NRND    vadd.u16 q10, q10, q11
-        vst1.64 {d5}, [r0,:64], r2
+        vst1.8  {d5}, [r0,:64], r2
         shrn d7, q10, #2
 .if \avg
         vld1.8 {d5}, [r0,:64]
@@ -332,10 +332,10 @@ NRND vadd.u16 q10, q10, q11
 .endif
         vext.8 d6, d2, d3, #1
         vaddl.u8 q9, d2, d6
-        vst1.64 {d7}, [r0,:64], r2
+        vst1.8  {d7}, [r0,:64], r2
         bgt 1b
-        vld1.64 {q0}, [r1], r2
+        vld1.8  {q0}, [r1], r2
         vadd.u16 q10, q8, q9
         vext.8 d4, d0, d1, #1
 NRND    vadd.u16 q10, q10, q11
@@ -347,13 +347,13 @@ NRND vadd.u16 q10, q10, q11
         vrhadd.u8 d5, d5, d7
 .endif
 NRND    vadd.u16 q10, q10, q11
-        vst1.64 {d5}, [r0,:64], r2
+        vst1.8  {d5}, [r0,:64], r2
         shrn d7, q10, #2
 .if \avg
         vld1.8 {d5}, [r0,:64]
         vrhadd.u8 d7, d7, d5
 .endif
-        vst1.64 {d7}, [r0,:64], r2
+        vst1.8  {d7}, [r0,:64], r2
         bx lr
 .endm
@@ -429,147 +429,147 @@ endfunc
         pixfunc2 avg_, pixels8_xy2, avg=1
 
 function ff_put_pixels_clamped_neon, export=1
-        vld1.64 {d16-d19}, [r0,:128]!
+        vld1.16 {d16-d19}, [r0,:128]!
         vqmovun.s16 d0, q8
-        vld1.64 {d20-d23}, [r0,:128]!
+        vld1.16 {d20-d23}, [r0,:128]!
         vqmovun.s16 d1, q9
-        vld1.64 {d24-d27}, [r0,:128]!
+        vld1.16 {d24-d27}, [r0,:128]!
         vqmovun.s16 d2, q10
-        vld1.64 {d28-d31}, [r0,:128]!
+        vld1.16 {d28-d31}, [r0,:128]!
         vqmovun.s16 d3, q11
-        vst1.64 {d0}, [r1,:64], r2
+        vst1.8  {d0}, [r1,:64], r2
         vqmovun.s16 d4, q12
-        vst1.64 {d1}, [r1,:64], r2
+        vst1.8  {d1}, [r1,:64], r2
         vqmovun.s16 d5, q13
-        vst1.64 {d2}, [r1,:64], r2
+        vst1.8  {d2}, [r1,:64], r2
         vqmovun.s16 d6, q14
-        vst1.64 {d3}, [r1,:64], r2
+        vst1.8  {d3}, [r1,:64], r2
         vqmovun.s16 d7, q15
-        vst1.64 {d4}, [r1,:64], r2
-        vst1.64 {d5}, [r1,:64], r2
-        vst1.64 {d6}, [r1,:64], r2
-        vst1.64 {d7}, [r1,:64], r2
+        vst1.8  {d4}, [r1,:64], r2
+        vst1.8  {d5}, [r1,:64], r2
+        vst1.8  {d6}, [r1,:64], r2
+        vst1.8  {d7}, [r1,:64], r2
         bx lr
 endfunc
 
 function ff_put_signed_pixels_clamped_neon, export=1
         vmov.u8 d31, #128
-        vld1.64 {d16-d17}, [r0,:128]!
+        vld1.16 {d16-d17}, [r0,:128]!
         vqmovn.s16 d0, q8
-        vld1.64 {d18-d19}, [r0,:128]!
+        vld1.16 {d18-d19}, [r0,:128]!
         vqmovn.s16 d1, q9
-        vld1.64 {d16-d17}, [r0,:128]!
+        vld1.16 {d16-d17}, [r0,:128]!
         vqmovn.s16 d2, q8
-        vld1.64 {d18-d19}, [r0,:128]!
+        vld1.16 {d18-d19}, [r0,:128]!
         vadd.u8 d0, d0, d31
-        vld1.64 {d20-d21}, [r0,:128]!
+        vld1.16 {d20-d21}, [r0,:128]!
         vadd.u8 d1, d1, d31
-        vld1.64 {d22-d23}, [r0,:128]!
+        vld1.16 {d22-d23}, [r0,:128]!
         vadd.u8 d2, d2, d31
-        vst1.64 {d0}, [r1,:64], r2
+        vst1.8  {d0}, [r1,:64], r2
         vqmovn.s16 d3, q9
-        vst1.64 {d1}, [r1,:64], r2
+        vst1.8  {d1}, [r1,:64], r2
         vqmovn.s16 d4, q10
-        vst1.64 {d2}, [r1,:64], r2
+        vst1.8  {d2}, [r1,:64], r2
         vqmovn.s16 d5, q11
-        vld1.64 {d24-d25}, [r0,:128]!
+        vld1.16 {d24-d25}, [r0,:128]!
         vadd.u8 d3, d3, d31
-        vld1.64 {d26-d27}, [r0,:128]!
+        vld1.16 {d26-d27}, [r0,:128]!
         vadd.u8 d4, d4, d31
         vadd.u8 d5, d5, d31
-        vst1.64 {d3}, [r1,:64], r2
+        vst1.8  {d3}, [r1,:64], r2
         vqmovn.s16 d6, q12
-        vst1.64 {d4}, [r1,:64], r2
+        vst1.8  {d4}, [r1,:64], r2
         vqmovn.s16 d7, q13
-        vst1.64 {d5}, [r1,:64], r2
+        vst1.8  {d5}, [r1,:64], r2
         vadd.u8 d6, d6, d31
         vadd.u8 d7, d7, d31
-        vst1.64 {d6}, [r1,:64], r2
-        vst1.64 {d7}, [r1,:64], r2
+        vst1.8  {d6}, [r1,:64], r2
+        vst1.8  {d7}, [r1,:64], r2
         bx lr
 endfunc
 
 function ff_add_pixels_clamped_neon, export=1
         mov r3, r1
-        vld1.64 {d16}, [r1,:64], r2
-        vld1.64 {d0-d1}, [r0,:128]!
+        vld1.8  {d16}, [r1,:64], r2
+        vld1.16 {d0-d1}, [r0,:128]!
         vaddw.u8 q0, q0, d16
-        vld1.64 {d17}, [r1,:64], r2
-        vld1.64 {d2-d3}, [r0,:128]!
+        vld1.8  {d17}, [r1,:64], r2
+        vld1.16 {d2-d3}, [r0,:128]!
         vqmovun.s16 d0, q0
-        vld1.64 {d18}, [r1,:64], r2
+        vld1.8  {d18}, [r1,:64], r2
         vaddw.u8 q1, q1, d17
-        vld1.64 {d4-d5}, [r0,:128]!
+        vld1.16 {d4-d5}, [r0,:128]!
         vaddw.u8 q2, q2, d18
-        vst1.64 {d0}, [r3,:64], r2
+        vst1.8  {d0}, [r3,:64], r2
         vqmovun.s16 d2, q1
-        vld1.64 {d19}, [r1,:64], r2
-        vld1.64 {d6-d7}, [r0,:128]!
+        vld1.8  {d19}, [r1,:64], r2
+        vld1.16 {d6-d7}, [r0,:128]!
         vaddw.u8 q3, q3, d19
         vqmovun.s16 d4, q2
-        vst1.64 {d2}, [r3,:64], r2
-        vld1.64 {d16}, [r1,:64], r2
+        vst1.8  {d2}, [r3,:64], r2
+        vld1.8  {d16}, [r1,:64], r2
         vqmovun.s16 d6, q3
-        vld1.64 {d0-d1}, [r0,:128]!
+        vld1.16 {d0-d1}, [r0,:128]!
         vaddw.u8 q0, q0, d16
-        vst1.64 {d4}, [r3,:64], r2
-        vld1.64 {d17}, [r1,:64], r2
-        vld1.64 {d2-d3}, [r0,:128]!
+        vst1.8  {d4}, [r3,:64], r2
+        vld1.8  {d17}, [r1,:64], r2
+        vld1.16 {d2-d3}, [r0,:128]!
         vaddw.u8 q1, q1, d17
-        vst1.64 {d6}, [r3,:64], r2
+        vst1.8  {d6}, [r3,:64], r2
         vqmovun.s16 d0, q0
-        vld1.64 {d18}, [r1,:64], r2
-        vld1.64 {d4-d5}, [r0,:128]!
+        vld1.8  {d18}, [r1,:64], r2
+        vld1.16 {d4-d5}, [r0,:128]!
         vaddw.u8 q2, q2, d18
-        vst1.64 {d0}, [r3,:64], r2
+        vst1.8  {d0}, [r3,:64], r2
         vqmovun.s16 d2, q1
-        vld1.64 {d19}, [r1,:64], r2
+        vld1.8  {d19}, [r1,:64], r2
         vqmovun.s16 d4, q2
-        vld1.64 {d6-d7}, [r0,:128]!
+        vld1.16 {d6-d7}, [r0,:128]!
         vaddw.u8 q3, q3, d19
-        vst1.64 {d2}, [r3,:64], r2
+        vst1.8  {d2}, [r3,:64], r2
         vqmovun.s16 d6, q3
-        vst1.64 {d4}, [r3,:64], r2
-        vst1.64 {d6}, [r3,:64], r2
+        vst1.8  {d4}, [r3,:64], r2
+        vst1.8  {d6}, [r3,:64], r2
         bx lr
 endfunc
 
 function ff_vector_fmul_neon, export=1
         subs r3, r3, #8
-        vld1.64 {d0-d3}, [r1,:128]!
-        vld1.64 {d4-d7}, [r2,:128]!
+        vld1.32 {d0-d3}, [r1,:128]!
+        vld1.32 {d4-d7}, [r2,:128]!
         vmul.f32 q8, q0, q2
         vmul.f32 q9, q1, q3
         beq 3f
         bics ip, r3, #15
         beq 2f
 1:      subs ip, ip, #16
-        vld1.64 {d0-d1}, [r1,:128]!
-        vld1.64 {d4-d5}, [r2,:128]!
+        vld1.32 {d0-d1}, [r1,:128]!
+        vld1.32 {d4-d5}, [r2,:128]!
         vmul.f32 q10, q0, q2
-        vld1.64 {d2-d3}, [r1,:128]!
-        vld1.64 {d6-d7}, [r2,:128]!
+        vld1.32 {d2-d3}, [r1,:128]!
+        vld1.32 {d6-d7}, [r2,:128]!
         vmul.f32 q11, q1, q3
-        vst1.64 {d16-d19},[r0,:128]!
-        vld1.64 {d0-d1}, [r1,:128]!
-        vld1.64 {d4-d5}, [r2,:128]!
+        vst1.32 {d16-d19},[r0,:128]!
+        vld1.32 {d0-d1}, [r1,:128]!
+        vld1.32 {d4-d5}, [r2,:128]!
         vmul.f32 q8, q0, q2
-        vld1.64 {d2-d3}, [r1,:128]!
-        vld1.64 {d6-d7}, [r2,:128]!
+        vld1.32 {d2-d3}, [r1,:128]!
+        vld1.32 {d6-d7}, [r2,:128]!
         vmul.f32 q9, q1, q3
-        vst1.64 {d20-d23},[r0,:128]!
+        vst1.32 {d20-d23},[r0,:128]!
         bne 1b
         ands r3, r3, #15
         beq 3f
-2:      vld1.64 {d0-d1}, [r1,:128]!
-        vld1.64 {d4-d5}, [r2,:128]!
-        vst1.64 {d16-d17},[r0,:128]!
+2:      vld1.32 {d0-d1}, [r1,:128]!
+        vld1.32 {d4-d5}, [r2,:128]!
+        vst1.32 {d16-d17},[r0,:128]!
         vmul.f32 q8, q0, q2
-        vld1.64 {d2-d3}, [r1,:128]!
-        vld1.64 {d6-d7}, [r2,:128]!
-        vst1.64 {d18-d19},[r0,:128]!
+        vld1.32 {d2-d3}, [r1,:128]!
+        vld1.32 {d6-d7}, [r2,:128]!
+        vst1.32 {d18-d19},[r0,:128]!
         vmul.f32 q9, q1, q3
-3:      vst1.64 {d16-d19},[r0,:128]!
+3:      vst1.32 {d16-d19},[r0,:128]!
         bx lr
 endfunc
@@ -582,10 +582,10 @@ function ff_vector_fmul_window_neon, export=1
         add r4, r3, r5, lsl #3
         add ip, r0, r5, lsl #3
         mov r5, #-16
-        vld1.64 {d0,d1}, [r1,:128]!
-        vld1.64 {d2,d3}, [r2,:128], r5
-        vld1.64 {d4,d5}, [r3,:128]!
-        vld1.64 {d6,d7}, [r4,:128], r5
+        vld1.32 {d0,d1}, [r1,:128]!
+        vld1.32 {d2,d3}, [r2,:128], r5
+        vld1.32 {d4,d5}, [r3,:128]!
+        vld1.32 {d6,d7}, [r4,:128], r5
 1:      subs lr, lr, #4
         vmul.f32 d22, d0, d4
         vrev64.32 q3, q3
@@ -595,19 +595,19 @@ function ff_vector_fmul_window_neon, export=1
         vmul.f32 d21, d1, d6
         beq 2f
         vmla.f32 d22, d3, d7
-        vld1.64 {d0,d1}, [r1,:128]!
+        vld1.32 {d0,d1}, [r1,:128]!
         vmla.f32 d23, d2, d6
-        vld1.64 {d18,d19},[r2,:128], r5
+        vld1.32 {d18,d19},[r2,:128], r5
         vmls.f32 d20, d3, d4
-        vld1.64 {d24,d25},[r3,:128]!
+        vld1.32 {d24,d25},[r3,:128]!
         vmls.f32 d21, d2, d5
-        vld1.64 {d6,d7}, [r4,:128], r5
+        vld1.32 {d6,d7}, [r4,:128], r5
         vmov q1, q9
         vrev64.32 q11, q11
         vmov q2, q12
         vswp d22, d23
-        vst1.64 {d20,d21},[r0,:128]!
-        vst1.64 {d22,d23},[ip,:128], r5
+        vst1.32 {d20,d21},[r0,:128]!
+        vst1.32 {d22,d23},[ip,:128], r5
         b 1b
 2:      vmla.f32 d22, d3, d7
         vmla.f32 d23, d2, d6
@@ -615,8 +615,8 @@ function ff_vector_fmul_window_neon, export=1
         vmls.f32 d21, d2, d5
         vrev64.32 q11, q11
         vswp d22, d23
-        vst1.64 {d20,d21},[r0,:128]!
-        vst1.64 {d22,d23},[ip,:128], r5
+        vst1.32 {d20,d21},[r0,:128]!
+        vst1.32 {d22,d23},[ip,:128], r5
         pop {r4,r5,pc}
 endfunc