Commit 59807fee authored by Mans Rullgard

ARM: h264dsp_neon cosmetics

- Replace 'ip' with 'r12'.
- Use correct size designators for vld1/vst1.
- Whitespace fixes.
Signed-off-by: Mans Rullgard <mans@mansr.com>
parent a760f530
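
The changes are mechanical: the 'ip' register alias is spelled out as 'r12', and vld1/vst1 instructions that move byte data use the .8 element-size designator instead of .64 (the designator does not change how many bytes are transferred, so the generated output is unchanged). A representative before/after pair, taken from the diff below:

-       rsb             ip,  r7, r4, lsl #3
+       rsb             r12, r7, r4, lsl #3
-       vld1.64         {d4, d5}, [r1], r4
+       vld1.8          {d4, d5}, [r1], r4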
@@ -22,13 +22,13 @@
#include "neon.S"
/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro h264_chroma_mc8 type
function ff_\type\()_h264_chroma_mc8_neon, export=1
push {r4-r7, lr}
ldrd r4, [sp, #20]
.ifc \type,avg
mov lr, r0
.endif
pld [r1]
pld [r1, r2]
@@ -36,7 +36,7 @@ A muls r7, r4, r5
T mul r7, r4, r5
T cmp r7, #0
rsb r6, r7, r5, lsl #3
- rsb ip, r7, r4, lsl #3
+ rsb r12, r7, r4, lsl #3
sub r4, r7, r4, lsl #3
sub r4, r4, r5, lsl #3
add r4, r4, #64
@@ -47,10 +47,10 @@ T cmp r7, #0
vdup.8 d0, r4
lsl r4, r2, #1
- vdup.8 d1, ip
+ vdup.8 d1, r12
- vld1.64 {d4, d5}, [r1], r4
+ vld1.8 {d4, d5}, [r1], r4
vdup.8 d2, r6
- vld1.64 {d6, d7}, [r5], r4
+ vld1.8 {d6, d7}, [r5], r4
vdup.8 d3, r7
vext.8 d5, d4, d5, #1
@@ -59,7 +59,7 @@ T cmp r7, #0
1: pld [r5]
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
- vld1.64 {d4, d5}, [r1], r4
+ vld1.8 {d4, d5}, [r1], r4
vmlal.u8 q8, d6, d2
vext.8 d5, d4, d5, #1
vmlal.u8 q8, d7, d3
@@ -69,57 +69,57 @@ T cmp r7, #0
vmlal.u8 q9, d4, d2
vmlal.u8 q9, d5, d3
vrshrn.u16 d16, q8, #6
- vld1.64 {d6, d7}, [r5], r4
+ vld1.8 {d6, d7}, [r5], r4
pld [r1]
vrshrn.u16 d17, q9, #6
.ifc \type,avg
- vld1.64 {d20}, [lr,:64], r2
+ vld1.8 {d20}, [lr,:64], r2
- vld1.64 {d21}, [lr,:64], r2
+ vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
vext.8 d7, d6, d7, #1
- vst1.64 {d16}, [r0,:64], r2
+ vst1.8 {d16}, [r0,:64], r2
- vst1.64 {d17}, [r0,:64], r2
+ vst1.8 {d17}, [r0,:64], r2
bgt 1b
pop {r4-r7, pc}
2: tst r6, r6
- add ip, ip, r6
+ add r12, r12, r6
vdup.8 d0, r4
- vdup.8 d1, ip
+ vdup.8 d1, r12
beq 4f
add r5, r1, r2
lsl r4, r2, #1
- vld1.64 {d4}, [r1], r4
+ vld1.8 {d4}, [r1], r4
- vld1.64 {d6}, [r5], r4
+ vld1.8 {d6}, [r5], r4
3: pld [r5]
vmull.u8 q8, d4, d0
vmlal.u8 q8, d6, d1
- vld1.64 {d4}, [r1], r4
+ vld1.8 {d4}, [r1], r4
vmull.u8 q9, d6, d0
vmlal.u8 q9, d4, d1
- vld1.64 {d6}, [r5], r4
+ vld1.8 {d6}, [r5], r4
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.ifc \type,avg
- vld1.64 {d20}, [lr,:64], r2
+ vld1.8 {d20}, [lr,:64], r2
- vld1.64 {d21}, [lr,:64], r2
+ vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
subs r3, r3, #2
pld [r1]
- vst1.64 {d16}, [r0,:64], r2
+ vst1.8 {d16}, [r0,:64], r2
- vst1.64 {d17}, [r0,:64], r2
+ vst1.8 {d17}, [r0,:64], r2
bgt 3b
pop {r4-r7, pc}
- 4: vld1.64 {d4, d5}, [r1], r2
+ 4: vld1.8 {d4, d5}, [r1], r2
- vld1.64 {d6, d7}, [r1], r2
+ vld1.8 {d6, d7}, [r1], r2
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
@@ -127,36 +127,36 @@ T cmp r7, #0
subs r3, r3, #2
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
- vld1.64 {d4, d5}, [r1], r2
+ vld1.8 {d4, d5}, [r1], r2
vmull.u8 q9, d6, d0
vmlal.u8 q9, d7, d1
pld [r1]
vext.8 d5, d4, d5, #1
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.ifc \type,avg
- vld1.64 {d20}, [lr,:64], r2
+ vld1.8 {d20}, [lr,:64], r2
- vld1.64 {d21}, [lr,:64], r2
+ vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
- vld1.64 {d6, d7}, [r1], r2
+ vld1.8 {d6, d7}, [r1], r2
vext.8 d7, d6, d7, #1
- vst1.64 {d16}, [r0,:64], r2
+ vst1.8 {d16}, [r0,:64], r2
- vst1.64 {d17}, [r0,:64], r2
+ vst1.8 {d17}, [r0,:64], r2
bgt 5b
pop {r4-r7, pc}
endfunc
.endm
/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro h264_chroma_mc4 type
function ff_\type\()_h264_chroma_mc4_neon, export=1
push {r4-r7, lr}
ldrd r4, [sp, #20]
.ifc \type,avg
mov lr, r0
.endif
pld [r1]
pld [r1, r2]
@@ -164,7 +164,7 @@ A muls r7, r4, r5
T mul r7, r4, r5
T cmp r7, #0
rsb r6, r7, r5, lsl #3
- rsb ip, r7, r4, lsl #3
+ rsb r12, r7, r4, lsl #3
sub r4, r7, r4, lsl #3
sub r4, r4, r5, lsl #3
add r4, r4, #64
@@ -175,10 +175,10 @@ T cmp r7, #0
vdup.8 d0, r4
lsl r4, r2, #1
- vdup.8 d1, ip
+ vdup.8 d1, r12
- vld1.64 {d4}, [r1], r4
+ vld1.8 {d4}, [r1], r4
vdup.8 d2, r6
- vld1.64 {d6}, [r5], r4
+ vld1.8 {d6}, [r5], r4
vdup.8 d3, r7
vext.8 d5, d4, d5, #1
@@ -192,22 +192,22 @@ T cmp r7, #0
1: pld [r5]
vmull.u8 q8, d4, d0
vmlal.u8 q8, d6, d2
- vld1.64 {d4}, [r1], r4
+ vld1.8 {d4}, [r1], r4
vext.8 d5, d4, d5, #1
vtrn.32 d4, d5
vmull.u8 q9, d6, d0
vmlal.u8 q9, d4, d2
- vld1.64 {d6}, [r5], r4
+ vld1.8 {d6}, [r5], r4
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
vrshrn.u16 d16, q8, #6
subs r3, r3, #2
pld [r1]
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
vext.8 d7, d6, d7, #1
vtrn.32 d6, d7
vst1.32 {d16[0]}, [r0,:32], r2
@@ -217,9 +217,9 @@ T cmp r7, #0
pop {r4-r7, pc}
2: tst r6, r6
- add ip, ip, r6
+ add r12, r12, r6
vdup.8 d0, r4
- vdup.8 d1, ip
+ vdup.8 d1, r12
vtrn.32 d0, d1
beq 4f
@@ -238,11 +238,11 @@ T cmp r7, #0
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
vrshrn.u16 d16, q8, #6
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
subs r3, r3, #2
pld [r1]
vst1.32 {d16[0]}, [r0,:32], r2
@@ -251,8 +251,8 @@ T cmp r7, #0
pop {r4-r7, pc}
- 4: vld1.64 {d4}, [r1], r2
+ 4: vld1.8 {d4}, [r1], r2
- vld1.64 {d6}, [r1], r2
+ vld1.8 {d6}, [r1], r2
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
vtrn.32 d4, d5
@@ -261,19 +261,19 @@ T cmp r7, #0
5: vmull.u8 q8, d4, d0
vmull.u8 q9, d6, d0
subs r3, r3, #2
- vld1.64 {d4}, [r1], r2
+ vld1.8 {d4}, [r1], r2
vext.8 d5, d4, d5, #1
vtrn.32 d4, d5
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
pld [r1]
vrshrn.u16 d16, q8, #6
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
- vld1.64 {d6}, [r1], r2
+ vld1.8 {d6}, [r1], r2
vext.8 d7, d6, d7, #1
vtrn.32 d6, d7
pld [r1]
@@ -283,9 +283,9 @@ T cmp r7, #0
pop {r4-r7, pc}
endfunc
.endm
.macro h264_chroma_mc2 type
function ff_\type\()_h264_chroma_mc2_neon, export=1
push {r4-r6, lr}
ldr r4, [sp, #16]
@@ -315,29 +315,29 @@ function ff_\type\()_h264_chroma_mc2_neon, export=1
vtrn.16 q2, q3
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
.ifc \type,avg
vld1.16 {d18[0]}, [r0,:16], r2
vld1.16 {d18[1]}, [r0,:16]
sub r0, r0, r2
.endif
vtrn.32 d16, d17
vadd.i16 d16, d16, d17
vrshrn.u16 d16, q8, #6
.ifc \type,avg
vrhadd.u8 d16, d16, d18
.endif
vst1.16 {d16[0]}, [r0,:16], r2
vst1.16 {d16[1]}, [r0,:16], r2
subs r3, r3, #2
bgt 1b
pop {r4-r6, pc}
2:
.ifc \type,put
ldrh_post r5, r1, r2
strh_post r5, r0, r2
ldrh_post r6, r1, r2
strh_post r6, r0, r2
.else
vld1.16 {d16[0]}, [r1], r2
vld1.16 {d16[1]}, [r1], r2
vld1.16 {d18[0]}, [r0,:16], r2
@@ -346,7 +346,7 @@ function ff_\type\()_h264_chroma_mc2_neon, export=1
vrhadd.u8 d16, d16, d18
vst1.16 {d16[0]}, [r0,:16], r2
vst1.16 {d16[1]}, [r0,:16], r2
.endif
subs r3, r3, #2
bgt 2b
pop {r4-r6, pc}
@@ -362,22 +362,22 @@ endfunc
/* H.264 loop filter */
.macro h264_loop_filter_start
- ldr ip, [sp]
+ ldr r12, [sp]
tst r2, r2
- ldr ip, [ip]
+ ldr r12, [r12]
it ne
tstne r3, r3
- vmov.32 d24[0], ip
+ vmov.32 d24[0], r12
- and ip, ip, ip, lsl #16
+ and r12, r12, r12, lsl #16
it eq
bxeq lr
- ands ip, ip, ip, lsl #8
+ ands r12, r12, r12, lsl #8
it lt
bxlt lr
.endm
.macro h264_loop_filter_luma
vdup.8 q11, r2 @ alpha
vmovl.u8 q12, d24
vabd.u8 q6, q8, q0 @ abs(p0 - q0)
@@ -443,29 +443,29 @@ endfunc
vqmovun.s16 d17, q6
vqmovun.s16 d0, q11
vqmovun.s16 d1, q12
.endm
function ff_h264_v_loop_filter_luma_neon, export=1
h264_loop_filter_start
- vld1.64 {d0, d1}, [r0,:128], r1
+ vld1.8 {d0, d1}, [r0,:128], r1
- vld1.64 {d2, d3}, [r0,:128], r1
+ vld1.8 {d2, d3}, [r0,:128], r1
- vld1.64 {d4, d5}, [r0,:128], r1
+ vld1.8 {d4, d5}, [r0,:128], r1
sub r0, r0, r1, lsl #2
sub r0, r0, r1, lsl #1
- vld1.64 {d20,d21}, [r0,:128], r1
+ vld1.8 {d20,d21}, [r0,:128], r1
- vld1.64 {d18,d19}, [r0,:128], r1
+ vld1.8 {d18,d19}, [r0,:128], r1
- vld1.64 {d16,d17}, [r0,:128], r1
+ vld1.8 {d16,d17}, [r0,:128], r1
vpush {d8-d15}
h264_loop_filter_luma
sub r0, r0, r1, lsl #1
- vst1.64 {d8, d9}, [r0,:128], r1
+ vst1.8 {d8, d9}, [r0,:128], r1
- vst1.64 {d16,d17}, [r0,:128], r1
+ vst1.8 {d16,d17}, [r0,:128], r1
- vst1.64 {d0, d1}, [r0,:128], r1
+ vst1.8 {d0, d1}, [r0,:128], r1
- vst1.64 {d10,d11}, [r0,:128]
+ vst1.8 {d10,d11}, [r0,:128]
vpop {d8-d15}
bx lr
@@ -475,22 +475,22 @@ function ff_h264_h_loop_filter_luma_neon, export=1
h264_loop_filter_start
sub r0, r0, #4
- vld1.64 {d6}, [r0], r1
+ vld1.8 {d6}, [r0], r1
- vld1.64 {d20}, [r0], r1
+ vld1.8 {d20}, [r0], r1
- vld1.64 {d18}, [r0], r1
+ vld1.8 {d18}, [r0], r1
- vld1.64 {d16}, [r0], r1
+ vld1.8 {d16}, [r0], r1
- vld1.64 {d0}, [r0], r1
+ vld1.8 {d0}, [r0], r1
- vld1.64 {d2}, [r0], r1
+ vld1.8 {d2}, [r0], r1
- vld1.64 {d4}, [r0], r1
+ vld1.8 {d4}, [r0], r1
- vld1.64 {d26}, [r0], r1
+ vld1.8 {d26}, [r0], r1
- vld1.64 {d7}, [r0], r1
+ vld1.8 {d7}, [r0], r1
- vld1.64 {d21}, [r0], r1
+ vld1.8 {d21}, [r0], r1
- vld1.64 {d19}, [r0], r1
+ vld1.8 {d19}, [r0], r1
- vld1.64 {d17}, [r0], r1
+ vld1.8 {d17}, [r0], r1
- vld1.64 {d1}, [r0], r1
+ vld1.8 {d1}, [r0], r1
- vld1.64 {d3}, [r0], r1
+ vld1.8 {d3}, [r0], r1
- vld1.64 {d5}, [r0], r1
+ vld1.8 {d5}, [r0], r1
- vld1.64 {d27}, [r0], r1
+ vld1.8 {d27}, [r0], r1
transpose_8x8 q3, q10, q9, q8, q0, q1, q2, q13
@@ -523,7 +523,7 @@ function ff_h264_h_loop_filter_luma_neon, export=1
bx lr
endfunc
.macro h264_loop_filter_chroma
vdup.8 d22, r2 @ alpha
vmovl.u8 q12, d24
vabd.u8 d26, d16, d0 @ abs(p0 - q0)
@@ -552,22 +552,22 @@ endfunc
vsubw.s8 q11, q11, d4
vqmovun.s16 d16, q14
vqmovun.s16 d0, q11
.endm
function ff_h264_v_loop_filter_chroma_neon, export=1
h264_loop_filter_start
sub r0, r0, r1, lsl #1
- vld1.64 {d18}, [r0,:64], r1
+ vld1.8 {d18}, [r0,:64], r1
- vld1.64 {d16}, [r0,:64], r1
+ vld1.8 {d16}, [r0,:64], r1
- vld1.64 {d0}, [r0,:64], r1
+ vld1.8 {d0}, [r0,:64], r1
- vld1.64 {d2}, [r0,:64]
+ vld1.8 {d2}, [r0,:64]
h264_loop_filter_chroma
sub r0, r0, r1, lsl #1
- vst1.64 {d16}, [r0,:64], r1
+ vst1.8 {d16}, [r0,:64], r1
- vst1.64 {d0}, [r0,:64], r1
+ vst1.8 {d0}, [r0,:64], r1
bx lr
endfunc
@@ -612,20 +612,20 @@ endfunc
/* H.264 qpel MC */
.macro lowpass_const r
movw \r, #5
movt \r, #20
vmov.32 d6[0], \r
.endm
.macro lowpass_8 r0, r1, r2, r3, d0, d1, narrow=1
.if \narrow
t0 .req q0
t1 .req q8
.else
t0 .req \d0
t1 .req \d1
.endif
vext.8 d2, \r0, \r1, #2
vext.8 d3, \r0, \r1, #3
vaddl.u8 q1, d2, d3
@@ -646,20 +646,20 @@ endfunc
vaddl.u8 t1, \r2, d31
vmla.i16 t1, q9, d6[1]
vmls.i16 t1, q10, d6[0]
.if \narrow
vqrshrun.s16 \d0, t0, #5
vqrshrun.s16 \d1, t1, #5
.endif
.unreq t0
.unreq t1
.endm
.macro lowpass_8_1 r0, r1, d0, narrow=1
.if \narrow
t0 .req q0
.else
t0 .req \d0
.endif
vext.8 d2, \r0, \r1, #2
vext.8 d3, \r0, \r1, #3
vaddl.u8 q1, d2, d3
@@ -670,13 +670,13 @@ endfunc
vaddl.u8 t0, \r0, d30
vmla.i16 t0, q1, d6[1]
vmls.i16 t0, q2, d6[0]
.if \narrow
vqrshrun.s16 \d0, t0, #5
.endif
.unreq t0
.endm
.macro lowpass_8.16 r0, r1, l0, h0, l1, h1, d
vext.16 q1, \r0, \r1, #2
vext.16 q0, \r0, \r1, #3
vaddl.s16 q9, d2, d0
@@ -711,59 +711,59 @@ endfunc
vrshrn.s32 d19, q1, #10
vqmovun.s16 \d, q9
.endm
function put_h264_qpel16_h_lowpass_neon_packed
mov r4, lr
- mov ip, #16
+ mov r12, #16
mov r3, #8
bl put_h264_qpel8_h_lowpass_neon
sub r1, r1, r2, lsl #4
add r1, r1, #8
- mov ip, #16
+ mov r12, #16
mov lr, r4
b put_h264_qpel8_h_lowpass_neon
endfunc
.macro h264_qpel_h_lowpass type
function \type\()_h264_qpel16_h_lowpass_neon
push {lr}
- mov ip, #16
+ mov r12, #16
bl \type\()_h264_qpel8_h_lowpass_neon
sub r0, r0, r3, lsl #4
sub r1, r1, r2, lsl #4
add r0, r0, #8
add r1, r1, #8
- mov ip, #16
+ mov r12, #16
pop {lr}
endfunc
function \type\()_h264_qpel8_h_lowpass_neon
- 1: vld1.64 {d0, d1}, [r1], r2
+ 1: vld1.8 {d0, d1}, [r1], r2
- vld1.64 {d16,d17}, [r1], r2
+ vld1.8 {d16,d17}, [r1], r2
- subs ip, ip, #2
+ subs r12, r12, #2
lowpass_8 d0, d1, d16, d17, d0, d16
.ifc \type,avg
vld1.8 {d2}, [r0,:64], r3
vrhadd.u8 d0, d0, d2
vld1.8 {d3}, [r0,:64]
vrhadd.u8 d16, d16, d3
sub r0, r0, r3
.endif
- vst1.64 {d0}, [r0,:64], r3
+ vst1.8 {d0}, [r0,:64], r3
- vst1.64 {d16}, [r0,:64], r3
+ vst1.8 {d16}, [r0,:64], r3
bne 1b
bx lr
endfunc
.endm
h264_qpel_h_lowpass put
h264_qpel_h_lowpass avg
.macro h264_qpel_h_lowpass_l2 type
function \type\()_h264_qpel16_h_lowpass_l2_neon
push {lr}
- mov ip, #16
+ mov r12, #16
bl \type\()_h264_qpel8_h_lowpass_l2_neon
sub r0, r0, r2, lsl #4
sub r1, r1, r2, lsl #4
@@ -771,31 +771,31 @@ function \type\()_h264_qpel16_h_lowpass_l2_neon
add r0, r0, #8
add r1, r1, #8
add r3, r3, #8
- mov ip, #16
+ mov r12, #16
pop {lr}
endfunc
function \type\()_h264_qpel8_h_lowpass_l2_neon
- 1: vld1.64 {d0, d1}, [r1], r2
+ 1: vld1.8 {d0, d1}, [r1], r2
- vld1.64 {d16,d17}, [r1], r2
+ vld1.8 {d16,d17}, [r1], r2
- vld1.64 {d28}, [r3], r2
+ vld1.8 {d28}, [r3], r2
- vld1.64 {d29}, [r3], r2
+ vld1.8 {d29}, [r3], r2
- subs ip, ip, #2
+ subs r12, r12, #2
lowpass_8 d0, d1, d16, d17, d0, d1
vrhadd.u8 q0, q0, q14
.ifc \type,avg
vld1.8 {d2}, [r0,:64], r2
vrhadd.u8 d0, d0, d2
vld1.8 {d3}, [r0,:64]
vrhadd.u8 d1, d1, d3
sub r0, r0, r2
.endif
- vst1.64 {d0}, [r0,:64], r2
+ vst1.8 {d0}, [r0,:64], r2
- vst1.64 {d1}, [r0,:64], r2
+ vst1.8 {d1}, [r0,:64], r2
bne 1b
bx lr
endfunc
.endm
h264_qpel_h_lowpass_l2 put
h264_qpel_h_lowpass_l2 avg
@@ -815,7 +815,7 @@ function put_h264_qpel16_v_lowpass_neon_packed
b put_h264_qpel8_v_lowpass_neon
endfunc
.macro h264_qpel_v_lowpass type
function \type\()_h264_qpel16_v_lowpass_neon
mov r4, lr
bl \type\()_h264_qpel8_v_lowpass_neon
@@ -832,19 +832,19 @@ function \type\()_h264_qpel16_v_lowpass_neon
endfunc
function \type\()_h264_qpel8_v_lowpass_neon
- vld1.64 {d8}, [r1], r3
+ vld1.8 {d8}, [r1], r3
- vld1.64 {d10}, [r1], r3
+ vld1.8 {d10}, [r1], r3
- vld1.64 {d12}, [r1], r3
+ vld1.8 {d12}, [r1], r3
- vld1.64 {d14}, [r1], r3
+ vld1.8 {d14}, [r1], r3
- vld1.64 {d22}, [r1], r3
+ vld1.8 {d22}, [r1], r3
- vld1.64 {d24}, [r1], r3
+ vld1.8 {d24}, [r1], r3
- vld1.64 {d26}, [r1], r3
+ vld1.8 {d26}, [r1], r3
- vld1.64 {d28}, [r1], r3
+ vld1.8 {d28}, [r1], r3
- vld1.64 {d9}, [r1], r3
+ vld1.8 {d9}, [r1], r3
- vld1.64 {d11}, [r1], r3
+ vld1.8 {d11}, [r1], r3
- vld1.64 {d13}, [r1], r3
+ vld1.8 {d13}, [r1], r3
- vld1.64 {d15}, [r1], r3
+ vld1.8 {d15}, [r1], r3
- vld1.64 {d23}, [r1]
+ vld1.8 {d23}, [r1]
transpose_8x8 q4, q5, q6, q7, q11, q12, q13, q14
lowpass_8 d8, d9, d10, d11, d8, d10
@@ -853,7 +853,7 @@ function \type\()_h264_qpel8_v_lowpass_neon
lowpass_8 d26, d27, d28, d29, d26, d28
transpose_8x8 d8, d10, d12, d14, d22, d24, d26, d28
.ifc \type,avg
vld1.8 {d9}, [r0,:64], r2
vrhadd.u8 d8, d8, d9
vld1.8 {d11}, [r0,:64], r2
@@ -871,34 +871,34 @@ function \type\()_h264_qpel8_v_lowpass_neon
vld1.8 {d29}, [r0,:64], r2
vrhadd.u8 d28, d28, d29
sub r0, r0, r2, lsl #3
.endif
- vst1.64 {d8}, [r0,:64], r2
+ vst1.8 {d8}, [r0,:64], r2
- vst1.64 {d10}, [r0,:64], r2
+ vst1.8 {d10}, [r0,:64], r2
- vst1.64 {d12}, [r0,:64], r2
+ vst1.8 {d12}, [r0,:64], r2
- vst1.64 {d14}, [r0,:64], r2
+ vst1.8 {d14}, [r0,:64], r2
- vst1.64 {d22}, [r0,:64], r2
+ vst1.8 {d22}, [r0,:64], r2
- vst1.64 {d24}, [r0,:64], r2
+ vst1.8 {d24}, [r0,:64], r2
- vst1.64 {d26}, [r0,:64], r2
+ vst1.8 {d26}, [r0,:64], r2
- vst1.64 {d28}, [r0,:64], r2
+ vst1.8 {d28}, [r0,:64], r2
bx lr
endfunc
.endm
h264_qpel_v_lowpass put
h264_qpel_v_lowpass avg
.macro h264_qpel_v_lowpass_l2 type
function \type\()_h264_qpel16_v_lowpass_l2_neon
mov r4, lr
bl \type\()_h264_qpel8_v_lowpass_l2_neon
sub r1, r1, r3, lsl #2
bl \type\()_h264_qpel8_v_lowpass_l2_neon
sub r0, r0, r3, lsl #4
- sub ip, ip, r2, lsl #4
+ sub r12, r12, r2, lsl #4
add r0, r0, #8
- add ip, ip, #8
+ add r12, r12, #8
sub r1, r1, r3, lsl #4
sub r1, r1, r3, lsl #2
add r1, r1, #8
@@ -908,19 +908,19 @@ function \type\()_h264_qpel16_v_lowpass_l2_neon
endfunc
function \type\()_h264_qpel8_v_lowpass_l2_neon
- vld1.64 {d8}, [r1], r3
+ vld1.8 {d8}, [r1], r3
- vld1.64 {d10}, [r1], r3
+ vld1.8 {d10}, [r1], r3
- vld1.64 {d12}, [r1], r3
+ vld1.8 {d12}, [r1], r3
- vld1.64 {d14}, [r1], r3
+ vld1.8 {d14}, [r1], r3
- vld1.64 {d22}, [r1], r3
+ vld1.8 {d22}, [r1], r3
- vld1.64 {d24}, [r1], r3
+ vld1.8 {d24}, [r1], r3
- vld1.64 {d26}, [r1], r3
+ vld1.8 {d26}, [r1], r3
- vld1.64 {d28}, [r1], r3
+ vld1.8 {d28}, [r1], r3
- vld1.64 {d9}, [r1], r3
+ vld1.8 {d9}, [r1], r3
- vld1.64 {d11}, [r1], r3
+ vld1.8 {d11}, [r1], r3
- vld1.64 {d13}, [r1], r3
+ vld1.8 {d13}, [r1], r3
- vld1.64 {d15}, [r1], r3
+ vld1.8 {d15}, [r1], r3
- vld1.64 {d23}, [r1]
+ vld1.8 {d23}, [r1]
transpose_8x8 q4, q5, q6, q7, q11, q12, q13, q14
lowpass_8 d8, d9, d10, d11, d8, d9
@@ -929,20 +929,20 @@ function \type\()_h264_qpel8_v_lowpass_l2_neon
lowpass_8 d26, d27, d28, d29, d26, d27
transpose_8x8 d8, d9, d12, d13, d22, d23, d26, d27
- vld1.64 {d0}, [ip], r2
+ vld1.8 {d0}, [r12], r2
- vld1.64 {d1}, [ip], r2
+ vld1.8 {d1}, [r12], r2
- vld1.64 {d2}, [ip], r2
+ vld1.8 {d2}, [r12], r2
- vld1.64 {d3}, [ip], r2
+ vld1.8 {d3}, [r12], r2
- vld1.64 {d4}, [ip], r2
+ vld1.8 {d4}, [r12], r2
vrhadd.u8 q0, q0, q4
- vld1.64 {d5}, [ip], r2
+ vld1.8 {d5}, [r12], r2
vrhadd.u8 q1, q1, q6
- vld1.64 {d10}, [ip], r2
+ vld1.8 {d10}, [r12], r2
vrhadd.u8 q2, q2, q11
- vld1.64 {d11}, [ip], r2
+ vld1.8 {d11}, [r12], r2
vrhadd.u8 q5, q5, q13
.ifc \type,avg
vld1.8 {d16}, [r0,:64], r3
vrhadd.u8 d0, d0, d16
vld1.8 {d17}, [r0,:64], r3
@@ -960,51 +960,51 @@ function \type\()_h264_qpel8_v_lowpass_l2_neon
vld1.8 {d17}, [r0,:64], r3
vrhadd.u8 d11, d11, d17
sub r0, r0, r3, lsl #3
.endif
- vst1.64 {d0}, [r0,:64], r3
+ vst1.8 {d0}, [r0,:64], r3
- vst1.64 {d1}, [r0,:64], r3
+ vst1.8 {d1}, [r0,:64], r3
- vst1.64 {d2}, [r0,:64], r3
+ vst1.8 {d2}, [r0,:64], r3
- vst1.64 {d3}, [r0,:64], r3
+ vst1.8 {d3}, [r0,:64], r3
- vst1.64 {d4}, [r0,:64], r3
+ vst1.8 {d4}, [r0,:64], r3
- vst1.64 {d5}, [r0,:64], r3
+ vst1.8 {d5}, [r0,:64], r3
- vst1.64 {d10}, [r0,:64], r3
+ vst1.8 {d10}, [r0,:64], r3
- vst1.64 {d11}, [r0,:64], r3
+ vst1.8 {d11}, [r0,:64], r3
bx lr
endfunc
.endm
h264_qpel_v_lowpass_l2 put
h264_qpel_v_lowpass_l2 avg
function put_h264_qpel8_hv_lowpass_neon_top
- lowpass_const ip
+ lowpass_const r12
- mov ip, #12
+ mov r12, #12
- 1: vld1.64 {d0, d1}, [r1], r3
+ 1: vld1.8 {d0, d1}, [r1], r3
- vld1.64 {d16,d17}, [r1], r3
+ vld1.8 {d16,d17}, [r1], r3
- subs ip, ip, #2
+ subs r12, r12, #2
lowpass_8 d0, d1, d16, d17, q11, q12, narrow=0
- vst1.64 {d22-d25}, [r4,:128]!
+ vst1.8 {d22-d25}, [r4,:128]!
bne 1b
- vld1.64 {d0, d1}, [r1]
+ vld1.8 {d0, d1}, [r1]
lowpass_8_1 d0, d1, q12, narrow=0
- mov ip, #-16
+ mov r12, #-16
- add r4, r4, ip
+ add r4, r4, r12
- vld1.64 {d30,d31}, [r4,:128], ip
+ vld1.8 {d30,d31}, [r4,:128], r12
- vld1.64 {d20,d21}, [r4,:128], ip
+ vld1.8 {d20,d21}, [r4,:128], r12
- vld1.64 {d18,d19}, [r4,:128], ip
+ vld1.8 {d18,d19}, [r4,:128], r12
- vld1.64 {d16,d17}, [r4,:128], ip
+ vld1.8 {d16,d17}, [r4,:128], r12
- vld1.64 {d14,d15}, [r4,:128], ip
+ vld1.8 {d14,d15}, [r4,:128], r12
- vld1.64 {d12,d13}, [r4,:128], ip
+ vld1.8 {d12,d13}, [r4,:128], r12
- vld1.64 {d10,d11}, [r4,:128], ip
+ vld1.8 {d10,d11}, [r4,:128], r12
- vld1.64 {d8, d9}, [r4,:128], ip
+ vld1.8 {d8, d9}, [r4,:128], r12
- vld1.64 {d6, d7}, [r4,:128], ip
+ vld1.8 {d6, d7}, [r4,:128], r12
- vld1.64 {d4, d5}, [r4,:128], ip
+ vld1.8 {d4, d5}, [r4,:128], r12
- vld1.64 {d2, d3}, [r4,:128], ip
+ vld1.8 {d2, d3}, [r4,:128], r12
- vld1.64 {d0, d1}, [r4,:128]
+ vld1.8 {d0, d1}, [r4,:128]
swap4 d1, d3, d5, d7, d8, d10, d12, d14
transpose16_4x4 q0, q1, q2, q3, q4, q5, q6, q7
@@ -1012,31 +1012,31 @@ function put_h264_qpel8_hv_lowpass_neon_top
swap4 d17, d19, d21, d31, d24, d26, d28, d22
transpose16_4x4 q8, q9, q10, q15, q12, q13, q14, q11
- vst1.64 {d30,d31}, [r4,:128]!
+ vst1.8 {d30,d31}, [r4,:128]!
- vst1.64 {d6, d7}, [r4,:128]!
+ vst1.8 {d6, d7}, [r4,:128]!
- vst1.64 {d20,d21}, [r4,:128]!
+ vst1.8 {d20,d21}, [r4,:128]!
- vst1.64 {d4, d5}, [r4,:128]!
+ vst1.8 {d4, d5}, [r4,:128]!
- vst1.64 {d18,d19}, [r4,:128]!
+ vst1.8 {d18,d19}, [r4,:128]!
- vst1.64 {d2, d3}, [r4,:128]!
+ vst1.8 {d2, d3}, [r4,:128]!
- vst1.64 {d16,d17}, [r4,:128]!
+ vst1.8 {d16,d17}, [r4,:128]!
- vst1.64 {d0, d1}, [r4,:128]
+ vst1.8 {d0, d1}, [r4,:128]
lowpass_8.16 q4, q12, d8, d9, d24, d25, d8
lowpass_8.16 q5, q13, d10, d11, d26, d27, d9
lowpass_8.16 q6, q14, d12, d13, d28, d29, d10
lowpass_8.16 q7, q11, d14, d15, d22, d23, d11
- vld1.64 {d16,d17}, [r4,:128], ip
+ vld1.8 {d16,d17}, [r4,:128], r12
- vld1.64 {d30,d31}, [r4,:128], ip
+ vld1.8 {d30,d31}, [r4,:128], r12
lowpass_8.16 q8, q15, d16, d17, d30, d31, d12
- vld1.64 {d16,d17}, [r4,:128], ip
+ vld1.8 {d16,d17}, [r4,:128], r12
- vld1.64 {d30,d31}, [r4,:128], ip
+ vld1.8 {d30,d31}, [r4,:128], r12
lowpass_8.16 q8, q15, d16, d17, d30, d31, d13
- vld1.64 {d16,d17}, [r4,:128], ip
+ vld1.8 {d16,d17}, [r4,:128], r12
- vld1.64 {d30,d31}, [r4,:128], ip
+ vld1.8 {d30,d31}, [r4,:128], r12
lowpass_8.16 q8, q15, d16, d17, d30, d31, d14
- vld1.64 {d16,d17}, [r4,:128], ip
+ vld1.8 {d16,d17}, [r4,:128], r12
- vld1.64 {d30,d31}, [r4,:128]
+ vld1.8 {d30,d31}, [r4,:128]
lowpass_8.16 q8, q15, d16, d17, d30, d31, d15
transpose_8x8 d12, d13, d14, d15, d8, d9, d10, d11
@@ -1044,11 +1044,11 @@ function put_h264_qpel8_hv_lowpass_neon_top
bx lr
endfunc
.macro h264_qpel8_hv_lowpass type
function \type\()_h264_qpel8_hv_lowpass_neon
mov r10, lr
bl put_h264_qpel8_hv_lowpass_neon_top
.ifc \type,avg
vld1.8 {d0}, [r0,:64], r2
vrhadd.u8 d12, d12, d0
vld1.8 {d1}, [r0,:64], r2
@@ -1066,39 +1066,39 @@ function \type\()_h264_qpel8_hv_lowpass_neon
vld1.8 {d7}, [r0,:64], r2
vrhadd.u8 d11, d11, d7
sub r0, r0, r2, lsl #3
.endif
- vst1.64 {d12}, [r0,:64], r2
+ vst1.8 {d12}, [r0,:64], r2
- vst1.64 {d13}, [r0,:64], r2
+ vst1.8 {d13}, [r0,:64], r2
- vst1.64 {d14}, [r0,:64], r2
+ vst1.8 {d14}, [r0,:64], r2
- vst1.64 {d15}, [r0,:64], r2
+ vst1.8 {d15}, [r0,:64], r2
- vst1.64 {d8}, [r0,:64], r2
+ vst1.8 {d8}, [r0,:64], r2
- vst1.64 {d9}, [r0,:64], r2
+ vst1.8 {d9}, [r0,:64], r2
- vst1.64 {d10}, [r0,:64], r2
+ vst1.8 {d10}, [r0,:64], r2
- vst1.64 {d11}, [r0,:64], r2
+ vst1.8 {d11}, [r0,:64], r2
mov lr, r10
bx lr
endfunc
.endm
h264_qpel8_hv_lowpass put
h264_qpel8_hv_lowpass avg
.macro h264_qpel8_hv_lowpass_l2 type
function \type\()_h264_qpel8_hv_lowpass_l2_neon
mov r10, lr
bl put_h264_qpel8_hv_lowpass_neon_top
- vld1.64 {d0, d1}, [r2,:128]!
+ vld1.8 {d0, d1}, [r2,:128]!
- vld1.64 {d2, d3}, [r2,:128]!
+ vld1.8 {d2, d3}, [r2,:128]!
vrhadd.u8 q0, q0, q6
- vld1.64 {d4, d5}, [r2,:128]!
+ vld1.8 {d4, d5}, [r2,:128]!
vrhadd.u8 q1, q1, q7
- vld1.64 {d6, d7}, [r2,:128]!
+ vld1.8 {d6, d7}, [r2,:128]!
vrhadd.u8 q2, q2, q4
vrhadd.u8 q3, q3, q5
.ifc \type,avg
vld1.8 {d16}, [r0,:64], r3
vrhadd.u8 d0, d0, d16
vld1.8 {d17}, [r0,:64], r3
@@ -1116,25 +1116,25 @@ function \type\()_h264_qpel8_hv_lowpass_l2_neon
vld1.8 {d23}, [r0,:64], r3
vrhadd.u8 d7, d7, d23
sub r0, r0, r3, lsl #3
.endif
- vst1.64 {d0}, [r0,:64], r3
+ vst1.8 {d0}, [r0,:64], r3
- vst1.64 {d1}, [r0,:64], r3
+ vst1.8 {d1}, [r0,:64], r3
- vst1.64 {d2}, [r0,:64], r3
+ vst1.8 {d2}, [r0,:64], r3
- vst1.64 {d3}, [r0,:64], r3
+ vst1.8 {d3}, [r0,:64], r3
- vst1.64 {d4}, [r0,:64], r3
+ vst1.8 {d4}, [r0,:64], r3
- vst1.64 {d5}, [r0,:64], r3
+ vst1.8 {d5}, [r0,:64], r3
- vst1.64 {d6}, [r0,:64], r3
+ vst1.8 {d6}, [r0,:64], r3
- vst1.64 {d7}, [r0,:64], r3
+ vst1.8 {d7}, [r0,:64], r3
mov lr, r10
bx lr
endfunc
.endm
h264_qpel8_hv_lowpass_l2 put
h264_qpel8_hv_lowpass_l2 avg
.macro h264_qpel16_hv type
function \type\()_h264_qpel16_hv_lowpass_neon
mov r9, lr
bl \type\()_h264_qpel8_hv_lowpass_neon
@@ -1167,17 +1167,17 @@ function \type\()_h264_qpel16_hv_lowpass_l2_neon
mov lr, r9
b \type\()_h264_qpel8_hv_lowpass_l2_neon
endfunc
.endm
h264_qpel16_hv put
h264_qpel16_hv avg
.macro h264_qpel8 type
function ff_\type\()_h264_qpel8_mc10_neon, export=1
lowpass_const r3
mov r3, r1
sub r1, r1, #2
- mov ip, #8
+ mov r12, #8
b \type\()_h264_qpel8_h_lowpass_l2_neon
endfunc
@@ -1185,7 +1185,7 @@ function ff_\type\()_h264_qpel8_mc20_neon, export=1
lowpass_const r3
sub r1, r1, #2
mov r3, r2
- mov ip, #8
+ mov r12, #8
b \type\()_h264_qpel8_h_lowpass_neon
endfunc
@@ -1193,13 +1193,13 @@ function ff_\type\()_h264_qpel8_mc30_neon, export=1
lowpass_const r3
add r3, r1, #1
sub r1, r1, #2
- mov ip, #8
+ mov r12, #8
b \type\()_h264_qpel8_h_lowpass_l2_neon
endfunc
function ff_\type\()_h264_qpel8_mc01_neon, export=1
push {lr}
- mov ip, r1
+ mov r12, r1
\type\()_h264_qpel8_mc01:
lowpass_const r3
mov r3, r2
@@ -1222,12 +1222,12 @@ T mov sp, r0
mov r0, sp
sub r1, r1, #2
mov r3, #8
- mov ip, #8
+ mov r12, #8
vpush {d8-d15}
bl put_h264_qpel8_h_lowpass_neon
ldrd r0, [r11], #8
mov r3, r2
- add ip, sp, #64
+ add r12, sp, #64
sub r1, r1, r2, lsl #1
mov r2, #8
bl \type\()_h264_qpel8_v_lowpass_l2_neon
@@ -1248,7 +1248,7 @@ T mov sp, r0
sub r1, r1, #2
mov r3, #8
mov r0, sp
- mov ip, #8
+ mov r12, #8
vpush {d8-d15}
bl put_h264_qpel8_h_lowpass_neon
mov r4, r0
@@ -1333,7 +1333,7 @@ endfunc
function ff_\type\()_h264_qpel8_mc03_neon, export=1
push {lr}
- add ip, r1, r2
+ add r12, r1, r2
b \type\()_h264_qpel8_mc01
endfunc
@@ -1356,12 +1356,12 @@ function ff_\type\()_h264_qpel8_mc33_neon, export=1
sub r1, r1, #1
b \type\()_h264_qpel8_mc11
endfunc
.endm
h264_qpel8 put
h264_qpel8 avg
.macro h264_qpel16 type
function ff_\type\()_h264_qpel16_mc10_neon, export=1
lowpass_const r3
mov r3, r1
@@ -1385,7 +1385,7 @@ endfunc
function ff_\type\()_h264_qpel16_mc01_neon, export=1
push {r4, lr}
- mov ip, r1
+ mov r12, r1
\type\()_h264_qpel16_mc01:
lowpass_const r3
mov r3, r2
@@ -1412,7 +1412,7 @@ T mov sp, r0
bl put_h264_qpel16_h_lowpass_neon
ldrd r0, [r11], #8
mov r3, r2
- add ip, sp, #64
+ add r12, sp, #64
sub r1, r1, r2, lsl #1
mov r2, #16
bl \type\()_h264_qpel16_v_lowpass_l2_neon
@@ -1515,7 +1515,7 @@ endfunc
function ff_\type\()_h264_qpel16_mc03_neon, export=1
push {r4, lr}
- add ip, r1, r2
+ add r12, r1, r2
b \type\()_h264_qpel16_mc01
endfunc
@@ -1538,14 +1538,14 @@ function ff_\type\()_h264_qpel16_mc33_neon, export=1
sub r1, r1, #1
b \type\()_h264_qpel16_mc11
endfunc
.endm
h264_qpel16 put
h264_qpel16 avg
@ Biweighted prediction
.macro biweight_16 macs, macd
vdup.8 d0, r4
vdup.8 d1, r5
vmov q2, q8
@@ -1583,9 +1583,9 @@ endfunc
vst1.8 {d24-d25},[r6,:128], r2
bne 1b
pop {r4-r6, pc}
.endm
.macro biweight_8 macs, macd
vdup.8 d0, r4
vdup.8 d1, r5
vmov q1, q8
@@ -1613,9 +1613,9 @@ endfunc
vst1.8 {d4},[r6,:64], r2
bne 1b
pop {r4-r6, pc}
.endm
.macro biweight_4 macs, macd
vdup.8 d0, r4
vdup.8 d1, r5
vmov q1, q8
@@ -1655,9 +1655,9 @@ endfunc
vst1.32 {d2[0]},[r6,:32], r2
vst1.32 {d2[1]},[r6,:32], r2
pop {r4-r6, pc}
.endm
.macro biweight_func w
function ff_biweight_h264_pixels_\w\()_neon, export=1
push {r4-r6, lr}
ldr r12, [sp, #16]
@@ -1687,7 +1687,7 @@ function ff_biweight_h264_pixels_\w\()_neon, export=1
40: rsb r5, r5, #0
biweight_\w vmlsl.u8, vmlal.u8
endfunc
.endm
biweight_func 16
biweight_func 8
@@ -1695,7 +1695,7 @@ endfunc
@ Weighted prediction
.macro weight_16 add
vdup.8 d0, r12
1: subs r2, r2, #2
vld1.8 {d20-d21},[r0,:128], r1
@@ -1722,9 +1722,9 @@ endfunc
vst1.8 {d24-d25},[r4,:128], r1
bne 1b
pop {r4, pc}
.endm
.macro weight_8 add
vdup.8 d0, r12
1: subs r2, r2, #2
vld1.8 {d4},[r0,:64], r1
@@ -1743,9 +1743,9 @@ endfunc
vst1.8 {d4},[r4,:64], r1
bne 1b
pop {r4, pc}
.endm
.macro weight_4 add
vdup.8 d0, r12
vmov q1, q8
vmov q10, q8
@@ -1779,9 +1779,9 @@ endfunc
vst1.32 {d2[0]},[r4,:32], r1
vst1.32 {d2[1]},[r4,:32], r1
pop {r4, pc}
.endm
.macro weight_func w
function ff_weight_h264_pixels_\w\()_neon, export=1
push {r4, lr}
ldr r12, [sp, #8]
@@ -1806,7 +1806,7 @@ function ff_weight_h264_pixels_\w\()_neon, export=1
10: rsb r12, r12, #0
weight_\w vsub.s16
endfunc
.endm
weight_func 16
weight_func 8