Commit 2b156269 authored by Kaustubh Raste, committed by Michael Niedermayer

avcodec/mips: preload data in hevc sao edge 90 degree filter msa functions

Signed-off-by: Kaustubh Raste <kaustubh.raste@imgtec.com>
Reviewed-by: Manojkumar Bhosale <Manojkumar.Bhosale@imgtec.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
parent 21e077fc
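The diff below restructures each loop so that the next pair of rows is loaded before the current results are stored ("load in advance"), with the final row pair handled in an epilogue after the loop, so the load latency can hide behind the arithmetic. As a minimal scalar sketch of that restructuring (hypothetical names, a plain per-pixel offset standing in for the SAO edge computation, width <= 64 and height even and >= 2 assumed; this is not the MSA code itself):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in: the loop shape mirrors the patch - first row pair
 * loaded before the loop, next pair loaded before the current stores,
 * last pair finished after the loop. */
static void add_offset_rows_preloaded(uint8_t *dst, const uint8_t *src,
                                      ptrdiff_t stride, int width,
                                      int height, int offset)
{
    uint8_t row0[64], row1[64], out0[64], out1[64];
    int i;

    /* load in advance */
    memcpy(row0, src, width);
    memcpy(row1, src + stride, width);

    for (height -= 2; height; height -= 2) {
        src += 2 * stride;

        /* compute on the pair loaded in the previous iteration */
        for (i = 0; i < width; i++) {
            out0[i] = (uint8_t) (row0[i] + offset);
            out1[i] = (uint8_t) (row1[i] + offset);
        }

        /* load in advance: fetch the next pair before the stores below */
        memcpy(row0, src, width);
        memcpy(row1, src + stride, width);

        memcpy(dst, out0, width);
        memcpy(dst + stride, out1, width);
        dst += 2 * stride;
    }

    /* epilogue: the pair loaded last is processed without further loads */
    for (i = 0; i < width; i++) {
        out0[i] = (uint8_t) (row0[i] + offset);
        out1[i] = (uint8_t) (row1[i] + offset);
    }
    memcpy(dst, out0, width);
    memcpy(dst + stride, out1, width);
}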
@@ -1568,23 +1568,25 @@ static void hevc_sao_edge_filter_90degree_4width_msa(uint8_t *dst,
                                                      int16_t *sao_offset_val,
                                                      int32_t height)
 {
-    int32_t h_cnt;
     uint32_t dst_val0, dst_val1;
-    v8i16 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0 };
+    v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
     v16u8 const1 = (v16u8) __msa_ldi_b(1);
     v16i8 dst0;
-    v16i8 zero = { 0 };
+    v16i8 sao_offset = LD_SB(sao_offset_val);
     v16u8 cmp_minus10, diff_minus10, cmp_minus11, diff_minus11;
     v16u8 src_minus10, src_minus11, src10, src11;
     v16i8 src_zero0, src_zero1;
-    v8i16 sao_offset, src00, src01, offset_mask0, offset_mask1;
+    v16i8 offset;
+    v8i16 offset_mask0, offset_mask1;
 
-    sao_offset = LD_SH(sao_offset_val);
+    sao_offset = __msa_pckev_b(sao_offset, sao_offset);
 
+    /* load in advance */
     LD_UB2(src - src_stride, src_stride, src_minus10, src_minus11);
+    LD_UB2(src + src_stride, src_stride, src10, src11);
 
-    for (h_cnt = (height >> 1); h_cnt--;) {
-        LD_UB2(src + src_stride, src_stride, src10, src11);
+    for (height -= 2; height; height -= 2) {
+        src += (src_stride << 1);
 
         src_minus10 = (v16u8) __msa_ilvr_b((v16i8) src10, (v16i8) src_minus10);
         src_zero0 = __msa_ilvr_b((v16i8) src_minus11, (v16i8) src_minus11);
@@ -1604,19 +1606,22 @@ static void hevc_sao_edge_filter_90degree_4width_msa(uint8_t *dst,
         offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2);
         offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2);
 
-        VSHF_H2_SH(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask0,
-                   offset_mask0, offset_mask0, offset_mask0);
-        VSHF_H2_SH(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask1,
-                   offset_mask1, offset_mask1, offset_mask1);
-        ILVEV_B2_SH(src_zero0, zero, src_zero1, zero, src00, src01);
-        ADD2(offset_mask0, src00, offset_mask1, src01, offset_mask0,
-             offset_mask1);
-        CLIP_SH2_0_255(offset_mask0, offset_mask1);
-        dst0 = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+        offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+        dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0);
+
+        VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset,
+                   offset, offset);
+
+        dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+        dst0 = __msa_adds_s_b(dst0, offset);
+        dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
 
         src_minus10 = src10;
         src_minus11 = src11;
 
+        /* load in advance */
+        LD_UB2(src + src_stride, src_stride, src10, src11);
+
         dst_val0 = __msa_copy_u_w((v4i32) dst0, 0);
         dst_val1 = __msa_copy_u_w((v4i32) dst0, 2);
         SW(dst_val0, dst);
@@ -1624,8 +1629,41 @@ static void hevc_sao_edge_filter_90degree_4width_msa(uint8_t *dst,
         SW(dst_val1, dst);
         dst += dst_stride;
-        src += (src_stride << 1);
     }
+
+    src_minus10 = (v16u8) __msa_ilvr_b((v16i8) src10, (v16i8) src_minus10);
+    src_zero0 = __msa_ilvr_b((v16i8) src_minus11, (v16i8) src_minus11);
+    src_minus11 = (v16u8) __msa_ilvr_b((v16i8) src11, (v16i8) src_minus11);
+    src_zero1 = __msa_ilvr_b((v16i8) src10, (v16i8) src10);
+
+    cmp_minus10 = ((v16u8) src_zero0 == src_minus10);
+    diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10);
+    cmp_minus10 = (src_minus10 < (v16u8) src_zero0);
+    diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10);
+
+    cmp_minus11 = ((v16u8) src_zero1 == src_minus11);
+    diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11);
+    cmp_minus11 = (src_minus11 < (v16u8) src_zero1);
+    diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11);
+
+    offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2);
+    offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2);
+
+    offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+    dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0);
+
+    VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset,
+               offset, offset, offset);
+
+    dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+    dst0 = __msa_adds_s_b(dst0, offset);
+    dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+
+    dst_val0 = __msa_copy_u_w((v4i32) dst0, 0);
+    dst_val1 = __msa_copy_u_w((v4i32) dst0, 2);
+    SW(dst_val0, dst);
+    dst += dst_stride;
+    SW(dst_val1, dst);
 }
 
 static void hevc_sao_edge_filter_90degree_8width_msa(uint8_t *dst,
@@ -1635,22 +1673,23 @@ static void hevc_sao_edge_filter_90degree_8width_msa(uint8_t *dst,
                                                      int16_t *sao_offset_val,
                                                      int32_t height)
 {
-    int32_t h_cnt;
     uint64_t dst_val0, dst_val1;
-    v8i16 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0 };
+    v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
     v16u8 const1 = (v16u8) __msa_ldi_b(1);
-    v16i8 zero = { 0 };
-    v16i8 src_zero0, src_zero1, dst0, dst1;
+    v16i8 offset, sao_offset = LD_SB(sao_offset_val);
+    v16i8 src_zero0, src_zero1, dst0;
     v16u8 cmp_minus10, diff_minus10, cmp_minus11, diff_minus11;
     v16u8 src_minus10, src_minus11, src10, src11;
-    v8i16 sao_offset, src00, offset_mask0, src01, offset_mask1;
+    v8i16 offset_mask0, offset_mask1;
 
-    sao_offset = LD_SH(sao_offset_val);
+    sao_offset = __msa_pckev_b(sao_offset, sao_offset);
 
+    /* load in advance */
     LD_UB2(src - src_stride, src_stride, src_minus10, src_minus11);
+    LD_UB2(src + src_stride, src_stride, src10, src11);
 
-    for (h_cnt = (height >> 1); h_cnt--;) {
-        LD_UB2(src + src_stride, src_stride, src10, src11);
+    for (height -= 2; height; height -= 2) {
+        src += (src_stride << 1);
 
         src_minus10 = (v16u8) __msa_ilvr_b((v16i8) src10, (v16i8) src_minus10);
         src_zero0 = __msa_ilvr_b((v16i8) src_minus11, (v16i8) src_minus11);
@@ -1670,28 +1709,63 @@ static void hevc_sao_edge_filter_90degree_8width_msa(uint8_t *dst,
         offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2);
         offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2);
 
-        VSHF_H2_SH(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask0,
-                   offset_mask0, offset_mask0, offset_mask0);
-        VSHF_H2_SH(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask1,
-                   offset_mask1, offset_mask1, offset_mask1);
-        ILVEV_B2_SH(src_zero0, zero, src_zero1, zero, src00, src01);
-        ADD2(offset_mask0, src00, offset_mask1, src01, offset_mask0,
-             offset_mask1);
-        CLIP_SH2_0_255(offset_mask0, offset_mask1);
-        PCKEV_B2_SB(offset_mask0, offset_mask0, offset_mask1, offset_mask1,
-                    dst0, dst1);
+        offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+        dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0);
+
+        VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset,
+                   offset, offset, offset);
+
+        dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+        dst0 = __msa_adds_s_b(dst0, offset);
+        dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
 
         src_minus10 = src10;
         src_minus11 = src11;
 
+        /* load in advance */
+        LD_UB2(src + src_stride, src_stride, src10, src11);
+
         dst_val0 = __msa_copy_u_d((v2i64) dst0, 0);
-        dst_val1 = __msa_copy_u_d((v2i64) dst1, 0);
+        dst_val1 = __msa_copy_u_d((v2i64) dst0, 1);
+
         SD(dst_val0, dst);
         dst += dst_stride;
         SD(dst_val1, dst);
         dst += dst_stride;
-        src += (src_stride << 1);
     }
+
+    src_minus10 = (v16u8) __msa_ilvr_b((v16i8) src10, (v16i8) src_minus10);
+    src_zero0 = __msa_ilvr_b((v16i8) src_minus11, (v16i8) src_minus11);
+    src_minus11 = (v16u8) __msa_ilvr_b((v16i8) src11, (v16i8) src_minus11);
+    src_zero1 = __msa_ilvr_b((v16i8) src10, (v16i8) src10);
+
+    cmp_minus10 = ((v16u8) src_zero0 == src_minus10);
+    diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10);
+    cmp_minus10 = (src_minus10 < (v16u8) src_zero0);
+    diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10);
+
+    cmp_minus11 = ((v16u8) src_zero1 == src_minus11);
+    diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11);
+    cmp_minus11 = (src_minus11 < (v16u8) src_zero1);
+    diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11);
+
+    offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2);
+    offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2);
+
+    offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+    dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0);
+
+    VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset,
+               offset, offset);
+
+    dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+    dst0 = __msa_adds_s_b(dst0, offset);
+    dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+
+    dst_val0 = __msa_copy_u_d((v2i64) dst0, 0);
+    dst_val1 = __msa_copy_u_d((v2i64) dst0, 1);
+
+    SD(dst_val0, dst);
+    dst += dst_stride;
+    SD(dst_val1, dst);
 }
 
 static void hevc_sao_edge_filter_90degree_16multiple_msa(uint8_t *dst,
@@ -1715,15 +1789,13 @@ static void hevc_sao_edge_filter_90degree_16multiple_msa(uint8_t *dst,
     v16u8 src10, src_minus10, dst0, src11, src_minus11, dst1;
     v16u8 src12, dst2, src13, dst3;
     v16i8 offset_mask0, offset_mask1, offset_mask2, offset_mask3, sao_offset;
-    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
-    v8i16 temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
 
     sao_offset = LD_SB(sao_offset_val);
     sao_offset = __msa_pckev_b(sao_offset, sao_offset);
 
-    for (v_cnt = 0; v_cnt < (width >> 4); v_cnt++) {
-        src = src_orig + (v_cnt << 4);
-        dst = dst_orig + (v_cnt << 4);
+    for (v_cnt = 0; v_cnt < width; v_cnt += 16) {
+        src = src_orig + v_cnt;
+        dst = dst_orig + v_cnt;
 
         LD_UB2(src - src_stride, src_stride, src_minus10, src_minus11);
@@ -1779,24 +1851,15 @@ static void hevc_sao_edge_filter_90degree_16multiple_msa(uint8_t *dst,
             VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset,
                        offset_mask3, offset_mask3, offset_mask3, offset_mask3);
 
-            UNPCK_UB_SH(src_minus11, src0, src1);
-            UNPCK_SB_SH(offset_mask0, temp0, temp1);
-            UNPCK_UB_SH(src10, src2, src3);
-            UNPCK_SB_SH(offset_mask1, temp2, temp3);
-            UNPCK_UB_SH(src11, src4, src5);
-            UNPCK_SB_SH(offset_mask2, temp4, temp5);
-            UNPCK_UB_SH(src12, src6, src7);
-            UNPCK_SB_SH(offset_mask3, temp6, temp7);
-            ADD4(temp0, src0, temp1, src1, temp2, src2, temp3, src3, temp0,
-                 temp1, temp2, temp3);
-            ADD4(temp4, src4, temp5, src5, temp6, src6, temp7, src7, temp4,
-                 temp5, temp6, temp7);
-            CLIP_SH4_0_255(temp0, temp1, temp2, temp3);
-            CLIP_SH4_0_255(temp4, temp5, temp6, temp7);
-            PCKEV_B4_UB(temp1, temp0, temp3, temp2, temp5, temp4, temp7, temp6,
-                        dst0, dst1, dst2, dst3);
-
             src_minus10 = src12;
+            XORI_B4_128_UB(src_minus11, src10, src11, src12);
+
+            dst0 = (v16u8) __msa_adds_s_b((v16i8) src_minus11, offset_mask0);
+            dst1 = (v16u8) __msa_adds_s_b((v16i8) src10, offset_mask1);
+            dst2 = (v16u8) __msa_adds_s_b((v16i8) src11, offset_mask2);
+            dst3 = (v16u8) __msa_adds_s_b((v16i8) src12, offset_mask3);
+
+            XORI_B4_128_UB(dst0, dst1, dst2, dst3);
             src_minus11 = src13;
 
             ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
...
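Besides the preloads, the patch drops the widen-to-16-bit ADD / CLIP_SH*_0_255 / pack path and applies the offsets directly on bytes: an XORI with 128 re-biases the unsigned pixels into signed range, __msa_adds_s_b does a signed saturating add, and a second XORI restores the unsigned range. A per-byte sketch (hypothetical helper name) of why that sequence equals an unsigned add clamped to [0, 255], assuming the SAO offset fits in an int8_t:

#include <stdint.h>
#include <stdio.h>

/* Scalar equivalent of XORI_B(..., 128) -> __msa_adds_s_b -> XORI_B(..., 128):
 * flipping the top bit maps unsigned [0, 255] onto signed [-128, 127], so a
 * signed saturating add followed by flipping the bit back clamps the result
 * to [0, 255] without widening to 16 bits. */
static uint8_t sao_add_clip(uint8_t pix, int8_t offset)
{
    int8_t biased = (int8_t) (pix ^ 0x80);       /* xori_b(pix, 128)   */
    int16_t sum = (int16_t) (biased + offset);   /* widen to test range */

    if (sum > 127)                               /* adds_s_b saturation */
        sum = 127;
    else if (sum < -128)
        sum = -128;

    return (uint8_t) ((int8_t) sum ^ 0x80);      /* xori_b(sum, 128)   */
}

int main(void)
{
    /* 250 + 10 clamps to 255, 3 - 10 clamps to 0, 100 + 5 stays 105 */
    printf("%d %d %d\n", sao_add_clip(250, 10), sao_add_clip(3, -10),
           sao_add_clip(100, 5));
    return 0;
}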