Commit fb92f3ec authored by Shivraj Patil, committed by Michael Niedermayer

avcodec/mips: MSA (MIPS-SIMD-Arch) optimizations for AVC idct functions

This patch adds MSA (MIPS-SIMD-Arch) optimizations for the AVC idct functions in a new file, h264idct_msa.c.
It also adds the new generic macros required by this patch to libavutil/mips/generic_macros_msa.h.
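For reference, these kernels implement the standard AVC/H.264 4x4 inverse transform: two 1-D passes of shift-add butterflies followed by a rounded add to the prediction. A scalar sketch of that transform follows; the function name idct4x4_add_ref is hypothetical and av_clip_uint8() comes from libavutil/common.h, so this is an illustration of the math being vectorized, not code from this patch:

/* Illustrative scalar reference for the 4x4 AVC inverse transform + add */
#include "libavutil/common.h"   /* av_clip_uint8() */

static void idct4x4_add_ref(uint8_t *dst, int16_t *blk, int stride)
{
    int i, tmp[16];

    for (i = 0; i < 4; i++) {                    /* pass 1: rows */
        int e0 = blk[i * 4 + 0] + blk[i * 4 + 2];
        int e1 = blk[i * 4 + 0] - blk[i * 4 + 2];
        int e2 = (blk[i * 4 + 1] >> 1) - blk[i * 4 + 3];
        int e3 = blk[i * 4 + 1] + (blk[i * 4 + 3] >> 1);

        tmp[i * 4 + 0] = e0 + e3;
        tmp[i * 4 + 1] = e1 + e2;
        tmp[i * 4 + 2] = e1 - e2;
        tmp[i * 4 + 3] = e0 - e3;
    }
    for (i = 0; i < 4; i++) {                    /* pass 2: columns + add */
        int e0 = tmp[0 + i] + tmp[8 + i];
        int e1 = tmp[0 + i] - tmp[8 + i];
        int e2 = (tmp[4 + i] >> 1) - tmp[12 + i];
        int e3 = tmp[4 + i] + (tmp[12 + i] >> 1);

        dst[0 * stride + i] = av_clip_uint8(dst[0 * stride + i] + ((e0 + e3 + 32) >> 6));
        dst[1 * stride + i] = av_clip_uint8(dst[1 * stride + i] + ((e1 + e2 + 32) >> 6));
        dst[2 * stride + i] = av_clip_uint8(dst[2 * stride + i] + ((e1 - e2 + 32) >> 6));
        dst[3 * stride + i] = av_clip_uint8(dst[3 * stride + i] + ((e0 - e3 + 32) >> 6));
    }
}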
Signed-off-by: Shivraj Patil <shivraj.patil@imgtec.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
parent 1d70b6fe
libavcodec/mips/Makefile
@@ -31,7 +31,8 @@ MSA-OBJS-$(CONFIG_HEVC_DECODER)          += mips/hevcdsp_msa.o \
mips/hevc_idct_msa.o \
mips/hevc_lpf_sao_msa.o \
mips/hevcpred_msa.o
-MSA-OBJS-$(CONFIG_H264DSP)               += mips/h264dsp_msa.o
+MSA-OBJS-$(CONFIG_H264DSP)               += mips/h264dsp_msa.o \
+                                            mips/h264idct_msa.o
MSA-OBJS-$(CONFIG_H264CHROMA) += mips/h264chroma_msa.o
MSA-OBJS-$(CONFIG_H264PRED) += mips/h264pred_msa.o
LOONGSON3-OBJS-$(CONFIG_H264DSP) += mips/h264dsp_mmi.o
libavcodec/mips/h264dsp_init_mips.c
@@ -62,6 +62,21 @@ static av_cold void h264dsp_init_msa(H264DSPContext *c,
c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16_8_msa;
c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels8_8_msa;
c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels4_8_msa;
c->h264_idct_add = ff_h264_idct_add_msa;
c->h264_idct8_add = ff_h264_idct8_addblk_msa;
c->h264_idct_dc_add = ff_h264_idct4x4_addblk_dc_msa;
c->h264_idct8_dc_add = ff_h264_idct8_dc_addblk_msa;
c->h264_idct_add16 = ff_h264_idct_add16_msa;
c->h264_idct8_add4 = ff_h264_idct8_add4_msa;
if (chroma_format_idc <= 1)
c->h264_idct_add8 = ff_h264_idct_add8_msa;
else
c->h264_idct_add8 = ff_h264_idct_add8_422_msa;
c->h264_idct_add16intra = ff_h264_idct_add16_intra_msa;
c->h264_luma_dc_dequant_idct = ff_h264_deq_idct_luma_dc_msa;
} // if (8 == bit_depth)
}
#endif // #if HAVE_MSA
libavcodec/mips/h264dsp_mips.h
@@ -41,6 +41,30 @@ void ff_h264_h_loop_filter_luma_mbaff_msa(uint8_t *src, int32_t stride,
int32_t alpha, int32_t beta,
int8_t *tc0);
void ff_h264_idct_add_msa(uint8_t *dst, int16_t *src, int32_t dst_stride);
void ff_h264_idct4x4_addblk_dc_msa(uint8_t *dst, int16_t *src,
int32_t dst_stride);
void ff_h264_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src,
int32_t de_q_val);
void ff_h264_idct_add16_msa(uint8_t *dst, const int32_t *blk_offset,
int16_t *block, int32_t stride,
const uint8_t nnzc[15 * 8]);
void ff_h264_idct_add16_intra_msa(uint8_t *dst, const int32_t *blk_offset,
int16_t *block, int32_t dst_stride,
const uint8_t nnzc[15 * 8]);
void ff_h264_idct_add8_msa(uint8_t **dst, const int32_t *blk_offset,
int16_t *block, int32_t dst_stride,
const uint8_t nnzc[15 * 8]);
void ff_h264_idct_add8_422_msa(uint8_t **dst, const int32_t *blk_offset,
int16_t *block, int32_t dst_stride,
const uint8_t nnzc[15 * 8]);
void ff_h264_idct8_addblk_msa(uint8_t *dst, int16_t *src, int32_t dst_stride);
void ff_h264_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src,
int32_t dst_stride);
void ff_h264_idct8_add4_msa(uint8_t *dst, const int *blk_offset,
int16_t *blk, int dst_stride,
const uint8_t nnzc[15 * 8]);
void ff_h264_h_lpf_luma_intra_msa(uint8_t *src, int stride,
int alpha, int beta);
void ff_h264_v_lpf_luma_intra_msa(uint8_t *src, int stride,
libavcodec/mips/h264idct_msa.c (new file)
/*
* Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/mips/generic_macros_msa.h"
#include "h264dsp_mips.h"
#include "libavcodec/bit_depth_template.c"
#define AVC_ITRANS_H(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
\
tmp0_m = in0 + in2; \
tmp1_m = in0 - in2; \
tmp2_m = in1 >> 1; \
tmp2_m = tmp2_m - in3; \
tmp3_m = in3 >> 1; \
tmp3_m = in1 + tmp3_m; \
\
BUTTERFLY_4(tmp0_m, tmp1_m, tmp2_m, tmp3_m, out0, out1, out2, out3); \
}
static void avc_idct4x4_addblk_msa(uint8_t *dst, int16_t *src,
int32_t dst_stride)
{
v8i16 src0, src1, src2, src3;
v8i16 hres0, hres1, hres2, hres3;
v8i16 vres0, vres1, vres2, vres3;
v8i16 zeros = { 0 };
LD4x4_SH(src, src0, src1, src2, src3);
AVC_ITRANS_H(src0, src1, src2, src3, hres0, hres1, hres2, hres3);
TRANSPOSE4x4_SH_SH(hres0, hres1, hres2, hres3, hres0, hres1, hres2, hres3);
AVC_ITRANS_H(hres0, hres1, hres2, hres3, vres0, vres1, vres2, vres3);
SRARI_H4_SH(vres0, vres1, vres2, vres3, 6);
ADDBLK_ST4x4_UB(vres0, vres1, vres2, vres3, dst, dst_stride);
ST_SH2(zeros, zeros, src, 8);
}
static void avc_idct4x4_addblk_dc_msa(uint8_t *dst, int16_t *src,
int32_t dst_stride)
{
int16_t dc;
uint32_t src0, src1, src2, src3;
v16u8 pred = { 0 };
v16i8 out;
v8i16 input_dc, pred_r, pred_l;
dc = (src[0] + 32) >> 6;
input_dc = __msa_fill_h(dc);
src[0] = 0;
LW4(dst, dst_stride, src0, src1, src2, src3);
INSERT_W4_UB(src0, src1, src2, src3, pred);
UNPCK_UB_SH(pred, pred_r, pred_l);
pred_r += input_dc;
pred_l += input_dc;
CLIP_SH2_0_255(pred_r, pred_l);
out = __msa_pckev_b((v16i8) pred_l, (v16i8) pred_r);
ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
}
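/* Scalar view of the DC-only path above (illustrative sketch, not part
 * of the patch): the single coefficient is rounded, cleared, and added
 * to all 16 pixels with clipping:
 *
 *     dc = (src[0] + 32) >> 6;
 *     src[0] = 0;
 *     for (i = 0; i < 4; i++)
 *         for (j = 0; j < 4; j++)
 *             dst[i * dst_stride + j] =
 *                 av_clip_uint8(dst[i * dst_stride + j] + dc);
 */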
static void avc_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src,
int32_t de_q_val)
{
#define DC_DEST_STRIDE 16
int16_t out0, out1, out2, out3;
v8i16 src0, src1, src2, src3;
v8i16 vec0, vec1, vec2, vec3;
v8i16 hres0, hres1, hres2, hres3;
v8i16 vres0, vres1, vres2, vres3;
v4i32 vres0_r, vres1_r, vres2_r, vres3_r;
v4i32 de_q_vec = __msa_fill_w(de_q_val);
LD4x4_SH(src, src0, src1, src2, src3);
TRANSPOSE4x4_SH_SH(src0, src1, src2, src3, src0, src1, src2, src3);
BUTTERFLY_4(src0, src2, src3, src1, vec0, vec3, vec2, vec1);
BUTTERFLY_4(vec0, vec1, vec2, vec3, hres0, hres3, hres2, hres1);
TRANSPOSE4x4_SH_SH(hres0, hres1, hres2, hres3, hres0, hres1, hres2, hres3);
BUTTERFLY_4(hres0, hres1, hres3, hres2, vec0, vec3, vec2, vec1);
BUTTERFLY_4(vec0, vec1, vec2, vec3, vres0, vres1, vres2, vres3);
UNPCK_R_SH_SW(vres0, vres0_r);
UNPCK_R_SH_SW(vres1, vres1_r);
UNPCK_R_SH_SW(vres2, vres2_r);
UNPCK_R_SH_SW(vres3, vres3_r);
vres0_r *= de_q_vec;
vres1_r *= de_q_vec;
vres2_r *= de_q_vec;
vres3_r *= de_q_vec;
SRARI_W4_SW(vres0_r, vres1_r, vres2_r, vres3_r, 8);
PCKEV_H2_SH(vres1_r, vres0_r, vres3_r, vres2_r, vec0, vec1);
out0 = __msa_copy_s_h(vec0, 0);
out1 = __msa_copy_s_h(vec0, 1);
out2 = __msa_copy_s_h(vec0, 2);
out3 = __msa_copy_s_h(vec0, 3);
SH(out0, dst);
SH(out1, (dst + 2 * DC_DEST_STRIDE));
SH(out2, (dst + 8 * DC_DEST_STRIDE));
SH(out3, (dst + 10 * DC_DEST_STRIDE));
dst += DC_DEST_STRIDE;
out0 = __msa_copy_s_h(vec0, 4);
out1 = __msa_copy_s_h(vec0, 5);
out2 = __msa_copy_s_h(vec0, 6);
out3 = __msa_copy_s_h(vec0, 7);
SH(out0, dst);
SH(out1, (dst + 2 * DC_DEST_STRIDE));
SH(out2, (dst + 8 * DC_DEST_STRIDE));
SH(out3, (dst + 10 * DC_DEST_STRIDE));
dst += (3 * DC_DEST_STRIDE);
out0 = __msa_copy_s_h(vec1, 0);
out1 = __msa_copy_s_h(vec1, 1);
out2 = __msa_copy_s_h(vec1, 2);
out3 = __msa_copy_s_h(vec1, 3);
SH(out0, dst);
SH(out1, (dst + 2 * DC_DEST_STRIDE));
SH(out2, (dst + 8 * DC_DEST_STRIDE));
SH(out3, (dst + 10 * DC_DEST_STRIDE));
dst += DC_DEST_STRIDE;
out0 = __msa_copy_s_h(vec1, 4);
out1 = __msa_copy_s_h(vec1, 5);
out2 = __msa_copy_s_h(vec1, 6);
out3 = __msa_copy_s_h(vec1, 7);
SH(out0, dst);
SH(out1, (dst + 2 * DC_DEST_STRIDE));
SH(out2, (dst + 8 * DC_DEST_STRIDE));
SH(out3, (dst + 10 * DC_DEST_STRIDE));
#undef DC_DEST_STRIDE
}
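/* Scalar view of the above (illustrative sketch): a 4x4 Hadamard
 * transform (butterflies only, no shifts) is applied in both directions,
 * then each of the 16 results is dequantized with rounding,
 *
 *     out = (int16_t) ((res * de_q_val + (1 << 7)) >> 8);
 *
 * and scattered to the DC slot of its 4x4 luma block, the slots lying
 * multiples of DC_DEST_STRIDE (16) coefficients apart. */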
static void avc_idct8_addblk_msa(uint8_t *dst, int16_t *src, int32_t dst_stride)
{
v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 vec0, vec1, vec2, vec3;
v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
v4i32 tmp0_r, tmp1_r, tmp2_r, tmp3_r, tmp4_r, tmp5_r, tmp6_r, tmp7_r;
v4i32 tmp0_l, tmp1_l, tmp2_l, tmp3_l, tmp4_l, tmp5_l, tmp6_l, tmp7_l;
v4i32 vec0_r, vec1_r, vec2_r, vec3_r, vec0_l, vec1_l, vec2_l, vec3_l;
v4i32 res0_r, res1_r, res2_r, res3_r, res4_r, res5_r, res6_r, res7_r;
v4i32 res0_l, res1_l, res2_l, res3_l, res4_l, res5_l, res6_l, res7_l;
v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
v16i8 zeros = { 0 };
src[0] += 32;
LD_SH8(src, 8, src0, src1, src2, src3, src4, src5, src6, src7);
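    /* first 1-D pass: even part of the transform */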
vec0 = src0 + src4;
vec1 = src0 - src4;
vec2 = src2 >> 1;
vec2 = vec2 - src6;
vec3 = src6 >> 1;
vec3 = src2 + vec3;
BUTTERFLY_4(vec0, vec1, vec2, vec3, tmp0, tmp1, tmp2, tmp3);
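    /* first 1-D pass: odd part */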
vec0 = src7 >> 1;
vec0 = src5 - vec0 - src3 - src7;
vec1 = src3 >> 1;
vec1 = src1 - vec1 + src7 - src3;
vec2 = src5 >> 1;
vec2 = vec2 - src1 + src7 + src5;
vec3 = src1 >> 1;
vec3 = vec3 + src3 + src5 + src1;
tmp4 = vec3 >> 2;
tmp4 += vec0;
tmp5 = vec2 >> 2;
tmp5 += vec1;
tmp6 = vec1 >> 2;
tmp6 -= vec2;
tmp7 = vec0 >> 2;
tmp7 = vec3 - tmp7;
BUTTERFLY_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
res0, res1, res2, res3, res4, res5, res6, res7);
TRANSPOSE8x8_SH_SH(res0, res1, res2, res3, res4, res5, res6, res7,
res0, res1, res2, res3, res4, res5, res6, res7);
UNPCK_SH_SW(res0, tmp0_r, tmp0_l);
UNPCK_SH_SW(res1, tmp1_r, tmp1_l);
UNPCK_SH_SW(res2, tmp2_r, tmp2_l);
UNPCK_SH_SW(res3, tmp3_r, tmp3_l);
UNPCK_SH_SW(res4, tmp4_r, tmp4_l);
UNPCK_SH_SW(res5, tmp5_r, tmp5_l);
UNPCK_SH_SW(res6, tmp6_r, tmp6_l);
UNPCK_SH_SW(res7, tmp7_r, tmp7_l);
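    /* second 1-D pass on the sign-extended 32-bit halves: even part */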
BUTTERFLY_4(tmp0_r, tmp0_l, tmp4_l, tmp4_r, vec0_r, vec0_l, vec1_l, vec1_r);
vec2_r = tmp2_r >> 1;
vec2_l = tmp2_l >> 1;
vec2_r -= tmp6_r;
vec2_l -= tmp6_l;
vec3_r = tmp6_r >> 1;
vec3_l = tmp6_l >> 1;
vec3_r += tmp2_r;
vec3_l += tmp2_l;
BUTTERFLY_4(vec0_r, vec1_r, vec2_r, vec3_r, tmp0_r, tmp2_r, tmp4_r, tmp6_r);
BUTTERFLY_4(vec0_l, vec1_l, vec2_l, vec3_l, tmp0_l, tmp2_l, tmp4_l, tmp6_l);
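    /* second 1-D pass: odd part */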
vec0_r = tmp7_r >> 1;
vec0_l = tmp7_l >> 1;
vec0_r = tmp5_r - vec0_r - tmp3_r - tmp7_r;
vec0_l = tmp5_l - vec0_l - tmp3_l - tmp7_l;
vec1_r = tmp3_r >> 1;
vec1_l = tmp3_l >> 1;
vec1_r = tmp1_r - vec1_r + tmp7_r - tmp3_r;
vec1_l = tmp1_l - vec1_l + tmp7_l - tmp3_l;
vec2_r = tmp5_r >> 1;
vec2_l = tmp5_l >> 1;
vec2_r = vec2_r - tmp1_r + tmp7_r + tmp5_r;
vec2_l = vec2_l - tmp1_l + tmp7_l + tmp5_l;
vec3_r = tmp1_r >> 1;
vec3_l = tmp1_l >> 1;
vec3_r = vec3_r + tmp3_r + tmp5_r + tmp1_r;
vec3_l = vec3_l + tmp3_l + tmp5_l + tmp1_l;
tmp1_r = vec3_r >> 2;
tmp1_l = vec3_l >> 2;
tmp1_r += vec0_r;
tmp1_l += vec0_l;
tmp3_r = vec2_r >> 2;
tmp3_l = vec2_l >> 2;
tmp3_r += vec1_r;
tmp3_l += vec1_l;
tmp5_r = vec1_r >> 2;
tmp5_l = vec1_l >> 2;
tmp5_r -= vec2_r;
tmp5_l -= vec2_l;
tmp7_r = vec0_r >> 2;
tmp7_l = vec0_l >> 2;
tmp7_r = vec3_r - tmp7_r;
tmp7_l = vec3_l - tmp7_l;
BUTTERFLY_4(tmp0_r, tmp0_l, tmp7_l, tmp7_r, res0_r, res0_l, res7_l, res7_r);
BUTTERFLY_4(tmp2_r, tmp2_l, tmp5_l, tmp5_r, res1_r, res1_l, res6_l, res6_r);
BUTTERFLY_4(tmp4_r, tmp4_l, tmp3_l, tmp3_r, res2_r, res2_l, res5_l, res5_r);
BUTTERFLY_4(tmp6_r, tmp6_l, tmp1_l, tmp1_r, res3_r, res3_l, res4_l, res4_r);
SRA_4V(res0_r, res0_l, res1_r, res1_l, 6);
SRA_4V(res2_r, res2_l, res3_r, res3_l, 6);
SRA_4V(res4_r, res4_l, res5_r, res5_l, 6);
SRA_4V(res6_r, res6_l, res7_r, res7_l, 6);
PCKEV_H4_SH(res0_l, res0_r, res1_l, res1_r, res2_l, res2_r, res3_l, res3_r,
res0, res1, res2, res3);
PCKEV_H4_SH(res4_l, res4_r, res5_l, res5_r, res6_l, res6_r, res7_l, res7_r,
res4, res5, res6, res7);
LD_SB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
ILVR_B4_SH(zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3,
tmp0, tmp1, tmp2, tmp3);
ILVR_B4_SH(zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7,
tmp4, tmp5, tmp6, tmp7);
ADD4(res0, tmp0, res1, tmp1, res2, tmp2, res3, tmp3,
res0, res1, res2, res3);
ADD4(res4, tmp4, res5, tmp5, res6, tmp6, res7, tmp7,
res4, res5, res6, res7);
CLIP_SH4_0_255(res0, res1, res2, res3);
CLIP_SH4_0_255(res4, res5, res6, res7);
PCKEV_B4_SB(res1, res0, res3, res2, res5, res4, res7, res6,
dst0, dst1, dst2, dst3);
ST8x4_UB(dst0, dst1, dst, dst_stride);
dst += (4 * dst_stride);
ST8x4_UB(dst2, dst3, dst, dst_stride);
}
static void avc_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src,
int32_t dst_stride)
{
int32_t dc_val;
v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
v8i16 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r;
v8i16 dc;
v16i8 zeros = { 0 };
dc_val = (src[0] + 32) >> 6;
dc = __msa_fill_h(dc_val);
src[0] = 0;
LD_SB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
ILVR_B4_SH(zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3,
dst0_r, dst1_r, dst2_r, dst3_r);
ILVR_B4_SH(zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7,
dst4_r, dst5_r, dst6_r, dst7_r);
ADD4(dst0_r, dc, dst1_r, dc, dst2_r, dc, dst3_r, dc,
dst0_r, dst1_r, dst2_r, dst3_r);
ADD4(dst4_r, dc, dst5_r, dc, dst6_r, dc, dst7_r, dc,
dst4_r, dst5_r, dst6_r, dst7_r);
CLIP_SH4_0_255(dst0_r, dst1_r, dst2_r, dst3_r);
CLIP_SH4_0_255(dst4_r, dst5_r, dst6_r, dst7_r);
PCKEV_B4_SB(dst1_r, dst0_r, dst3_r, dst2_r, dst5_r, dst4_r, dst7_r, dst6_r,
dst0, dst1, dst2, dst3);
ST8x4_UB(dst0, dst1, dst, dst_stride);
dst += (4 * dst_stride);
ST8x4_UB(dst2, dst3, dst, dst_stride);
}
void ff_h264_idct_add_msa(uint8_t *dst, int16_t *src,
int32_t dst_stride)
{
avc_idct4x4_addblk_msa(dst, src, dst_stride);
memset(src, 0, 16 * sizeof(dctcoef));
}
void ff_h264_idct8_addblk_msa(uint8_t *dst, int16_t *src,
int32_t dst_stride)
{
avc_idct8_addblk_msa(dst, src, dst_stride);
memset(src, 0, 64 * sizeof(dctcoef));
}
void ff_h264_idct4x4_addblk_dc_msa(uint8_t *dst, int16_t *src,
int32_t dst_stride)
{
avc_idct4x4_addblk_dc_msa(dst, src, dst_stride);
}
void ff_h264_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src,
int32_t dst_stride)
{
avc_idct8_dc_addblk_msa(dst, src, dst_stride);
}
void ff_h264_idct_add16_msa(uint8_t *dst,
const int32_t *blk_offset,
int16_t *block, int32_t dst_stride,
const uint8_t nzc[15 * 8])
{
int32_t i;
for (i = 0; i < 16; i++) {
int32_t nnz = nzc[scan8[i]];
if (nnz) {
if (nnz == 1 && ((dctcoef *) block)[i * 16])
ff_h264_idct4x4_addblk_dc_msa(dst + blk_offset[i],
block + i * 16 * sizeof(pixel),
dst_stride);
else
ff_h264_idct_add_msa(dst + blk_offset[i],
block + i * 16 * sizeof(pixel),
dst_stride);
}
}
}
void ff_h264_idct8_add4_msa(uint8_t *dst, const int32_t *blk_offset,
int16_t *block, int32_t dst_stride,
const uint8_t nzc[15 * 8])
{
int32_t cnt;
for (cnt = 0; cnt < 16; cnt += 4) {
int32_t nnz = nzc[scan8[cnt]];
if (nnz) {
if (nnz == 1 && ((dctcoef *) block)[cnt * 16])
ff_h264_idct8_dc_addblk_msa(dst + blk_offset[cnt],
block + cnt * 16 * sizeof(pixel),
dst_stride);
else
ff_h264_idct8_addblk_msa(dst + blk_offset[cnt],
block + cnt * 16 * sizeof(pixel),
dst_stride);
}
}
}
void ff_h264_idct_add8_msa(uint8_t **dst,
const int32_t *blk_offset,
int16_t *block, int32_t dst_stride,
const uint8_t nzc[15 * 8])
{
int32_t i, j;
for (j = 1; j < 3; j++) {
for (i = (j * 16); i < (j * 16 + 4); i++) {
if (nzc[scan8[i]])
ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i],
block + i * 16 * sizeof(pixel),
dst_stride);
else if (((dctcoef *) block)[i * 16])
ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i],
block + i * 16 * sizeof(pixel),
dst_stride);
}
}
}
void ff_h264_idct_add8_422_msa(uint8_t **dst,
const int32_t *blk_offset,
int16_t *block, int32_t dst_stride,
const uint8_t nzc[15 * 8])
{
int32_t i, j;
for (j = 1; j < 3; j++) {
for (i = (j * 16); i < (j * 16 + 4); i++) {
if (nzc[scan8[i]])
ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i],
block + i * 16 * sizeof(pixel),
dst_stride);
else if (((dctcoef *) block)[i * 16])
ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i],
block + i * 16 * sizeof(pixel),
dst_stride);
}
}
for (j = 1; j < 3; j++) {
for (i = (j * 16 + 4); i < (j * 16 + 8); i++) {
if (nzc[scan8[i + 4]])
ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i + 4],
block + i * 16 * sizeof(pixel),
dst_stride);
else if (((dctcoef *) block)[i * 16])
ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i + 4],
block + i * 16 * sizeof(pixel),
dst_stride);
}
}
}
void ff_h264_idct_add16_intra_msa(uint8_t *dst,
const int32_t *blk_offset,
int16_t *block,
int32_t dst_stride,
const uint8_t nzc[15 * 8])
{
int32_t i;
for (i = 0; i < 16; i++) {
if (nzc[scan8[i]])
ff_h264_idct_add_msa(dst + blk_offset[i],
block + i * 16 * sizeof(pixel), dst_stride);
else if (((dctcoef *) block)[i * 16])
ff_h264_idct4x4_addblk_dc_msa(dst + blk_offset[i],
block + i * 16 * sizeof(pixel),
dst_stride);
}
}
void ff_h264_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src,
int32_t de_qval)
{
avc_deq_idct_luma_dc_msa(dst, src, de_qval);
}
libavutil/mips/generic_macros_msa.h
@@ -403,6 +403,19 @@
}
#define LD_SH16(...) LD_H16(v8i16, __VA_ARGS__)
/* Description : Load a 4x4 block of signed halfword elements from the 1D
                 source data into 4 vectors (each vector with 4 signed
                 halfwords)
   Arguments   : Inputs  - psrc
                 Outputs - out0, out1, out2, out3
*/
#define LD4x4_SH(psrc, out0, out1, out2, out3) \
{ \
out0 = LD_SH(psrc); \
out2 = LD_SH(psrc + 8); \
out1 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out0); \
out3 = (v8i16) __msa_ilvl_d((v2i64) out2, (v2i64) out2); \
}
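/* Usage example (illustrative):
 *
 *     v8i16 r0, r1, r2, r3;
 *     LD4x4_SH(coeffs, r0, r1, r2, r3);
 *
 * leaves row n of the 4x4 block in the low 4 halfwords of rn. */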
/* Description : Load 2 vectors of signed word elements with stride
Arguments : Inputs - psrc (source pointer to load from)
- stride
@@ -2025,6 +2038,22 @@
out1 = in2 - in3; \
}
/* Description : Sign extend halfword elements from the right half of the
                 vector
   Arguments   : Inputs  - in (input halfword vector)
                 Outputs - out (sign extended word vector)
   Return Type - signed word
   Details     : The sign bit of each halfword element of the input vector
                 'in' is extracted and interleaved with the same vector 'in'
                 to generate 4 word elements, keeping the sign intact
*/
#define UNPCK_R_SH_SW(in, out) \
{ \
v8i16 sign_m; \
\
sign_m = __msa_clti_s_h((v8i16) in, 0); \
out = (v4i32) __msa_ilvr_h(sign_m, (v8i16) in); \
}
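/* Scalar equivalent (illustrative):
 *
 *     for (i = 0; i < 4; i++)
 *         out[i] = (int32_t) in[i];    // sign-extend 16 -> 32 bits
 */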
/* Description : Sign extend byte elements from input vector and return
halfword results in pair of vectors
Arguments : Inputs - in (1 input byte vector)
@@ -2103,6 +2132,25 @@
out3 = in0 - in3; \
}
/* Description : Butterfly of 8 input vectors
   Arguments   : Inputs  - in0 ... in7
                 Outputs - out0 ... out7
   Details     : out0..out3 hold the sums of mirrored input pairs
                 (in0 + in7 ... in3 + in4) and out4..out7 the corresponding
                 differences (in3 - in4 ... in0 - in7)
*/
#define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3, out4, out5, out6, out7) \
{ \
out0 = in0 + in7; \
out1 = in1 + in6; \
out2 = in2 + in5; \
out3 = in3 + in4; \
\
out4 = in3 - in4; \
out5 = in2 - in5; \
out6 = in1 - in6; \
out7 = in0 - in7; \
}
/* Description : Transposes input 4x4 byte block
Arguments : Inputs - in0, in1, in2, in3 (input 4x4 byte block)
Outputs - out0, out1, out2, out3 (output 4x4 byte block)
@@ -2222,6 +2270,22 @@
out7 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
}
/* Description : Transposes a 4x4 block with halfword elements in vectors
   Arguments   : Inputs  - in0, in1, in2, in3
                 Outputs - out0, out1, out2, out3
   Return Type - signed halfword
   Details     : The 4x4 block formed by the low 4 halfwords of the four
                 input vectors is transposed into the low halves of the
                 four output vectors
*/
#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v8i16 s0_m, s1_m; \
\
ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \
ILVRL_W2_SH(s1_m, s0_m, out0, out2); \
out1 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out0); \
out3 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out2); \
}
/* Description : Transposes 8x8 block with half word elements in vectors
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
Outputs - out0, out1, out2, out3, out4, out5, out6, out7
@@ -2272,6 +2336,38 @@
out3 = (v4i32) __msa_ilvl_d((v2i64) s3_m, (v2i64) s1_m); \
}
/* Description : Add block 4x4
   Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
   Return Type - unsigned bytes
   Details     : The four least significant halfword residuals from each
                 input vector are added to the corresponding four destination
                 bytes, clipped to the range 0..255 and then stored
*/
#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \
{ \
uint32_t src0_m, src1_m, src2_m, src3_m; \
uint32_t out0_m, out1_m, out2_m, out3_m; \
v8i16 inp0_m, inp1_m, res0_m, res1_m; \
v16i8 dst0_m = { 0 }; \
v16i8 dst1_m = { 0 }; \
v16i8 zero_m = { 0 }; \
\
    ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m);              \
LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m); \
INSERT_W2_SB(src0_m, src1_m, dst0_m); \
INSERT_W2_SB(src2_m, src3_m, dst1_m); \
ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m); \
ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m); \
CLIP_SH2_0_255(res0_m, res1_m); \
PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); \
\
out0_m = __msa_copy_u_w((v4i32) dst0_m, 0); \
out1_m = __msa_copy_u_w((v4i32) dst0_m, 1); \
out2_m = __msa_copy_u_w((v4i32) dst1_m, 0); \
out3_m = __msa_copy_u_w((v4i32) dst1_m, 1); \
SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
}
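/* Scalar equivalent (illustrative), with in[i][j] denoting the j-th
 * halfword residual of input vector i:
 *
 *     for (i = 0; i < 4; i++)
 *         for (j = 0; j < 4; j++)
 *             pdst[i * stride + j] =
 *                 av_clip_uint8(pdst[i * stride + j] + in[i][j]);
 */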
/* Description : Pack even elements of input vectors & xor with 128
Arguments : Inputs - in0, in1
Outputs - out_m