Commit 8013da73 authored by David Conrad

VC1: add and use avg_no_rnd chroma MC functions

Originally committed as revision 18518 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent c374691b
@@ -1703,6 +1703,30 @@ static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*a
    }
}
static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    const int A=(8-x)*(8-y);
    const int B=( x)*(8-y);
    const int C=(8-x)*( y);
    const int D=( x)*( y);
    int i;

    assert(x<8 && y<8 && x>=0 && y>=0);

    for(i=0; i<h; i++)
    {
        dst[0] = avg2(dst[0], ((A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6));
        dst[1] = avg2(dst[1], ((A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6));
        dst[2] = avg2(dst[2], ((A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6));
        dst[3] = avg2(dst[3], ((A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6));
        dst[4] = avg2(dst[4], ((A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6));
        dst[5] = avg2(dst[5], ((A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6));
        dst[6] = avg2(dst[6], ((A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6));
        dst[7] = avg2(dst[7], ((A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6));
        dst+= stride;
        src+= stride;
    }
}
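/* Illustrative sketch, not part of this commit: the loop above is 8x8 bilinear
 * chroma interpolation with VC-1's no-rounding bias (32 - 4 = 28 before the
 * >>6), averaged into the existing prediction in dst. The helper below shows
 * one pixel of that computation; it assumes avg2() is the usual (a+b+1)>>1
 * byte average used elsewhere in dsputil.c, and <stdint.h> for uint8_t. */
#include <stdint.h>

static inline uint8_t avg_no_rnd_chroma_pixel(const uint8_t *src, int stride,
                                              int x, int y, uint8_t old)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    /* A+B+C+D == 64, so >>6 rescales the weighted sum; adding 28 instead of
       32 biases the division slightly downward, giving the "no rnd" result. */
    int p = (A*src[0] + B*src[1] + C*src[stride] + D*src[stride+1] + 28) >> 6;
    return (old + p + 1) >> 1; /* avg2(old, p) */
}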
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
@@ -4490,6 +4514,7 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx)
    c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c;
    c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c;
    c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c;
    c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c;
    c->weight_h264_pixels_tab[0]= weight_h264_pixels16x16_c;
    c->weight_h264_pixels_tab[1]= weight_h264_pixels16x8_c;
...
@@ -319,6 +319,7 @@ typedef struct DSPContext {
    h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
    /* This is really one func used in VC-1 decoding */
    h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3];
    h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3];
    qpel_mc_func put_h264_qpel_pixels_tab[4][16];
    qpel_mc_func avg_h264_qpel_pixels_tab[4][16];
...
@@ -1925,8 +1925,13 @@ static void vc1_interp_mc(VC1Context *v)
    uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
    uvmx = (uvmx&3)<<1;
    uvmy = (uvmy&3)<<1;
    if(!v->rnd){
        dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }else{
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
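/* Illustrative sketch, not part of this commit: v->rnd appears to carry VC-1's
 * per-frame rounding control, so the block above picks either the rounding
 * H.264 chroma MC or the new no-rounding VC-1 variant. A hypothetical helper
 * making that selection explicit (assumes dsputil.h for DSPContext and
 * h264_chroma_mc_func): */
#include "dsputil.h"

static inline h264_chroma_mc_func vc1_avg_chroma_fn(DSPContext *dsp, int rnd)
{
    return rnd ? dsp->avg_no_rnd_vc1_chroma_pixels_tab[0]
               : dsp->avg_h264_chroma_pixels_tab[0];
}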
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
...
@@ -2790,6 +2790,8 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
    c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
    c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;
    c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_mmx2_nornd;
    c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
    c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
    c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
@@ -2940,6 +2942,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
    H264_QPEL_FUNCS(3, 2, ssse3);
    H264_QPEL_FUNCS(3, 3, ssse3);
    c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_ssse3_nornd;
    c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_ssse3_nornd;
    c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
    c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
    c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
...
@@ -2129,6 +2129,10 @@ static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*
{
    avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void avg_vc1_chroma_mc8_mmx2_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg+2);
}
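/* Illustrative note, not part of this commit: the generic MMX2 chroma MC
 * template takes a pointer to packed 16-bit rounding constants. h264_rnd_reg
 * presumably stores the rounding pair (+32 for the >>6 path, +4 for the >>3
 * path) followed by the no-rounding pair (+28, +3), so h264_rnd_reg+2 selects
 * the VC-1 no-rounding variant. A hypothetical layout with those values: */
#include <stdint.h>

static const uint64_t example_rnd_reg[4] = {
    0x0020002000200020ULL, 0x0004000400040004ULL, /* +32, +4: rounding    */
    0x001C001C001C001CULL, 0x0003000300030003ULL, /* +28, +3: no rounding */
};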
static void avg_h264_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
@@ -2191,6 +2195,10 @@ static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}
#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
...