Commit f2e9d44a authored by Diego Biurrun

x86: Drop unnecessary ff_ name prefixes from static functions

parent 383fd4d4
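The rule applied throughout the diff below is simple: the ff_ prefix namespaces symbols that are exported from the library, while a static function has internal linkage and cannot collide with anything outside its own file, so the prefix adds nothing there. A minimal standalone sketch of that convention follows; the names ff_example_init and example_copy_row are invented for illustration and do not appear in this commit.

#include <stdint.h>

/* Exported entry point: visible to other translation units and to library
 * users, so it keeps the ff_ prefix that namespaces public symbols. */
void ff_example_init(void);

/* Static helper: internal linkage already confines the name to this file,
 * so an ff_ prefix would be redundant and is dropped. */
static void example_copy_row(uint8_t *dst, const uint8_t *src, int width)
{
    for (int i = 0; i < width; i++)
        dst[i] = src[i];
}

void ff_example_init(void)
{
    uint8_t src[4] = { 1, 2, 3, 4 }, dst[4];
    example_copy_row(dst, src, 4); /* plain name for the internal call */
}

The same distinction is visible in the hunks below: calls to exported assembly routines such as ff_deblock_v8_luma_8_mmxext keep their prefix, while the static C wrappers around them lose it.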
@@ -413,22 +413,22 @@ static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstS
 }\
 #define CAVS_MC(OPNAME, SIZE, MMX) \
-static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
+static void OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
 {\
     OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
 }\
 \
-static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
+static void OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
 {\
     OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
 }\
 \
-static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
+static void OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
 {\
     OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
 }\
 \
-static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
+static void OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
 {\
     OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
 }\
@@ -459,11 +459,11 @@ static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c,
 }
 #endif /* HAVE_MMX_INLINE */
 #define DSPFUNC(PFX, IDX, NUM, EXT) \
-    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = ff_ ## PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
-    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = ff_ ## PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
-    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = ff_ ## PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
-    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = ff_ ## PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT; \
+    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
+    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
+    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
+    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT; \
 #if HAVE_MMXEXT_INLINE
 QPEL_CAVS(put_, PUT_OP, mmxext)
@@ -474,8 +474,8 @@ CAVS_MC(put_, 16, mmxext)
 CAVS_MC(avg_, 8, mmxext)
 CAVS_MC(avg_, 16, mmxext)
-static av_cold void ff_cavsdsp_init_mmxext(CAVSDSPContext *c,
-                                           AVCodecContext *avctx)
+static av_cold void cavsdsp_init_mmxext(CAVSDSPContext *c,
+                                        AVCodecContext *avctx)
 {
     DSPFUNC(put, 0, 16, mmxext);
     DSPFUNC(put, 1, 8, mmxext);
@@ -493,8 +493,8 @@ CAVS_MC(put_, 16,3dnow)
 CAVS_MC(avg_, 8, 3dnow)
 CAVS_MC(avg_, 16,3dnow)
-static av_cold void ff_cavsdsp_init_3dnow(CAVSDSPContext *c,
-                                          AVCodecContext *avctx)
+static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
+                                       AVCodecContext *avctx)
 {
     DSPFUNC(put, 0, 16, 3dnow);
     DSPFUNC(put, 1, 8, 3dnow);
@@ -512,9 +512,11 @@ av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
     cavsdsp_init_mmx(c, avctx);
 #endif /* HAVE_MMX_INLINE */
 #if HAVE_MMXEXT_INLINE
-    if (mm_flags & AV_CPU_FLAG_MMXEXT) ff_cavsdsp_init_mmxext(c, avctx);
+    if (mm_flags & AV_CPU_FLAG_MMXEXT)
+        cavsdsp_init_mmxext(c, avctx);
 #endif /* HAVE_MMXEXT_INLINE */
 #if HAVE_AMD3DNOW_INLINE
-    if (mm_flags & AV_CPU_FLAG_3DNOW) ff_cavsdsp_init_3dnow(c, avctx);
+    if (mm_flags & AV_CPU_FLAG_3DNOW)
+        cavsdsp_init_3dnow(c, avctx);
 #endif /* HAVE_AMD3DNOW_INLINE */
 }
@@ -206,7 +206,12 @@ static av_always_inline void ff_ ## OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint
     ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
 }

-static av_always_inline void ff_put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
+static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp,
+                                                                 uint8_t *src,
+                                                                 int tmpStride,
+                                                                 int srcStride,
+                                                                 int size)
+{
     int w = (size+8)>>3;
     src -= 2*srcStride+2;
     while(w--){
@@ -218,7 +223,7 @@ static av_always_inline void ff_put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp

 #define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
 static av_always_inline void ff_ ## OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
-    ff_put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
+    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
     ff_ ## OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
 }\
 static av_always_inline void ff_ ## OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
...
@@ -132,8 +132,8 @@ LF_FUNCS(uint16_t, 10)

 #if ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL
 LF_FUNC(v8, luma, 8, mmxext)
-static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha,
-                                       int beta, int8_t *tc0)
+static void deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha,
+                                    int beta, int8_t *tc0)
 {
     if ((tc0[0] & tc0[1]) >= 0)
         ff_deblock_v8_luma_8_mmxext(pix + 0, stride, alpha, beta, tc0);
@@ -141,8 +141,8 @@ static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha,
         ff_deblock_v8_luma_8_mmxext(pix + 8, stride, alpha, beta, tc0 + 2);
 }
 LF_IFUNC(v8, luma_intra, 8, mmxext)
-static void ff_deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride,
-                                             int alpha, int beta)
+static void deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride,
+                                          int alpha, int beta)
 {
     ff_deblock_v8_luma_intra_8_mmxext(pix + 0, stride, alpha, beta);
     ff_deblock_v8_luma_intra_8_mmxext(pix + 8, stride, alpha, beta);
@@ -247,9 +247,9 @@ av_cold void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
                 c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_mmxext;
             }
 #if ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL
-            c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_mmxext;
+            c->h264_v_loop_filter_luma = deblock_v_luma_8_mmxext;
             c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_mmxext;
-            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
+            c->h264_v_loop_filter_luma_intra = deblock_v_luma_intra_8_mmxext;
             c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
 #endif /* ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL */
             c->weight_h264_pixels_tab[0] = ff_h264_weight_16_mmxext;
...
@@ -25,52 +25,52 @@
  */
 //FIXME the following could be optimized too ...
-static void DEF(ff_put_no_rnd_pixels16_x2)(uint8_t *block,
-                                           const uint8_t *pixels,
-                                           ptrdiff_t line_size, int h)
+static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block,
+                                        const uint8_t *pixels,
+                                        ptrdiff_t line_size, int h)
 {
     DEF(ff_put_no_rnd_pixels8_x2)(block, pixels, line_size, h);
     DEF(ff_put_no_rnd_pixels8_x2)(block + 8, pixels + 8, line_size, h);
 }

-static void DEF(ff_put_pixels16_y2)(uint8_t *block, const uint8_t *pixels,
-                                    ptrdiff_t line_size, int h)
+static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels,
+                                 ptrdiff_t line_size, int h)
 {
     DEF(ff_put_pixels8_y2)(block, pixels, line_size, h);
     DEF(ff_put_pixels8_y2)(block + 8, pixels + 8, line_size, h);
 }

-static void DEF(ff_put_no_rnd_pixels16_y2)(uint8_t *block,
-                                           const uint8_t *pixels,
-                                           ptrdiff_t line_size, int h)
+static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block,
+                                        const uint8_t *pixels,
+                                        ptrdiff_t line_size, int h)
 {
     DEF(ff_put_no_rnd_pixels8_y2)(block, pixels, line_size, h);
     DEF(ff_put_no_rnd_pixels8_y2)(block + 8, pixels + 8, line_size, h);
 }

-static void DEF(ff_avg_pixels16)(uint8_t *block, const uint8_t *pixels,
-                                 ptrdiff_t line_size, int h)
+static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels,
+                              ptrdiff_t line_size, int h)
 {
     DEF(ff_avg_pixels8)(block, pixels, line_size, h);
     DEF(ff_avg_pixels8)(block + 8, pixels + 8, line_size, h);
 }

-static void DEF(ff_avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels,
-                                    ptrdiff_t line_size, int h)
+static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels,
+                                 ptrdiff_t line_size, int h)
 {
     DEF(ff_avg_pixels8_x2)(block, pixels, line_size, h);
     DEF(ff_avg_pixels8_x2)(block + 8, pixels + 8, line_size, h);
 }

-static void DEF(ff_avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels,
-                                    ptrdiff_t line_size, int h)
+static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels,
+                                 ptrdiff_t line_size, int h)
 {
     DEF(ff_avg_pixels8_y2)(block, pixels, line_size, h);
     DEF(ff_avg_pixels8_y2)(block + 8, pixels + 8, line_size, h);
 }

-static void DEF(ff_avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels,
-                                     ptrdiff_t line_size, int h)
+static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels,
+                                  ptrdiff_t line_size, int h)
 {
     DEF(ff_avg_pixels8_xy2)(block, pixels, line_size, h);
     DEF(ff_avg_pixels8_xy2)(block + 8, pixels + 8, line_size, h);
...
@@ -226,11 +226,11 @@ static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int mm_flags)
 {
 #if HAVE_YASM
     c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
-    c->put_pixels_tab[0][2] = ff_put_pixels16_y2_mmxext;
+    c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext;

-    c->avg_pixels_tab[0][0] = ff_avg_pixels16_mmxext;
-    c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_mmxext;
-    c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_mmxext;
+    c->avg_pixels_tab[0][0] = avg_pixels16_mmxext;
+    c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext;
+    c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext;

     c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
     c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;
@@ -240,12 +240,12 @@ static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int mm_flags)
     c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;

     if (!(flags & CODEC_FLAG_BITEXACT)) {
-        c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_mmxext;
-        c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_mmxext;
+        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
+        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
         c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
         c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;

-        c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_mmxext;
+        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;
         c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
     }
@@ -260,11 +260,11 @@ static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int mm_flags)
 {
 #if HAVE_YASM
     c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
-    c->put_pixels_tab[0][2] = ff_put_pixels16_y2_3dnow;
+    c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

-    c->avg_pixels_tab[0][0] = ff_avg_pixels16_3dnow;
-    c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_3dnow;
-    c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_3dnow;
+    c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
+    c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
+    c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

     c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
     c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;
@@ -274,12 +274,12 @@ static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int mm_flags)
     c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;

     if (!(flags & CODEC_FLAG_BITEXACT)){
-        c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_3dnow;
-        c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_3dnow;
+        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
+        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
         c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
         c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;

-        c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_3dnow;
+        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
         c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
     }
...