Commit 82d1605f authored by Dominik Mierzejewski

Remove duplicated MM_* macros for CPU capabilities from dsputil.h.

Add the missing FF_MM_ALTIVEC to avcodec.h.
Rename all occurrences of MM_* to the corresponding FF_MM_*.

Originally committed as revision 15770 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent c6eaba62
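
For context, a minimal hypothetical sketch of a caller (not part of this commit) showing how the renamed flags are used after this change: mm_support() reports the runtime CPU features, and code tests them against the public FF_MM_* names from avcodec.h instead of the removed MM_* duplicates in dsputil.h. The function name report_simd and its log messages are illustrative only.

/* Hypothetical illustration; assumes the libavcodec headers of this revision. */
#include "avcodec.h"
#include "dsputil.h"

static void report_simd(AVCodecContext *avctx)
{
    int flags = mm_support();   /* runtime CPU feature detection */

    if (flags & FF_MM_MMX)
        av_log(avctx, AV_LOG_INFO, "MMX available\n");
    if (flags & FF_MM_SSE2)
        av_log(avctx, AV_LOG_INFO, "SSE2 available\n");
    if (flags & FF_MM_ALTIVEC)
        av_log(avctx, AV_LOG_INFO, "AltiVec available\n");
}
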
@@ -117,7 +117,7 @@ static void simple_idct_ipp_add(uint8_t *dest, int line_size, DCTELEM *block)
 int mm_support(void)
 {
-return ENABLE_IWMMXT * MM_IWMMXT;
+return ENABLE_IWMMXT * FF_MM_IWMMXT;
 }
 void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx)
...
@@ -150,7 +150,7 @@ static void nop(uint8_t *block, const uint8_t *pixels, int line_size, int h)
 /* A run time test is not simple. If this file is compiled in
 * then we should install the functions
 */
-int mm_flags = MM_IWMMXT; /* multimedia extension flags */
+int mm_flags = FF_MM_IWMMXT; /* multimedia extension flags */
 void dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx)
 {
@@ -161,7 +161,7 @@ void dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx)
 mm_flags &= ~(avctx->dsp_mask & 0xffff);
 }
-if (!(mm_flags & MM_IWMMXT)) return;
+if (!(mm_flags & FF_MM_IWMMXT)) return;
 c->add_pixels_clamped = add_pixels_clamped_iwmmxt;
...
@@ -110,7 +110,7 @@ static void dct_unquantize_h263_inter_iwmmxt(MpegEncContext *s,
 void MPV_common_init_iwmmxt(MpegEncContext *s)
 {
-if (!(mm_flags & MM_IWMMXT)) return;
+if (!(mm_flags & FF_MM_IWMMXT)) return;
 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_iwmmxt;
 #if 0
...
@@ -1431,6 +1431,7 @@ typedef struct AVCodecContext {
 #define FF_MM_SSE3 0x0040 ///< Prescott SSE3 functions
 #define FF_MM_SSSE3 0x0080 ///< Conroe SSSE3 functions
 #define FF_MM_IWMMXT 0x0100 ///< XScale IWMMXT
+#define FF_MM_ALTIVEC 0x0001 ///< standard AltiVec
 /**
 * bits per sample/pixel from the demuxer (needed for huffyuv).
...
@@ -91,23 +91,23 @@ struct algo algos[] = {
 {"SIMPLE-C", 1, ff_simple_idct, idct, NO_PERM},
 #ifdef HAVE_MMX
-{"MMX", 0, ff_fdct_mmx, fdct, NO_PERM, MM_MMX},
+{"MMX", 0, ff_fdct_mmx, fdct, NO_PERM, FF_MM_MMX},
 #ifdef HAVE_MMX2
-{"MMX2", 0, ff_fdct_mmx2, fdct, NO_PERM, MM_MMXEXT},
+{"MMX2", 0, ff_fdct_mmx2, fdct, NO_PERM, FF_MM_MMXEXT},
 #endif
 #ifdef CONFIG_GPL
-{"LIBMPEG2-MMX", 1, ff_mmx_idct, idct, MMX_PERM, MM_MMX},
+{"LIBMPEG2-MMX", 1, ff_mmx_idct, idct, MMX_PERM, FF_MM_MMX},
-{"LIBMPEG2-MMXEXT", 1, ff_mmxext_idct, idct, MMX_PERM, MM_MMXEXT},
+{"LIBMPEG2-MMXEXT", 1, ff_mmxext_idct, idct, MMX_PERM, FF_MM_MMXEXT},
 #endif
-{"SIMPLE-MMX", 1, ff_simple_idct_mmx, idct, MMX_SIMPLE_PERM, MM_MMX},
+{"SIMPLE-MMX", 1, ff_simple_idct_mmx, idct, MMX_SIMPLE_PERM, FF_MM_MMX},
-{"XVID-MMX", 1, ff_idct_xvid_mmx, idct, NO_PERM, MM_MMX},
+{"XVID-MMX", 1, ff_idct_xvid_mmx, idct, NO_PERM, FF_MM_MMX},
-{"XVID-MMX2", 1, ff_idct_xvid_mmx2, idct, NO_PERM, MM_MMXEXT},
+{"XVID-MMX2", 1, ff_idct_xvid_mmx2, idct, NO_PERM, FF_MM_MMXEXT},
-{"XVID-SSE2", 1, ff_idct_xvid_sse2, idct, SSE2_PERM, MM_SSE2},
+{"XVID-SSE2", 1, ff_idct_xvid_sse2, idct, SSE2_PERM, FF_MM_SSE2},
 #endif
 #ifdef HAVE_ALTIVEC
-{"altivecfdct", 0, fdct_altivec, fdct, NO_PERM, MM_ALTIVEC},
+{"altivecfdct", 0, fdct_altivec, fdct, NO_PERM, FF_MM_ALTIVEC},
 #endif
 #ifdef ARCH_BFIN
@@ -176,7 +176,7 @@ static DCTELEM block_org[64] __attribute__ ((aligned (8)));
 static inline void mmx_emms(void)
 {
 #ifdef HAVE_MMX
-if (cpu_flags & MM_MMX)
+if (cpu_flags & FF_MM_MMX)
 __asm__ volatile ("emms\n\t");
 #endif
 }
...
@@ -562,15 +562,6 @@ void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx);
 #undef emms_c
-#define MM_MMX 0x0001 /* standard MMX */
-#define MM_3DNOW 0x0004 /* AMD 3DNOW */
-#define MM_MMXEXT 0x0002 /* SSE integer functions or AMD MMX ext */
-#define MM_SSE 0x0008 /* SSE functions */
-#define MM_SSE2 0x0010 /* PIV SSE2 functions */
-#define MM_3DNOWEXT 0x0020 /* AMD 3DNowExt */
-#define MM_SSE3 0x0040 /* Prescott SSE3 functions */
-#define MM_SSSE3 0x0080 /* Conroe SSSE3 functions */
 extern int mm_flags;
 void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
@@ -585,7 +576,7 @@ static inline void emms(void)
 #define emms_c() \
 {\
-if (mm_flags & MM_MMX)\
+if (mm_flags & FF_MM_MMX)\
 emms();\
 }
@@ -593,8 +584,6 @@ void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx);
 #elif defined(ARCH_ARMV4L)
-#define MM_IWMMXT 0x0100 /* XScale IWMMXT */
 extern int mm_flags;
 #ifdef HAVE_NEON
@@ -604,8 +593,6 @@ extern int mm_flags;
 #elif defined(ARCH_POWERPC)
-#define MM_ALTIVEC 0x0001 /* standard AltiVec */
 extern int mm_flags;
 #define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(16, t, v)
...
@@ -93,18 +93,18 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
 #if defined HAVE_MMX && defined HAVE_YASM
 has_vectors = mm_support();
-if (has_vectors & MM_SSE) {
+if (has_vectors & FF_MM_SSE) {
 /* SSE for P3/P4/K8 */
 s->imdct_calc = ff_imdct_calc_sse;
 s->imdct_half = ff_imdct_half_sse;
 s->fft_permute = ff_fft_permute_sse;
 s->fft_calc = ff_fft_calc_sse;
-} else if (has_vectors & MM_3DNOWEXT) {
+} else if (has_vectors & FF_MM_3DNOWEXT) {
 /* 3DNowEx for K7 */
 s->imdct_calc = ff_imdct_calc_3dn2;
 s->imdct_half = ff_imdct_half_3dn2;
 s->fft_calc = ff_fft_calc_3dn2;
-} else if (has_vectors & MM_3DNOW) {
+} else if (has_vectors & FF_MM_3DNOW) {
 /* 3DNow! for K6-2/3 */
 s->imdct_calc = ff_imdct_calc_3dn;
 s->imdct_half = ff_imdct_half_3dn;
@@ -112,7 +112,7 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
 }
 #elif defined HAVE_ALTIVEC && !defined ALTIVEC_USE_REFERENCE_C_CODE
 has_vectors = mm_support();
-if (has_vectors & MM_ALTIVEC) {
+if (has_vectors & FF_MM_ALTIVEC) {
 s->fft_calc = ff_fft_calc_altivec;
 split_radix = 0;
 }
...
@@ -549,7 +549,7 @@ retry:
 #endif
 #if defined(HAVE_MMX)
-if(s->codec_id == CODEC_ID_MPEG4 && s->xvid_build && avctx->idct_algo == FF_IDCT_AUTO && (mm_flags & MM_MMX)){
+if(s->codec_id == CODEC_ID_MPEG4 && s->xvid_build && avctx->idct_algo == FF_IDCT_AUTO && (mm_flags & FF_MM_MMX)){
 avctx->idct_algo= FF_IDCT_XVIDMMX;
 avctx->coded_width= 0; // force reinit
 // dsputil_init(&s->dsp, avctx);
...
@@ -2498,20 +2498,20 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 #if 0
 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
-if (mm_flags & MM_MMX)
+if (mm_flags & FF_MM_MMX)
 av_log(avctx, AV_LOG_INFO, " mmx");
-if (mm_flags & MM_MMXEXT)
+if (mm_flags & FF_MM_MMXEXT)
 av_log(avctx, AV_LOG_INFO, " mmxext");
-if (mm_flags & MM_3DNOW)
+if (mm_flags & FF_MM_3DNOW)
 av_log(avctx, AV_LOG_INFO, " 3dnow");
-if (mm_flags & MM_SSE)
+if (mm_flags & FF_MM_SSE)
 av_log(avctx, AV_LOG_INFO, " sse");
-if (mm_flags & MM_SSE2)
+if (mm_flags & FF_MM_SSE2)
 av_log(avctx, AV_LOG_INFO, " sse2");
 av_log(avctx, AV_LOG_INFO, "\n");
 #endif
-if (mm_flags & MM_MMX) {
+if (mm_flags & FF_MM_MMX) {
 const int idct_algo= avctx->idct_algo;
 if(avctx->lowres==0){
@@ -2522,7 +2522,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
 #ifdef CONFIG_GPL
 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
-if(mm_flags & MM_MMXEXT){
+if(mm_flags & FF_MM_MMXEXT){
 c->idct_put= ff_libmpeg2mmx2_idct_put;
 c->idct_add= ff_libmpeg2mmx2_idct_add;
 c->idct = ff_mmxext_idct;
@@ -2535,7 +2535,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 #endif
 }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER || ENABLE_THEORA_DECODER) &&
 idct_algo==FF_IDCT_VP3){
-if(mm_flags & MM_SSE2){
+if(mm_flags & FF_MM_SSE2){
 c->idct_put= ff_vp3_idct_put_sse2;
 c->idct_add= ff_vp3_idct_add_sse2;
 c->idct = ff_vp3_idct_sse2;
@@ -2549,12 +2549,12 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 }else if(idct_algo==FF_IDCT_CAVS){
 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
 }else if(idct_algo==FF_IDCT_XVIDMMX){
-if(mm_flags & MM_SSE2){
+if(mm_flags & FF_MM_SSE2){
 c->idct_put= ff_idct_xvid_sse2_put;
 c->idct_add= ff_idct_xvid_sse2_add;
 c->idct = ff_idct_xvid_sse2;
 c->idct_permutation_type= FF_SSE2_IDCT_PERM;
-}else if(mm_flags & MM_MMXEXT){
+}else if(mm_flags & FF_MM_MMXEXT){
 c->idct_put= ff_idct_xvid_mmx2_put;
 c->idct_add= ff_idct_xvid_mmx2_add;
 c->idct = ff_idct_xvid_mmx2;
@@ -2605,10 +2605,10 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 c->h264_idct_add= ff_h264_idct_add_mmx;
 c->h264_idct8_dc_add=
 c->h264_idct8_add= ff_h264_idct8_add_mmx;
-if (mm_flags & MM_SSE2)
+if (mm_flags & FF_MM_SSE2)
 c->h264_idct8_add= ff_h264_idct8_add_sse2;
-if (mm_flags & MM_MMXEXT) {
+if (mm_flags & FF_MM_MMXEXT) {
 c->prefetch = prefetch_mmx2;
 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
@@ -2716,7 +2716,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 ff_vc1dsp_init_mmx(c, avctx);
 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
-} else if (mm_flags & MM_3DNOW) {
+} else if (mm_flags & FF_MM_3DNOW) {
 c->prefetch = prefetch_3dnow;
 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
@@ -2774,7 +2774,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
-if((mm_flags & MM_SSE2) && !(mm_flags & MM_3DNOW)){
+if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
 // these functions are slower than mmx on AMD, but faster on Intel
 /* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
 c->put_pixels_tab[0][0] = put_pixels16_sse2;
@@ -2782,7 +2782,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 */
 H264_QPEL_FUNCS(0, 0, sse2);
 }
-if(mm_flags & MM_SSE2){
+if(mm_flags & FF_MM_SSE2){
 H264_QPEL_FUNCS(0, 1, sse2);
 H264_QPEL_FUNCS(0, 2, sse2);
 H264_QPEL_FUNCS(0, 3, sse2);
@@ -2797,7 +2797,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 H264_QPEL_FUNCS(3, 3, sse2);
 }
 #ifdef HAVE_SSSE3
-if(mm_flags & MM_SSSE3){
+if(mm_flags & FF_MM_SSSE3){
 H264_QPEL_FUNCS(1, 0, ssse3);
 H264_QPEL_FUNCS(1, 1, ssse3);
 H264_QPEL_FUNCS(1, 2, ssse3);
@@ -2820,7 +2820,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 #endif
 #ifdef CONFIG_SNOW_DECODER
-if(mm_flags & MM_SSE2 & 0){
+if(mm_flags & FF_MM_SSE2 & 0){
 c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
 #ifdef HAVE_7REGS
 c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
@@ -2828,7 +2828,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
 }
 else{
-if(mm_flags & MM_MMXEXT){
+if(mm_flags & FF_MM_MMXEXT){
 c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
 #ifdef HAVE_7REGS
 c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
@@ -2838,7 +2838,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 }
 #endif
-if(mm_flags & MM_3DNOW){
+if(mm_flags & FF_MM_3DNOW){
 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
 c->vector_fmul = vector_fmul_3dnow;
 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
@@ -2846,14 +2846,14 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
 }
 }
-if(mm_flags & MM_3DNOWEXT){
+if(mm_flags & FF_MM_3DNOWEXT){
 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
 c->vector_fmul_window = vector_fmul_window_3dnow2;
 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
 c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
 }
 }
-if(mm_flags & MM_SSE){
+if(mm_flags & FF_MM_SSE){
 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
 c->ac3_downmix = ac3_downmix_sse;
 c->vector_fmul = vector_fmul_sse;
@@ -2864,9 +2864,9 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
 c->float_to_int16 = float_to_int16_sse;
 c->float_to_int16_interleave = float_to_int16_interleave_sse;
 }
-if(mm_flags & MM_3DNOW)
+if(mm_flags & FF_MM_3DNOW)
 c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
-if(mm_flags & MM_SSE2){
+if(mm_flags & FF_MM_SSE2){
 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
 c->float_to_int16 = float_to_int16_sse2;
 c->float_to_int16_interleave = float_to_int16_interleave_sse2;
...
@@ -1354,12 +1354,12 @@ void ff_flac_compute_autocorr_sse2(const int32_t *data, int len, int lag,
 void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
 {
-if (mm_flags & MM_MMX) {
+if (mm_flags & FF_MM_MMX) {
 const int dct_algo = avctx->dct_algo;
 if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
-if(mm_flags & MM_SSE2){
+if(mm_flags & FF_MM_SSE2){
 c->fdct = ff_fdct_sse2;
-}else if(mm_flags & MM_MMXEXT){
+}else if(mm_flags & FF_MM_MMXEXT){
 c->fdct = ff_fdct_mmx2;
 }else{
 c->fdct = ff_fdct_mmx;
@@ -1377,7 +1377,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
 c->hadamard8_diff[1]= hadamard8_diff_mmx;
 c->pix_norm1 = pix_norm1_mmx;
-c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
+c->sse[0] = (mm_flags & FF_MM_SSE2) ? sse16_sse2 : sse16_mmx;
 c->sse[1] = sse8_mmx;
 c->vsad[4]= vsad_intra16_mmx;
@@ -1395,7 +1395,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
 c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;
-if (mm_flags & MM_MMXEXT) {
+if (mm_flags & FF_MM_MMXEXT) {
 c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
 c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
 c->hadamard8_diff[1]= hadamard8_diff_mmx2;
@@ -1408,7 +1408,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
 c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
 }
-if(mm_flags & MM_SSE2){
+if(mm_flags & FF_MM_SSE2){
 c->get_pixels = get_pixels_sse2;
 c->sum_abs_dctelem= sum_abs_dctelem_sse2;
 c->hadamard8_diff[0]= hadamard8_diff16_sse2;
@@ -1418,7 +1418,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
 }
 #ifdef HAVE_SSSE3
-if(mm_flags & MM_SSSE3){
+if(mm_flags & FF_MM_SSSE3){
 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
 c->try_8x8basis= try_8x8basis_ssse3;
 }
@@ -1429,7 +1429,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
 }
 #endif
-if(mm_flags & MM_3DNOW){
+if(mm_flags & FF_MM_3DNOW){
 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
 c->try_8x8basis= try_8x8basis_3dnow;
 }
...
@@ -426,7 +426,7 @@ PIX_SAD(mmx2)
 void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
 {
-if (mm_flags & MM_MMX) {
+if (mm_flags & FF_MM_MMX) {
 c->pix_abs[0][0] = sad16_mmx;
 c->pix_abs[0][1] = sad16_x2_mmx;
 c->pix_abs[0][2] = sad16_y2_mmx;
@@ -439,7 +439,7 @@ void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
 c->sad[0]= sad16_mmx;
 c->sad[1]= sad8_mmx;
 }
-if (mm_flags & MM_MMXEXT) {
+if (mm_flags & FF_MM_MMXEXT) {
 c->pix_abs[0][0] = sad16_mmx2;
 c->pix_abs[1][0] = sad8_mmx2;
@@ -455,7 +455,7 @@ void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
 c->pix_abs[1][3] = sad8_xy2_mmx2;
 }
 }
-if ((mm_flags & MM_SSE2) && !(mm_flags & MM_3DNOW)) {
+if ((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)) {
 c->sad[0]= sad16_sse2;
 }
 }
@@ -619,7 +619,7 @@ static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block){
 void MPV_common_init_mmx(MpegEncContext *s)
 {
-if (mm_flags & MM_MMX) {
+if (mm_flags & FF_MM_MMX) {
 const int dct_algo = s->avctx->dct_algo;
 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx;
@@ -630,7 +630,7 @@ void MPV_common_init_mmx(MpegEncContext *s)
 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx;
 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx;
-if (mm_flags & MM_SSE2) {
+if (mm_flags & FF_MM_SSE2) {
 s->denoise_dct= denoise_dct_sse2;
 } else {
 s->denoise_dct= denoise_dct_mmx;
@@ -638,13 +638,13 @@ void MPV_common_init_mmx(MpegEncContext *s)
 if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
 #ifdef HAVE_SSSE3
-if(mm_flags & MM_SSSE3){
+if(mm_flags & FF_MM_SSSE3){
 s->dct_quantize= dct_quantize_SSSE3;
 } else
 #endif
-if(mm_flags & MM_SSE2){
+if(mm_flags & FF_MM_SSE2){
 s->dct_quantize= dct_quantize_SSE2;
-} else if(mm_flags & MM_MMXEXT){
+} else if(mm_flags & FF_MM_MMXEXT){
 s->dct_quantize= dct_quantize_MMX2;
 } else {
 s->dct_quantize= dct_quantize_MMX;
...
@@ -343,7 +343,7 @@ static void h_resample(uint8_t *dst, int dst_width, const uint8_t *src,
 n = dst_width;
 }
 #ifdef HAVE_MMX
-if ((mm_flags & MM_MMX) && NB_TAPS == 4)
+if ((mm_flags & FF_MM_MMX) && NB_TAPS == 4)
 h_resample_fast4_mmx(dst, n,
 src, src_width, src_start, src_incr, filters);
 else
@@ -401,14 +401,14 @@ static void component_resample(ImgReSampleContext *s,
 phase_y = get_phase(src_y);
 #ifdef HAVE_MMX
 /* desactivated MMX because loss of precision */
-if ((mm_flags & MM_MMX) && NB_TAPS == 4 && 0)
+if ((mm_flags & FF_MM_MMX) && NB_TAPS == 4 && 0)
 v_resample4_mmx(output, owidth,
 s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
 &s->v_filters[phase_y][0]);
 else
 #endif
 #ifdef HAVE_ALTIVEC
-if ((mm_flags & MM_ALTIVEC) && NB_TAPS == 4 && FILTER_BITS <= 6)
+if ((mm_flags & FF_MM_ALTIVEC) && NB_TAPS == 4 && FILTER_BITS <= 6)
 v_resample16_altivec(output, owidth,
 s->line_buf + (ring_y - NB_TAPS + 1) * owidth,
 owidth, &s->v_filters[phase_y][0]);
@@ -811,7 +811,7 @@ int main(int argc, char **argv)
 fact = 0.72;
 xsize = (int)(XSIZE * fact);
 ysize = (int)(YSIZE * fact);
-mm_flags = MM_MMX;
+mm_flags = FF_MM_MMX;
 s = img_resample_init(xsize, ysize, XSIZE, YSIZE);
 component_resample(s, img1, xsize, xsize, ysize,
 img, XSIZE, XSIZE, YSIZE);
...
@@ -50,7 +50,7 @@ int mm_support(void)
 int result = 0;
 #ifdef HAVE_ALTIVEC
 if (has_altivec()) {
-result |= MM_ALTIVEC;
+result |= FF_MM_ALTIVEC;
 }
 #endif /* result */
 return result;
@@ -265,7 +265,7 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
 if(ENABLE_H264_DECODER) dsputil_h264_init_ppc(c, avctx);
 if (has_altivec()) {
-mm_flags |= MM_ALTIVEC;
+mm_flags |= FF_MM_ALTIVEC;
 dsputil_init_altivec(c, avctx);
 if(ENABLE_SNOW_DECODER) snow_init_altivec(c, avctx);
...
@@ -590,7 +590,7 @@ extern void idct_add_altivec(uint8_t *dest, int line_size, int16_t *block);
 void MPV_common_init_altivec(MpegEncContext *s)
 {
-if ((mm_flags & MM_ALTIVEC) == 0) return;
+if ((mm_flags & FF_MM_ALTIVEC) == 0) return;
 if (s->avctx->lowres==0) {
 if ((s->avctx->idct_algo == FF_IDCT_AUTO) ||
...