Commit 6e1c66bc authored by Aurelien Jacobs

adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64

Originally committed as revision 13721 to svn://svn.mplayerhq.hu/mplayer/trunk/postproc
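At its core, the port swaps the hard-coded 32-bit register names in the inline assembly for REG_* macros that expand to the right register for the target architecture, and widens pointer-sized locals from int to long. A minimal sketch of how such macros can be defined, assuming the spellings used in the hunks below (the exact MPlayer header that provides them is not shown in this diff):

```c
/* Sketch: architecture-dependent register names for inline asm.
 * The real definitions live in a shared MPlayer header; this is an
 * illustrative reconstruction, not the original source. */
#if defined(ARCH_X86_64)
#define REG_a "rax"
#define REG_b "rbx"
#define REG_c "rcx"
#define REG_d "rdx"
#define REG_S "rsi"
#define REG_D "rdi"
#else /* 32-bit x86 */
#define REG_a "eax"
#define REG_b "ebx"
#define REG_c "ecx"
#define REG_d "edx"
#define REG_S "esi"
#define REG_D "edi"
#endif
```

String pasting then builds one asm template for both targets: "movq (%%"REG_d", %%"REG_a"), %%mm3" assembles as movq (%%edx,%%eax) on x86 and as movq (%%rdx,%%rax) on x86_64.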
parent ed861c6b
@@ -11,6 +11,7 @@
 #include "../config.h"
 #include "rgb2rgb.h"
 #include "swscale.h"
+#include "../cpudetect.h"
 #include "../mangle.h"
 #include "../bswap.h"
 #include "../libvo/fastmemcpy.h"
@@ -68,7 +69,7 @@ void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *sr
 int srcStride1, int srcStride2,
 int srcStride3, int dstStride);
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 static const uint64_t mmx_null __attribute__((aligned(8))) = 0x0000000000000000ULL;
 static const uint64_t mmx_one __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
 static const uint64_t mask32b attribute_used __attribute__((aligned(8))) = 0x000000FF000000FFULL;
@@ -152,7 +153,7 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
 #define RENAME(a) a ## _C
 #include "rgb2rgb_template.c"
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 //MMX versions
 #undef RENAME
@@ -181,7 +182,7 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
 #define RENAME(a) a ## _3DNOW
 #include "rgb2rgb_template.c"
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
 /*
  rgb15->rgb16 Original by Strepto/Astral
@@ -191,7 +192,7 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
 */
 void sws_rgb2rgb_init(int flags){
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 if(flags & SWS_CPU_CAPS_MMX2){
 rgb15to16= rgb15to16_MMX2;
 rgb15to24= rgb15to24_MMX2;
...
This diff is collapsed.
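The collapsed diff above is presumably the template that rgb2rgb.c multi-includes: the file yields its _C, _MMX, _MMX2 and _3DNOW variants by being compiled repeatedly under different RENAME definitions. A self-contained toy version of that trick (hypothetical names, single file instead of a template include):

```c
#include <stdio.h>

/* Toy version of the RENAME multi-compilation trick: the same body is
 * expanded twice, once per "variant", by redefining RENAME in between.
 * In rgb2rgb.c the body lives in rgb2rgb_template.c and is #included. */
#define RENAME(a) a ## _C
static void RENAME(convert)(void) { puts("C variant"); }
#undef RENAME

#define RENAME(a) a ## _MMX
static void RENAME(convert)(void) { puts("MMX variant"); }
#undef RENAME

int main(void)
{
    convert_C();    /* token pasting produced distinct symbols */
    convert_MMX();
    return 0;
}
```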
@@ -104,7 +104,7 @@ static void doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcForma
 sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
 sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 asm volatile ("emms\n\t");
 #endif
@@ -199,14 +199,14 @@ int main(int argc, char **argv){
 rgb_data[ x + y*4*W]= random();
 }
 }
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 sws_rgb2rgb_init(SWS_CPU_CAPS_MMX*0);
 #else
 sws_rgb2rgb_init(0);
 #endif
 sws_scale(sws, rgb_src, rgb_stride, 0, H , src, stride);
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 asm volatile ("emms\n\t");
 #endif
...
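A note on the emms calls being re-guarded above: MMX registers alias the x87 floating-point stack, so any code path that has run MMX instructions must execute emms before ordinary floating-point math, and that is just as true on x86_64, where MMX is always present. A minimal standalone illustration:

```c
#include <stdio.h>

int main(void)
{
#if defined(__x86_64__) || defined(__i386__)
    /* Touch an MMX register, then clear the MMX state.  Without emms,
     * the x87 tag word stays marked in-use and later floating-point
     * code computes garbage. */
    asm volatile ("pxor %%mm0, %%mm0 \n\t"
                  "emms              \n\t" ::: "memory");
#endif
    printf("fp still works: %f\n", 1.0 / 3.0);
    return 0;
}
```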
@@ -145,7 +145,7 @@ write special BGR->BGR scaler
 #define MIN(a,b) ((a) > (b) ? (b) : (a))
 #define MAX(a,b) ((a) < (b) ? (b) : (a))
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 static uint64_t attribute_used __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
 static uint64_t attribute_used __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
 static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
@@ -204,7 +204,7 @@ extern const uint8_t dither_8x8_32[8][8];
 extern const uint8_t dither_8x8_73[8][8];
 extern const uint8_t dither_8x8_220[8][8];
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 void in_asm_used_var_warning_killer()
 {
 volatile int i= bF8+bFC+w10+
@@ -679,7 +679,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #endif //HAVE_ALTIVEC
 #endif //ARCH_POWERPC
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_MMX
@@ -692,7 +692,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_3DNOW
 #endif
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
 #undef HAVE_MMX
 #undef HAVE_MMX2
@@ -716,7 +716,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #endif
 #endif //ARCH_POWERPC
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 //X86 versions
 /*
@@ -758,7 +758,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #include "swscale_template.c"
 #endif
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
 // minor note: the HAVE_xyz is messed up after that line so don't use it
@@ -783,7 +783,7 @@ static inline void initFilter(int16_t **outFilter, int16_t **filterPos, int *out
 int minFilterSize;
 double *filter=NULL;
 double *filter2=NULL;
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 if(flags & SWS_CPU_CAPS_MMX)
 asm volatile("emms\n\t"::: "memory"); //FIXME this shouldnt be required but it IS (even for non mmx versions)
 #endif
@@ -1142,17 +1142,17 @@ static inline void initFilter(int16_t **outFilter, int16_t **filterPos, int *out
 free(filter);
 }
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits)
 {
 uint8_t *fragmentA;
-int imm8OfPShufW1A;
-int imm8OfPShufW2A;
-int fragmentLengthA;
+long imm8OfPShufW1A;
+long imm8OfPShufW2A;
+long fragmentLengthA;
 uint8_t *fragmentB;
-int imm8OfPShufW1B;
-int imm8OfPShufW2B;
-int fragmentLengthB;
+long imm8OfPShufW1B;
+long imm8OfPShufW2B;
+long fragmentLengthB;
 int fragmentPos;
 int xpos, i;
@@ -1165,9 +1165,9 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
 "jmp 9f \n\t"
 // Begin
 "0: \n\t"
-"movq (%%edx, %%eax), %%mm3 \n\t"
-"movd (%%ecx, %%esi), %%mm0 \n\t"
-"movd 1(%%ecx, %%esi), %%mm1 \n\t"
+"movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+"movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
+"movd 1(%%"REG_c", %%"REG_S"), %%mm1\n\t"
 "punpcklbw %%mm7, %%mm1 \n\t"
 "punpcklbw %%mm7, %%mm0 \n\t"
 "pshufw $0xFF, %%mm1, %%mm1 \n\t"
@@ -1175,26 +1175,26 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
 "pshufw $0xFF, %%mm0, %%mm0 \n\t"
 "2: \n\t"
 "psubw %%mm1, %%mm0 \n\t"
-"movl 8(%%ebx, %%eax), %%esi \n\t"
+"mov 8(%%"REG_b", %%"REG_a"), %%"REG_S"\n\t"
 "pmullw %%mm3, %%mm0 \n\t"
 "psllw $7, %%mm1 \n\t"
 "paddw %%mm1, %%mm0 \n\t"
-"movq %%mm0, (%%edi, %%eax) \n\t"
-"addl $8, %%eax \n\t"
+"movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
+"add $8, %%"REG_a" \n\t"
 // End
 "9: \n\t"
 // "int $3\n\t"
-"leal 0b, %0 \n\t"
-"leal 1b, %1 \n\t"
-"leal 2b, %2 \n\t"
-"decl %1 \n\t"
-"decl %2 \n\t"
-"subl %0, %1 \n\t"
-"subl %0, %2 \n\t"
-"leal 9b, %3 \n\t"
-"subl %0, %3 \n\t"
+"lea 0b, %0 \n\t"
+"lea 1b, %1 \n\t"
+"lea 2b, %2 \n\t"
+"dec %1 \n\t"
+"dec %2 \n\t"
+"sub %0, %1 \n\t"
+"sub %0, %2 \n\t"
+"lea 9b, %3 \n\t"
+"sub %0, %3 \n\t"
 :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
@@ -1205,34 +1205,34 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
 "jmp 9f \n\t"
 // Begin
 "0: \n\t"
-"movq (%%edx, %%eax), %%mm3 \n\t"
-"movd (%%ecx, %%esi), %%mm0 \n\t"
+"movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+"movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
 "punpcklbw %%mm7, %%mm0 \n\t"
 "pshufw $0xFF, %%mm0, %%mm1 \n\t"
 "1: \n\t"
 "pshufw $0xFF, %%mm0, %%mm0 \n\t"
 "2: \n\t"
 "psubw %%mm1, %%mm0 \n\t"
-"movl 8(%%ebx, %%eax), %%esi \n\t"
+"mov 8(%%"REG_b", %%"REG_a"), %%"REG_S"\n\t"
 "pmullw %%mm3, %%mm0 \n\t"
 "psllw $7, %%mm1 \n\t"
 "paddw %%mm1, %%mm0 \n\t"
-"movq %%mm0, (%%edi, %%eax) \n\t"
-"addl $8, %%eax \n\t"
+"movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
+"add $8, %%"REG_a" \n\t"
 // End
 "9: \n\t"
 // "int $3\n\t"
-"leal 0b, %0 \n\t"
-"leal 1b, %1 \n\t"
-"leal 2b, %2 \n\t"
-"decl %1 \n\t"
-"decl %2 \n\t"
-"subl %0, %1 \n\t"
-"subl %0, %2 \n\t"
-"leal 9b, %3 \n\t"
-"subl %0, %3 \n\t"
+"lea 0b, %0 \n\t"
+"lea 1b, %1 \n\t"
+"lea 2b, %2 \n\t"
+"dec %1 \n\t"
+"dec %2 \n\t"
+"sub %0, %1 \n\t"
+"sub %0, %2 \n\t"
+"lea 9b, %3 \n\t"
+"sub %0, %3 \n\t"
 :"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
@@ -1313,7 +1313,7 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
 }
 filterPos[i/2]= xpos>>16; // needed to jump to the next part
 }
-#endif // ARCH_X86
+#endif // ARCH_X86 || ARCH_X86_64
 static void globalInit(){
 // generating tables:
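What initMMX2HScaler is doing with those lea/sub pairs: it records the start address and byte length of a template code fragment at runtime, then (in the part of the diff not shown) copies the fragment repeatedly into funnyCode and patches the pshufw immediates at the recorded offsets. A stripped-down, standalone sketch of the measure-and-copy step (no patching; like the original, it assumes non-PIC code addressing, e.g. gcc -no-pie):

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
#if defined(__x86_64__) || defined(__i386__)
    long start, end;     /* pointer-sized, as in the diff's long variables */
    uint8_t buf[16];

    /* Measure the code between local labels 0 and 9, the same trick the
     * diff performs with "lea 0b"/"lea 9b".  The jmp skips the fragment
     * so it is never executed in place. */
    asm volatile(
        "jmp 9f     \n\t"
        "0:         \n\t"
        "nop        \n\t"
        "nop        \n\t"
        "9:         \n\t"
        "lea 0b, %0 \n\t"
        "lea 9b, %1 \n\t"
        : "=r"(start), "=r"(end));

    memcpy(buf, (const void *)start, end - start);  /* grab the fragment */
    printf("fragment is %ld bytes\n", end - start);
#endif
    return 0;
}
```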
@@ -1327,7 +1327,7 @@ static void globalInit(){
 static SwsFunc getSwsFunc(int flags){
 #ifdef RUNTIME_CPUDETECT
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 // ordered per speed fasterst first
 if(flags & SWS_CPU_CAPS_MMX2)
 return swScale_MMX2;
@@ -1755,7 +1755,7 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
 int unscaled, needsDither;
 int srcFormat, dstFormat;
 SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 if(flags & SWS_CPU_CAPS_MMX)
 asm volatile("emms\n\t"::: "memory");
 #endif
@@ -1995,7 +1995,7 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
 (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
 srcFilter->chrH, dstFilter->chrH, c->param);
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 // can't downscale !!!
 if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
 {
@@ -2136,7 +2136,7 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
 }
 else
 {
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 MSG_V("SwScaler: using X86-Asm scaler for horizontal scaling\n");
 #else
 if(flags & SWS_FAST_BILINEAR)
...
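Why those variables become long and not stay int: on the ABIs this code targets, long always matches the pointer width (32-bit on x86, 64-bit on x86_64), while int stays 32-bit everywhere, so an address produced by lea into an "=r" output only fits reliably in a long. A small standalone check (nothing here is MPlayer code):

```c
#include <stdio.h>

int main(void)
{
    /* On ILP32 x86 and LP64 x86_64, sizeof(long) == sizeof(void*),
     * while int is 32-bit on both -- hence the int -> long change. */
    printf("int=%zu long=%zu void*=%zu\n",
           sizeof(int), sizeof(long), sizeof(void *));
#if defined(__x86_64__) || defined(__i386__)
    int x = 42;
    long addr;
    /* lea writes an address; the receiving lvalue must be pointer-sized. */
    asm("lea %1, %0" : "=r"(addr) : "m"(x));
    printf("&x=%p lea=%#lx\n", (void *)&x, addr);
#endif
    return 0;
}
```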
This diff is collapsed.
@@ -156,7 +156,7 @@ const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
 };
 #endif
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 /* hope these constant values are cache line aligned */
 uint64_t attribute_used __attribute__((aligned(8))) mmx_00ffw = 0x00ff00ff00ff00ffULL;
@@ -183,14 +183,12 @@ uint64_t __attribute__((aligned(8))) dither8[2]={
 0x0004000400040004LL,};
 #undef HAVE_MMX
-#undef ARCH_X86
 //MMX versions
 #undef RENAME
 #define HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX
 #include "yuv2rgb_template.c"
@@ -199,7 +197,6 @@ uint64_t __attribute__((aligned(8))) dither8[2]={
 #define HAVE_MMX
 #define HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX2
 #include "yuv2rgb_template.c"
@@ -583,7 +580,7 @@ EPILOG(1)
 SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
 {
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 if(c->flags & SWS_CPU_CAPS_MMX2){
 switch(c->dstFormat){
 case IMGFMT_BGR32: return yuv420_rgb32_MMX2;
...
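yuv2rgb_get_func_ptr, like getSwsFunc in swscale.c, chooses among the compiled variants at runtime from CPU-capability flags, which is why every variant now has to build on x86_64 as well. A toy version of that capability dispatch (hypothetical flags and functions, not MPlayer code):

```c
#include <stdio.h>

#define CAPS_MMX  1
#define CAPS_MMX2 2

typedef void (*ConvertFn)(void);

static void convert_c(void)    { puts("C path"); }
static void convert_mmx(void)  { puts("MMX path"); }
static void convert_mmx2(void) { puts("MMX2 path"); }

static ConvertFn get_func(int flags)
{
    /* Ordered fastest first, like getSwsFunc in the diff above. */
    if (flags & CAPS_MMX2) return convert_mmx2;
    if (flags & CAPS_MMX)  return convert_mmx;
    return convert_c;
}

int main(void)
{
    get_func(CAPS_MMX)();  /* prints "MMX path" */
    return 0;
}
```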
@@ -143,7 +143,7 @@ static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStr
 uint8_t *_py = src[0] + y*srcStride[0];
 uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
 uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-int index= -h_size/2;
+long index= -h_size/2;
 b5Dither= dither8[y&1];
 g6Dither= dither4[y&1];
@@ -204,8 +204,8 @@ YUV2RGB
 MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
-"addl $16, %1 \n\t"
-"addl $4, %0 \n\t"
+"add $16, %1 \n\t"
+"add $4, %0 \n\t"
 " js 1b \n\t"
 : "+r" (index), "+r" (_image)
@@ -238,7 +238,7 @@ static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStr
 uint8_t *_py = src[0] + y*srcStride[0];
 uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
 uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-int index= -h_size/2;
+long index= -h_size/2;
 b5Dither= dither8[y&1];
 g6Dither= dither4[y&1];
@@ -295,8 +295,8 @@ YUV2RGB
 MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
-"addl $16, %1 \n\t"
-"addl $4, %0 \n\t"
+"add $16, %1 \n\t"
+"add $4, %0 \n\t"
 " js 1b \n\t"
 : "+r" (index), "+r" (_image)
 : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
@@ -326,7 +326,7 @@ static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStr
 uint8_t *_py = src[0] + y*srcStride[0];
 uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
 uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-int index= -h_size/2;
+long index= -h_size/2;
 /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
 pixels in each iteration */
@@ -440,8 +440,8 @@ YUV2RGB
 "pxor %%mm4, %%mm4 \n\t"
 #endif
-"addl $24, %1 \n\t"
-"addl $4, %0 \n\t"
+"add $24, %1 \n\t"
+"add $4, %0 \n\t"
 " js 1b \n\t"
 : "+r" (index), "+r" (_image)
@@ -472,7 +472,7 @@ static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStr
 uint8_t *_py = src[0] + y*srcStride[0];
 uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
 uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-int index= -h_size/2;
+long index= -h_size/2;
 /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
 pixels in each iteration */
@@ -526,8 +526,8 @@ YUV2RGB
 "pxor %%mm4, %%mm4;" /* zero mm4 */
 "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-"addl $32, %1 \n\t"
-"addl $4, %0 \n\t"
+"add $32, %1 \n\t"
+"add $4, %0 \n\t"
 " js 1b \n\t"
 : "+r" (index), "+r" (_image)
...
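The addl to add renames here (and likewise leal/movl/decl/subl earlier) work because AT&T syntax infers the operand size from the register operand: with the REG_* macros expanding to eax or rax, a suffix-free add is 32-bit on x86 and 64-bit on x86_64, whereas the explicit l suffix pins the operation at 32 bits and will not even assemble against a 64-bit register. A compilable illustration (macro spelling assumed, as in the note at the top):

```c
#include <stdio.h>

#if defined(__x86_64__)
#define REG_a "rax"
#elif defined(__i386__)
#define REG_a "eax"
#endif

int main(void)
{
#if defined(__x86_64__) || defined(__i386__)
    long index = -4;  /* pointer-sized loop counter, as in the diff */
    /* No 'l' suffix: the operand size follows the register, so this is
     * a 64-bit add on x86_64 and a 32-bit add on x86. */
    asm("add $8, %%" REG_a : "+a"(index));
    printf("index = %ld\n", index);  /* prints 4 */
#endif
    return 0;
}
```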