Commit c12f7b2d authored by Ramiro Polla

rgb2rgb: don't misuse HAVE_* defines

Introduce and use COMPILE_TEMPLATE_* instead.

Originally committed as revision 32241 to svn://svn.mplayerhq.hu/mplayer/trunk/libswscale
parent c157fe63
...@@ -149,14 +149,10 @@ DECLARE_ASM_CONST(8, uint64_t, blue_15mask) = 0x0000001f0000001fULL; ...@@ -149,14 +149,10 @@ DECLARE_ASM_CONST(8, uint64_t, blue_15mask) = 0x0000001f0000001fULL;
//Note: We have C, MMX, MMX2, 3DNOW versions, there is no 3DNOW + MMX2 one. //Note: We have C, MMX, MMX2, 3DNOW versions, there is no 3DNOW + MMX2 one.
//plain C versions //plain C versions
#undef HAVE_MMX #define COMPILE_TEMPLATE_MMX 0
#undef HAVE_MMX2 #define COMPILE_TEMPLATE_MMX2 0
#undef HAVE_AMD3DNOW #define COMPILE_TEMPLATE_AMD3DNOW 0
#undef HAVE_SSE2 #define COMPILE_TEMPLATE_SSE2 0
#define HAVE_MMX 0
#define HAVE_MMX2 0
#define HAVE_AMD3DNOW 0
#define HAVE_SSE2 0
#define RENAME(a) a ## _C #define RENAME(a) a ## _C
#include "rgb2rgb_template.c" #include "rgb2rgb_template.c"
...@@ -164,33 +160,33 @@ DECLARE_ASM_CONST(8, uint64_t, blue_15mask) = 0x0000001f0000001fULL; ...@@ -164,33 +160,33 @@ DECLARE_ASM_CONST(8, uint64_t, blue_15mask) = 0x0000001f0000001fULL;
//MMX versions //MMX versions
#undef RENAME #undef RENAME
#undef HAVE_MMX #undef COMPILE_TEMPLATE_MMX
#define HAVE_MMX 1 #define COMPILE_TEMPLATE_MMX 1
#define RENAME(a) a ## _MMX #define RENAME(a) a ## _MMX
#include "rgb2rgb_template.c" #include "rgb2rgb_template.c"
//MMX2 versions //MMX2 versions
#undef RENAME #undef RENAME
#undef HAVE_MMX2 #undef COMPILE_TEMPLATE_MMX2
#define HAVE_MMX2 1 #define COMPILE_TEMPLATE_MMX2 1
#define RENAME(a) a ## _MMX2 #define RENAME(a) a ## _MMX2
#include "rgb2rgb_template.c" #include "rgb2rgb_template.c"
//SSE2 versions //SSE2 versions
#undef RENAME #undef RENAME
#undef HAVE_SSE2 #undef COMPILE_TEMPLATE_SSE2
#define HAVE_SSE2 1 #define COMPILE_TEMPLATE_SSE2 1
#define RENAME(a) a ## _SSE2 #define RENAME(a) a ## _SSE2
#include "rgb2rgb_template.c" #include "rgb2rgb_template.c"
//3DNOW versions //3DNOW versions
#undef RENAME #undef RENAME
#undef HAVE_MMX2 #undef COMPILE_TEMPLATE_MMX2
#undef HAVE_SSE2 #undef COMPILE_TEMPLATE_SSE2
#undef HAVE_AMD3DNOW #undef COMPILE_TEMPLATE_AMD3DNOW
#define HAVE_MMX2 0 #define COMPILE_TEMPLATE_MMX2 0
#define HAVE_SSE2 0 #define COMPILE_TEMPLATE_SSE2 0
#define HAVE_AMD3DNOW 1 #define COMPILE_TEMPLATE_AMD3DNOW 1
#define RENAME(a) a ## _3DNOW #define RENAME(a) a ## _3DNOW
#include "rgb2rgb_template.c" #include "rgb2rgb_template.c"
......
...@@ -33,30 +33,30 @@ ...@@ -33,30 +33,30 @@
#undef MMREG_SIZE #undef MMREG_SIZE
#undef PAVGB #undef PAVGB
#if HAVE_SSE2 #if COMPILE_TEMPLATE_SSE2
#define MMREG_SIZE 16 #define MMREG_SIZE 16
#else #else
#define MMREG_SIZE 8 #define MMREG_SIZE 8
#endif #endif
#if HAVE_AMD3DNOW #if COMPILE_TEMPLATE_AMD3DNOW
#define PREFETCH "prefetch" #define PREFETCH "prefetch"
#define PAVGB "pavgusb" #define PAVGB "pavgusb"
#elif HAVE_MMX2 #elif COMPILE_TEMPLATE_MMX2
#define PREFETCH "prefetchnta" #define PREFETCH "prefetchnta"
#define PAVGB "pavgb" #define PAVGB "pavgb"
#else #else
#define PREFETCH " # nop" #define PREFETCH " # nop"
#endif #endif
#if HAVE_AMD3DNOW #if COMPILE_TEMPLATE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */ /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms" #define EMMS "femms"
#else #else
#define EMMS "emms" #define EMMS "emms"
#endif #endif
#if HAVE_MMX2 #if COMPILE_TEMPLATE_MMX2
#define MOVNTQ "movntq" #define MOVNTQ "movntq"
#define SFENCE "sfence" #define SFENCE "sfence"
#else #else
...@@ -69,11 +69,11 @@ static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long s ...@@ -69,11 +69,11 @@ static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long s
uint8_t *dest = dst; uint8_t *dest = dst;
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 23; mm_end = end - 23;
__asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory"); __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
...@@ -164,11 +164,11 @@ static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -164,11 +164,11 @@ static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long s
uint8_t *dest = dst; uint8_t *dest = dst;
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 31; mm_end = end - 31;
while (s < mm_end) { while (s < mm_end) {
...@@ -222,7 +222,7 @@ static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -222,7 +222,7 @@ static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_
register const uint8_t *end; register const uint8_t *end;
const uint8_t *mm_end; const uint8_t *mm_end;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s)); __asm__ volatile(PREFETCH" %0"::"m"(*s));
__asm__ volatile("movq %0, %%mm4"::"m"(mask15s)); __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
mm_end = end - 15; mm_end = end - 15;
...@@ -268,7 +268,7 @@ static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -268,7 +268,7 @@ static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_
register const uint8_t *end; register const uint8_t *end;
const uint8_t *mm_end; const uint8_t *mm_end;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s)); __asm__ volatile(PREFETCH" %0"::"m"(*s));
__asm__ volatile("movq %0, %%mm7"::"m"(mask15rg)); __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
__asm__ volatile("movq %0, %%mm6"::"m"(mask15b)); __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
...@@ -316,12 +316,12 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -316,12 +316,12 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_
{ {
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
mm_end = end - 15; mm_end = end - 15;
#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
__asm__ volatile( __asm__ volatile(
...@@ -412,12 +412,12 @@ static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long s ...@@ -412,12 +412,12 @@ static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long s
{ {
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
...@@ -471,12 +471,12 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -471,12 +471,12 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_
{ {
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
mm_end = end - 15; mm_end = end - 15;
#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
__asm__ volatile( __asm__ volatile(
...@@ -567,12 +567,12 @@ static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long s ...@@ -567,12 +567,12 @@ static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long s
{ {
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
...@@ -626,12 +626,12 @@ static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long s ...@@ -626,12 +626,12 @@ static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long s
{ {
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
...@@ -687,12 +687,12 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -687,12 +687,12 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_
{ {
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
...@@ -748,12 +748,12 @@ static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long s ...@@ -748,12 +748,12 @@ static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long s
{ {
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
...@@ -809,12 +809,12 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -809,12 +809,12 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_
{ {
const uint8_t *s = src; const uint8_t *s = src;
const uint8_t *end; const uint8_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint8_t *mm_end; const uint8_t *mm_end;
#endif #endif
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
...@@ -890,13 +890,13 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -890,13 +890,13 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_
static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{ {
const uint16_t *end; const uint16_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint16_t *mm_end; const uint16_t *mm_end;
#endif #endif
uint8_t *d = dst; uint8_t *d = dst;
const uint16_t *s = (const uint16_t*)src; const uint16_t *s = (const uint16_t*)src;
end = s + src_size/2; end = s + src_size/2;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 7; mm_end = end - 7;
while (s < mm_end) { while (s < mm_end) {
...@@ -997,13 +997,13 @@ static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -997,13 +997,13 @@ static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long s
static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{ {
const uint16_t *end; const uint16_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint16_t *mm_end; const uint16_t *mm_end;
#endif #endif
uint8_t *d = (uint8_t *)dst; uint8_t *d = (uint8_t *)dst;
const uint16_t *s = (const uint16_t *)src; const uint16_t *s = (const uint16_t *)src;
end = s + src_size/2; end = s + src_size/2;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 7; mm_end = end - 7;
while (s < mm_end) { while (s < mm_end) {
...@@ -1122,13 +1122,13 @@ static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -1122,13 +1122,13 @@ static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long s
static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size) static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size)
{ {
const uint16_t *end; const uint16_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint16_t *mm_end; const uint16_t *mm_end;
#endif #endif
uint8_t *d = dst; uint8_t *d = dst;
const uint16_t *s = (const uint16_t *)src; const uint16_t *s = (const uint16_t *)src;
end = s + src_size/2; end = s + src_size/2;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
__asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
__asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory"); __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
...@@ -1175,13 +1175,13 @@ static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -1175,13 +1175,13 @@ static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_
static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size) static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size)
{ {
const uint16_t *end; const uint16_t *end;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
const uint16_t *mm_end; const uint16_t *mm_end;
#endif #endif
uint8_t *d = dst; uint8_t *d = dst;
const uint16_t *s = (const uint16_t*)src; const uint16_t *s = (const uint16_t*)src;
end = s + src_size/2; end = s + src_size/2;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
__asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
__asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory"); __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
...@@ -1230,7 +1230,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst, ...@@ -1230,7 +1230,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst,
x86_reg idx = 15 - src_size; x86_reg idx = 15 - src_size;
const uint8_t *s = src-idx; const uint8_t *s = src-idx;
uint8_t *d = dst-idx; uint8_t *d = dst-idx;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile( __asm__ volatile(
"test %0, %0 \n\t" "test %0, %0 \n\t"
"jns 2f \n\t" "jns 2f \n\t"
...@@ -1244,7 +1244,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst, ...@@ -1244,7 +1244,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst,
PREFETCH" 32(%1, %0) \n\t" PREFETCH" 32(%1, %0) \n\t"
"movq (%1, %0), %%mm0 \n\t" "movq (%1, %0), %%mm0 \n\t"
"movq 8(%1, %0), %%mm1 \n\t" "movq 8(%1, %0), %%mm1 \n\t"
# if HAVE_MMX2 # if COMPILE_TEMPLATE_MMX2
"pshufw $177, %%mm0, %%mm3 \n\t" "pshufw $177, %%mm0, %%mm3 \n\t"
"pshufw $177, %%mm1, %%mm5 \n\t" "pshufw $177, %%mm1, %%mm5 \n\t"
"pand %%mm7, %%mm0 \n\t" "pand %%mm7, %%mm0 \n\t"
...@@ -1292,7 +1292,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst, ...@@ -1292,7 +1292,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst,
static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{ {
unsigned i; unsigned i;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
x86_reg mmx_size= 23 - src_size; x86_reg mmx_size= 23 - src_size;
__asm__ volatile ( __asm__ volatile (
"test %%"REG_a", %%"REG_a" \n\t" "test %%"REG_a", %%"REG_a" \n\t"
...@@ -1365,7 +1365,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u ...@@ -1365,7 +1365,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u
long y; long y;
const x86_reg chromWidth= width>>1; const x86_reg chromWidth= width>>1;
for (y=0; y<height; y++) { for (y=0; y<height; y++) {
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway) //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
__asm__ volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
...@@ -1489,7 +1489,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u ...@@ -1489,7 +1489,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u
ysrc += lumStride; ysrc += lumStride;
dst += dstStride; dst += dstStride;
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__(EMMS" \n\t" __asm__(EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
...@@ -1515,7 +1515,7 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u ...@@ -1515,7 +1515,7 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u
long y; long y;
const x86_reg chromWidth= width>>1; const x86_reg chromWidth= width>>1;
for (y=0; y<height; y++) { for (y=0; y<height; y++) {
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway) //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
__asm__ volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
...@@ -1593,7 +1593,7 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u ...@@ -1593,7 +1593,7 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u
ysrc += lumStride; ysrc += lumStride;
dst += dstStride; dst += dstStride;
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__(EMMS" \n\t" __asm__(EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
...@@ -1643,7 +1643,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -1643,7 +1643,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
long y; long y;
const x86_reg chromWidth= width>>1; const x86_reg chromWidth= width>>1;
for (y=0; y<height; y+=2) { for (y=0; y<height; y+=2) {
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t" "pcmpeqw %%mm7, %%mm7 \n\t"
...@@ -1746,7 +1746,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -1746,7 +1746,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
ydst += lumStride; ydst += lumStride;
src += srcStride; src += srcStride;
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(EMMS" \n\t" __asm__ volatile(EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
...@@ -1769,7 +1769,7 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWi ...@@ -1769,7 +1769,7 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWi
dst+= dstStride; dst+= dstStride;
for (y=1; y<srcHeight; y++) { for (y=1; y<srcHeight; y++) {
#if HAVE_MMX2 || HAVE_AMD3DNOW #if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
const x86_reg mmxSize= srcWidth&~15; const x86_reg mmxSize= srcWidth&~15;
__asm__ volatile( __asm__ volatile(
"mov %4, %%"REG_a" \n\t" "mov %4, %%"REG_a" \n\t"
...@@ -1859,7 +1859,7 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWi ...@@ -1859,7 +1859,7 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWi
} }
#endif #endif
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(EMMS" \n\t" __asm__ volatile(EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
...@@ -1879,7 +1879,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -1879,7 +1879,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
long y; long y;
const x86_reg chromWidth= width>>1; const x86_reg chromWidth= width>>1;
for (y=0; y<height; y+=2) { for (y=0; y<height; y+=2) {
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t" "pcmpeqw %%mm7, %%mm7 \n\t"
...@@ -1982,7 +1982,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -1982,7 +1982,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
ydst += lumStride; ydst += lumStride;
src += srcStride; src += srcStride;
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile(EMMS" \n\t" __asm__ volatile(EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
...@@ -2002,7 +2002,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_ ...@@ -2002,7 +2002,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
{ {
long y; long y;
const x86_reg chromWidth= width>>1; const x86_reg chromWidth= width>>1;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
for (y=0; y<height-2; y+=2) { for (y=0; y<height-2; y+=2) {
long i; long i;
for (i=0; i<2; i++) { for (i=0; i<2; i++) {
...@@ -2090,7 +2090,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_ ...@@ -2090,7 +2090,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
"1: \n\t" "1: \n\t"
PREFETCH" 64(%0, %%"REG_d") \n\t" PREFETCH" 64(%0, %%"REG_d") \n\t"
PREFETCH" 64(%1, %%"REG_d") \n\t" PREFETCH" 64(%1, %%"REG_d") \n\t"
#if HAVE_MMX2 || HAVE_AMD3DNOW #if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
"movq (%0, %%"REG_d"), %%mm0 \n\t" "movq (%0, %%"REG_d"), %%mm0 \n\t"
"movq (%1, %%"REG_d"), %%mm1 \n\t" "movq (%1, %%"REG_d"), %%mm1 \n\t"
"movq 6(%0, %%"REG_d"), %%mm2 \n\t" "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
...@@ -2151,7 +2151,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_ ...@@ -2151,7 +2151,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
"packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0 "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
"psraw $7, %%mm0 \n\t" "psraw $7, %%mm0 \n\t"
#if HAVE_MMX2 || HAVE_AMD3DNOW #if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
"movq 12(%0, %%"REG_d"), %%mm4 \n\t" "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
"movq 12(%1, %%"REG_d"), %%mm1 \n\t" "movq 12(%1, %%"REG_d"), %%mm1 \n\t"
"movq 18(%0, %%"REG_d"), %%mm2 \n\t" "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
...@@ -2296,8 +2296,8 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui ...@@ -2296,8 +2296,8 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui
for (h=0; h < height; h++) { for (h=0; h < height; h++) {
long w; long w;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
#if HAVE_SSE2 #if COMPILE_TEMPLATE_SSE2
__asm__( __asm__(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t" "1: \n\t"
...@@ -2357,7 +2357,7 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui ...@@ -2357,7 +2357,7 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui
src1 += src1Stride; src1 += src1Stride;
src2 += src2Stride; src2 += src2Stride;
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
...@@ -2375,7 +2375,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, ...@@ -2375,7 +2375,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
x86_reg y; x86_reg y;
long x,w,h; long x,w,h;
w=width/2; h=height/2; w=width/2; h=height/2;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__ volatile( __asm__ volatile(
PREFETCH" %0 \n\t" PREFETCH" %0 \n\t"
PREFETCH" %1 \n\t" PREFETCH" %1 \n\t"
...@@ -2385,7 +2385,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, ...@@ -2385,7 +2385,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
const uint8_t* s1=src1+srcStride1*(y>>1); const uint8_t* s1=src1+srcStride1*(y>>1);
uint8_t* d=dst1+dstStride1*y; uint8_t* d=dst1+dstStride1*y;
x=0; x=0;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
for (;x<w-31;x+=32) { for (;x<w-31;x+=32) {
__asm__ volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
...@@ -2424,7 +2424,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, ...@@ -2424,7 +2424,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
const uint8_t* s2=src2+srcStride2*(y>>1); const uint8_t* s2=src2+srcStride2*(y>>1);
uint8_t* d=dst2+dstStride2*y; uint8_t* d=dst2+dstStride2*y;
x=0; x=0;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
for (;x<w-31;x+=32) { for (;x<w-31;x+=32) {
__asm__ volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
...@@ -2459,7 +2459,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, ...@@ -2459,7 +2459,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
#endif #endif
for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x]; for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
...@@ -2483,7 +2483,7 @@ static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2 ...@@ -2483,7 +2483,7 @@ static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2
const uint8_t* vp=src3+srcStride3*(y>>2); const uint8_t* vp=src3+srcStride3*(y>>2);
uint8_t* d=dst+dstStride*y; uint8_t* d=dst+dstStride*y;
x=0; x=0;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
for (;x<w-7;x+=8) { for (;x<w-7;x+=8) {
__asm__ volatile( __asm__ volatile(
PREFETCH" 32(%1, %0) \n\t" PREFETCH" 32(%1, %0) \n\t"
...@@ -2549,7 +2549,7 @@ static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2 ...@@ -2549,7 +2549,7 @@ static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2
d[8*x+7] = vp[x]; d[8*x+7] = vp[x];
} }
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
...@@ -2564,7 +2564,7 @@ static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count ...@@ -2564,7 +2564,7 @@ static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count
src += 2*count; src += 2*count;
count= - count; count= - count;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
if(count <= -16) { if(count <= -16) {
count += 15; count += 15;
__asm__ volatile( __asm__ volatile(
...@@ -2603,7 +2603,7 @@ static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *ds ...@@ -2603,7 +2603,7 @@ static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *ds
dst1+= count; dst1+= count;
src += 4*count; src += 4*count;
count= - count; count= - count;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
if(count <= -8) { if(count <= -8) {
count += 7; count += 7;
__asm__ volatile( __asm__ volatile(
...@@ -2704,7 +2704,7 @@ static void RENAME(extract_odd2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst ...@@ -2704,7 +2704,7 @@ static void RENAME(extract_odd2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst
dst1+= count; dst1+= count;
src += 4*count; src += 4*count;
count= - count; count= - count;
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
if(count <= -8) { if(count <= -8) {
count += 7; count += 7;
__asm__ volatile( __asm__ volatile(
...@@ -2820,7 +2820,7 @@ static void RENAME(yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, co ...@@ -2820,7 +2820,7 @@ static void RENAME(yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, co
src += srcStride; src += srcStride;
ydst+= lumStride; ydst+= lumStride;
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
...@@ -2845,7 +2845,7 @@ static void RENAME(yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, co ...@@ -2845,7 +2845,7 @@ static void RENAME(yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, co
udst+= chromStride; udst+= chromStride;
vdst+= chromStride; vdst+= chromStride;
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
...@@ -2872,7 +2872,7 @@ static void RENAME(uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, co ...@@ -2872,7 +2872,7 @@ static void RENAME(uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, co
src += srcStride; src += srcStride;
ydst+= lumStride; ydst+= lumStride;
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
...@@ -2897,7 +2897,7 @@ static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, co ...@@ -2897,7 +2897,7 @@ static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, co
udst+= chromStride; udst+= chromStride;
vdst+= chromStride; vdst+= chromStride;
} }
#if HAVE_MMX #if COMPILE_TEMPLATE_MMX
__asm__( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment