Commit f7728bf6 authored by Falk Hüffner

Use asms instead of builtins when compiling for generic Alpha. Less ugly.

Originally committed as revision 2270 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent a018d318
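The pattern used throughout the change: wrap the raw instruction in a GCC statement expression so the macro can still be used like a function. A minimal sketch of the two equivalent forms for cmpbge, assuming a gcc that targets Alpha (illustrative only, not part of the commit):

    #include <stdint.h>

    /* builtin form, available from gcc 3.3 on Alpha */
    #define cmpbge_builtin(a, b) __builtin_alpha_cmpbge(a, b)

    /* asm form used for generic builds; "rJ" allows the zero register,
       "rI" allows an 8-bit literal, and %r1 prints $31 for a zero input */
    #define cmpbge_asm(a, b) \
        ({ uint64_t __r; \
           asm ("cmpbge %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); \
           __r; })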
@@ -171,17 +171,6 @@ libpostproc/libpostproc.a:
%.o: %.S
$(CC) $(CFLAGS) -c -o $@ $<
# motion_est_alpha uses the MVI extension, which is not available with
# -mcpu=ev4 (default) or ev5/ev56. Thus, force -mcpu=pca56 in those
# cases.
ifeq ($(TARGET_ARCH_ALPHA),yes)
alpha/motion_est_alpha.o: alpha/motion_est_alpha.c
cpu=`echo "$(CFLAGS)" | sed -n 's,.*-mcpu=\([a-zA-Z0-9]*\).*,\1,p'`; \
case x"$$cpu" in x|xev[45]*) newcpu=pca56;; *) newcpu=$$cpu;; esac; \
echo $(CC) $(CFLAGS) -mcpu=$$newcpu -c -o $@ $<;\
$(CC) $(CFLAGS) -mcpu=$$newcpu -c -o $@ $<
endif
depend: $(SRCS)
$(CC) -MM $(CFLAGS) $^ 1>.depend
@@ -63,27 +63,15 @@ static inline uint64_t WORD_VEC(uint64_t x)
#define sextw(x) ((int16_t) (x))
#ifdef __GNUC__
#define ASM_ACCEPT_MVI asm (".arch pca56")
struct unaligned_long { uint64_t l; } __attribute__((packed));
#define ldq_u(p) (*(const uint64_t *) (((uint64_t) (p)) & ~7ul))
#define uldq(a) (((const struct unaligned_long *) (a))->l)
#if GNUC_PREREQ(3,0)
/* Unfortunately, __builtin_prefetch is slightly buggy on Alpha. The
defines here are kludged so we still get the right
instruction. This needs to be adapted as soon as gcc is fixed. */
# define prefetch(p) __builtin_prefetch((p), 0, 1)
# define prefetch_en(p) __builtin_prefetch((p), 1, 1)
# define prefetch_m(p) __builtin_prefetch((p), 0, 0)
# define prefetch_men(p) __builtin_prefetch((p), 1, 0)
#else
# define prefetch(p) asm volatile("ldl $31,%0" : : "m"(*(const char *) (p)) : "memory")
# define prefetch_en(p) asm volatile("ldq $31,%0" : : "m"(*(const char *) (p)) : "memory")
# define prefetch_m(p) asm volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
# define prefetch_men(p) asm volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
#endif
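For reference, __builtin_prefetch(p, rw, locality) takes a read/write flag (0 or 1) and a locality hint (0 = no temporal locality, up to 3 = high); the asm fallbacks get the same effect from Alpha's load-to-zero-register idiom, since loads into $31/$f31 act as prefetch hints. A hedged usage sketch, assuming the prefetch macro defined here:

    /* Walk a row while prefetching ahead; `prefetch` expands to
       __builtin_prefetch on gcc >= 3.3 and to "ldl $31,..." otherwise. */
    static void copy_row(uint8_t *dst, const uint8_t *src, int n)
    {
        int i;
        for (i = 0; i < n; i++) {
            prefetch(src + i + 64); /* read prefetch, one line ahead (64-byte line assumed) */
            dst[i] = src[i];
        }
    }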
#if GNUC_PREREQ(3,3)
#define prefetch(p) __builtin_prefetch((p), 0, 1)
#define prefetch_en(p) __builtin_prefetch((p), 0, 0)
#define prefetch_m(p) __builtin_prefetch((p), 1, 1)
#define prefetch_men(p) __builtin_prefetch((p), 1, 0)
#define cmpbge __builtin_alpha_cmpbge
/* Avoid warnings. */
#define extql(a, b) __builtin_alpha_extql(a, (uint64_t) (b))
@@ -94,6 +82,24 @@ struct unaligned_long { uint64_t l; } __attribute__((packed));
#define amask __builtin_alpha_amask
#define implver __builtin_alpha_implver
#define rpcc __builtin_alpha_rpcc
#else
#define prefetch(p) asm volatile("ldl $31,%0" : : "m"(*(const char *) (p)) : "memory")
#define prefetch_en(p) asm volatile("ldq $31,%0" : : "m"(*(const char *) (p)) : "memory")
#define prefetch_m(p) asm volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
#define prefetch_men(p) asm volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
#define cmpbge(a, b) ({ uint64_t __r; asm ("cmpbge %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define extql(a, b) ({ uint64_t __r; asm ("extql %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define extwl(a, b) ({ uint64_t __r; asm ("extwl %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define extqh(a, b) ({ uint64_t __r; asm ("extqh %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define zap(a, b) ({ uint64_t __r; asm ("zap %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define zapnot(a, b) ({ uint64_t __r; asm ("zapnot %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define amask(a) ({ uint64_t __r; asm ("amask %1,%0" : "=r" (__r) : "rI" (a)); __r; })
#define implver() ({ uint64_t __r; asm ("implver %0" : "=r" (__r)); __r; })
#define rpcc() ({ uint64_t __r; asm volatile ("rpcc %0" : "=r" (__r)); __r; })
#endif
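cmpbge does eight unsigned byte comparisons at once and returns the results as a bit mask, which is what makes byte-wise tricks cheap on Alpha. A small illustration (hypothetical helper, not from this change):

    /* Bit i of cmpbge(a, b) is set iff byte i of a is (unsigned) >= byte i
       of b; with a == 0 this flags exactly the zero bytes of b. */
    static int has_zero_byte(uint64_t x)
    {
        return cmpbge(0, x) != 0;
    }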
#define wh64(p) asm volatile("wh64 (%0)" : : "r"(p) : "memory")
#if GNUC_PREREQ(3,3) && defined(__alpha_max__)
#define minub8 __builtin_alpha_minub8
#define minsb8 __builtin_alpha_minsb8
#define minuw4 __builtin_alpha_minuw4
@@ -108,34 +114,24 @@ struct unaligned_long { uint64_t l; } __attribute__((packed));
#define unpkbl __builtin_alpha_unpkbl
#define unpkbw __builtin_alpha_unpkbw
#else
#define cmpbge(a, b) ({ uint64_t __r; asm ("cmpbge %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define extql(a, b) ({ uint64_t __r; asm ("extql %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define extwl(a, b) ({ uint64_t __r; asm ("extwl %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define extqh(a, b) ({ uint64_t __r; asm ("extqh %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define zap(a, b) ({ uint64_t __r; asm ("zap %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define zapnot(a, b) ({ uint64_t __r; asm ("zapnot %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define amask(a) ({ uint64_t __r; asm ("amask %1,%0" : "=r" (__r) : "rI" (a)); __r; })
#define implver() ({ uint64_t __r; asm ("implver %0" : "=r" (__r)); __r; })
#define rpcc() ({ uint64_t __r; asm volatile ("rpcc %0" : "=r" (__r)); __r; })
#define minub8(a, b) ({ uint64_t __r; asm ("minub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define minsb8(a, b) ({ uint64_t __r; asm ("minsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define minuw4(a, b) ({ uint64_t __r; asm ("minuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define minsw4(a, b) ({ uint64_t __r; asm ("minsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxub8(a, b) ({ uint64_t __r; asm ("maxub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxsb8(a, b) ({ uint64_t __r; asm ("maxsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxuw4(a, b) ({ uint64_t __r; asm ("maxuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxsw4(a, b) ({ uint64_t __r; asm ("maxsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define perr(a, b) ({ uint64_t __r; asm ("perr %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
#define pklb(a) ({ uint64_t __r; asm ("pklb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define pkwb(a) ({ uint64_t __r; asm ("pkwb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define unpkbl(a) ({ uint64_t __r; asm ("unpkbl %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define unpkbw(a) ({ uint64_t __r; asm ("unpkbw %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define minub8(a, b) ({ uint64_t __r; asm (".arch ev6; minub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define minsb8(a, b) ({ uint64_t __r; asm (".arch ev6; minsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define minuw4(a, b) ({ uint64_t __r; asm (".arch ev6; minuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define minsw4(a, b) ({ uint64_t __r; asm (".arch ev6; minsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxub8(a, b) ({ uint64_t __r; asm (".arch ev6; maxub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxsb8(a, b) ({ uint64_t __r; asm (".arch ev6; maxsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxuw4(a, b) ({ uint64_t __r; asm (".arch ev6; maxuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxsw4(a, b) ({ uint64_t __r; asm (".arch ev6; maxsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define perr(a, b) ({ uint64_t __r; asm (".arch ev6; perr %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
#define pklb(a) ({ uint64_t __r; asm (".arch ev6; pklb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define pkwb(a) ({ uint64_t __r; asm (".arch ev6; pkwb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define unpkbl(a) ({ uint64_t __r; asm (".arch ev6; unpkbl %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define unpkbw(a) ({ uint64_t __r; asm (".arch ev6; unpkbw %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#endif
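The ".arch ev6" prefix in the fallbacks above only tells the assembler to accept MAX opcodes under a generic -mcpu; it does not make them safe on older CPUs, so callers still have to gate these paths at run time. A sketch of such a guard, assuming amask's MVI feature bit is 0x100 (that value is an assumption for illustration, not taken from this commit):

    #define AMASK_MVI 0x100 /* assumed MVI/MAX feature bit for amask() */

    /* amask() returns the subset of requested feature bits that the CPU
       does NOT implement, so a zero result means MVI is present. */
    static int have_mvi(void)
    {
        return (amask(AMASK_MVI) & AMASK_MVI) == 0;
    }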
#elif defined(__DECC) /* Digital/Compaq/hp "ccc" compiler */
#include <c_asm.h>
#define ASM_ACCEPT_MVI
#define ldq_u(a) asm ("ldq_u %v0,0(%a0)", a)
#define uldq(a) (*(const __unaligned uint64_t *) (a))
#define cmpbge(a, b) asm ("cmpbge %a0,%a1,%v0", a, b)
@@ -160,6 +156,7 @@ struct unaligned_long { uint64_t l; } __attribute__((packed));
#define pkwb(a) asm ("pkwb %a0,%v0", a)
#define unpkbl(a) asm ("unpkbl %a0,%v0", a)
#define unpkbw(a) asm ("unpkbw %a0,%v0", a)
#define wh64(a) asm ("wh64 %a0", a)
#else
#error "Unknown compiler!"
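With Compaq's ccc, <c_asm.h> supplies an asm() intrinsic instead of GNU extended asm: %a0, %a1, ... bind to the trailing arguments and %v0 names the return value, as the defines above use. A hedged standalone example (hypothetical helper):

    #include <c_asm.h>
    #include <inttypes.h>

    /* extql shifts x right by 8 * (addr & 7) bits, the first half of an
       unaligned quadword load. */
    static uint64_t extract_low(uint64_t x, const void *addr)
    {
        return asm("extql %a0,%a1,%v0", x, addr);
    }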
@@ -54,8 +54,6 @@ static void put_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
int i = 8;
uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */
ASM_ACCEPT_MVI;
do {
uint64_t shorts0, shorts1;
@@ -84,8 +82,6 @@ void add_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
uint64_t signmask = zap(-1, 0x33);
signmask ^= signmask >> 1; /* 0x8000800080008000 */
ASM_ACCEPT_MVI;
do {
uint64_t shorts0, pix0, signs0;
uint64_t shorts1, pix1, signs1;
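The mask constants above fall out of zap cheaply: zap(a, m) clears byte i of a wherever bit i of the 8-bit mask m is set. Spelled out (a sketch restating the code above):

    uint64_t clampmask = zap(-1, 0xaa); /* clear bytes 1,3,5,7 -> 0x00ff00ff00ff00ff */
    uint64_t signmask  = zap(-1, 0x33); /* clear bytes 0,1,4,5 -> 0xffff0000ffff0000 */
    signmask ^= signmask >> 1;          /* -> 0x8000800080008000, the sign bit of each 16-bit lane */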
@@ -59,7 +59,6 @@ static void dct_unquantize_h263_axp(MpegEncContext *s, DCTELEM *block,
#ifdef __alpha_max__
/* I don't think the speed difference justifies runtime
detection. */
ASM_ACCEPT_MVI;
negmask = maxsw4(levels, -1); /* negative -> ffff (-1) */
negmask = minsw4(negmask, 0); /* positive -> 0000 (0) */
#else
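The two MAX instructions above build a per-16-bit-lane sign mask without branches: maxsw4 collapses every negative lane to -1 (0xffff) while leaving the rest alone, and minsw4 then collapses the non-negative lanes to 0. As a standalone sketch:

    /* 0xffff in each 16-bit lane whose value is negative, 0 elsewhere */
    static uint64_t lane_sign_mask(uint64_t levels)
    {
        uint64_t m = maxsw4(levels, -1); /* negative lanes -> 0xffff */
        return minsw4(m, 0);             /* non-negative lanes -> 0 */
    }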