Commit 1898c2f4 authored by Matt Oliver, committed by Michael Niedermayer

inline asm: fix arrays as named constraints.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
parent 134206ca
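
Background for the diff below: FFmpeg's named-constraint helpers wrap a symbol x into the inline-asm operand [x] "m"(x). That form is fine for scalar variables, but when x is an array the operand becomes the array object itself, which is what broke. The new *_ARRAY variants bind [x] "m"(*x) instead, so the operand is the memory at the first element, valid for arrays and pointers alike. A minimal standalone sketch of the fixed operand form (hypothetical names, x86 GCC-style inline asm; not FFmpeg code):

#include <stdint.h>

static const uint8_t tab[16] = { 42 };

static uint8_t load_first(void)
{
    uint8_t v;
    /* [tab] "m"(*tab) names the memory at tab[0]; the pre-fix form
     * "m"(tab) would hand the compiler the whole array object. */
    __asm__ ("movb %[tab], %0"
             : "=r"(v)
             : [tab] "m"(*tab));
    return v;
}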
@@ -111,7 +111,7 @@
     "2: \n\t"
 #else /* BROKEN_RELOCATIONS */
-#define TABLES_ARG NAMED_CONSTRAINTS_ADD(ff_h264_cabac_tables)
+#define TABLES_ARG NAMED_CONSTRAINTS_ARRAY_ADD(ff_h264_cabac_tables)
 #define RIP_ARG
 #if HAVE_FAST_CMOV
@@ -185,7 +185,7 @@ static av_always_inline int get_cabac_inline_x86(CABACContext *c,
     __asm__ volatile(
         "lea "MANGLE(ff_h264_cabac_tables)", %0 \n\t"
         : "=&r"(tables)
-        : NAMED_CONSTRAINTS(ff_h264_cabac_tables)
+        : NAMED_CONSTRAINTS_ARRAY(ff_h264_cabac_tables)
     );
 #endif
...
@@ -55,7 +55,7 @@ static int decode_significance_x86(CABACContext *c, int max_coeff,
     __asm__ volatile(
         "lea "MANGLE(ff_h264_cabac_tables)", %0 \n\t"
         : "=&r"(tables)
-        : NAMED_CONSTRAINTS(ff_h264_cabac_tables)
+        : NAMED_CONSTRAINTS_ARRAY(ff_h264_cabac_tables)
     );
 #endif
@@ -131,7 +131,7 @@ static int decode_significance_8x8_x86(CABACContext *c,
     __asm__ volatile(
         "lea "MANGLE(ff_h264_cabac_tables)", %0 \n\t"
         : "=&r"(tables)
-        : NAMED_CONSTRAINTS(ff_h264_cabac_tables)
+        : NAMED_CONSTRAINTS_ARRAY(ff_h264_cabac_tables)
     );
 #endif
...
@@ -381,7 +381,7 @@ av_extern_inline void ff_idct_xvid_sse2(short *block)
     iLLM_PASS("%0")
     "6: \n\t"
     : "+r"(block)
-    : NAMED_CONSTRAINTS(m127,iTab1,walkenIdctRounders,iTab2,iTab3,iTab4,tan3,tan1,tan2,sqrt2)
+    : NAMED_CONSTRAINTS_ARRAY(m127,iTab1,walkenIdctRounders,iTab2,iTab3,iTab4,tan3,tan1,tan2,sqrt2)
     : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" ,
                    "%xmm4" , "%xmm5" , "%xmm6" , "%xmm7" ,)
 #if ARCH_X86_64
...
@@ -72,7 +72,7 @@ static void lpc_apply_welch_window_sse2(const int32_t *data, int len,
         "3: \n\t"
         :"+&r"(i), "+&r"(j)
         :"r"(w_data+n2), "r"(data+n2), "m"(c), "r"(len)
-         NAMED_CONSTRAINTS_ADD(pd_1,pd_2)
+         NAMED_CONSTRAINTS_ARRAY_ADD(pd_1,pd_2)
         XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                           "%xmm5", "%xmm6", "%xmm7")
     );
@@ -117,7 +117,7 @@ static void lpc_compute_autocorr_sse2(const double *data, int len, int lag,
             "movsd %%xmm2, 16(%1) \n\t"
             :"+&r"(i)
             :"r"(autoc+j), "r"(data+len), "r"(data+len-j)
-             NAMED_CONSTRAINTS_ADD(pd_1)
+             NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
             :"memory"
         );
     } else {
@@ -141,7 +141,7 @@ static void lpc_compute_autocorr_sse2(const double *data, int len, int lag,
             "movsd %%xmm1, %2 \n\t"
             :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
             :"r"(data+len), "r"(data+len-j)
-             NAMED_CONSTRAINTS_ADD(pd_1)
+             NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
         );
     }
 }
...
@@ -111,6 +111,8 @@ typedef int x86_reg;
 # define MANGLE(a) EXTERN_PREFIX LOCAL_MANGLE(a)
 # define NAMED_CONSTRAINTS_ADD(...)
 # define NAMED_CONSTRAINTS(...)
+# define NAMED_CONSTRAINTS_ARRAY_ADD(...)
+# define NAMED_CONSTRAINTS_ARRAY(...)
 #else
 /* When direct symbol references are used in code passed to a compiler that does not support them
  * then these references need to be converted to named asm constraints instead.
@@ -141,6 +143,10 @@ typedef int x86_reg;
 # define NAMED_CONSTRAINTS_ADD(...) , FOR_EACH_VA(NAME_CONSTRAINT,__VA_ARGS__)
 // Same but without comma for when there are no previously defined constraints
 # define NAMED_CONSTRAINTS(...) FOR_EACH_VA(NAME_CONSTRAINT,__VA_ARGS__)
+// Same as above NAMED_CONSTRAINTS except used for passing arrays/pointers instead of normal variables
+# define NAME_CONSTRAINT_ARRAY(x) [x] "m"(*x)
+# define NAMED_CONSTRAINTS_ARRAY_ADD(...) , FOR_EACH_VA(NAME_CONSTRAINT_ARRAY,__VA_ARGS__)
+# define NAMED_CONSTRAINTS_ARRAY(...) FOR_EACH_VA(NAME_CONSTRAINT_ARRAY,__VA_ARGS__)
 #endif
 #endif /* AVUTIL_X86_ASM_H */
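
For reference, given the definitions above (and assuming FOR_EACH_VA, defined earlier in this header, applies the given macro to each argument with comma separators), the lpc.c call sites now expand roughly as follows:

/* old: NAMED_CONSTRAINTS_ADD(pd_1,pd_2)
 *  ->  , [pd_1] "m"(pd_1) , [pd_2] "m"(pd_2)    (array objects as operands)
 * new: NAMED_CONSTRAINTS_ARRAY_ADD(pd_1,pd_2)
 *  ->  , [pd_1] "m"(*pd_1) , [pd_2] "m"(*pd_2)  (first elements as operands)
 */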
@@ -48,7 +48,7 @@ __asm__ volatile(\
     : "r" (((uint8_t*)(src+sample_index))-len),\
       "r" (((uint8_t*)filter)-len),\
       "r" (dst+dst_index)\
-      NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
+      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
 );
 
 #define LINEAR_CORE_INT16_MMX2 \
@@ -100,7 +100,7 @@ __asm__ volatile(\
     : "r" (((uint8_t*)(src+sample_index))-len),\
       "r" (((uint8_t*)filter)-len),\
       "r" (dst+dst_index)\
-      NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
+      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
     XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
 );
...
@@ -141,7 +141,7 @@
 #if COMPILE_TEMPLATE_MMXEXT
 #undef RGB_PACK24_B_OPERANDS
-#define RGB_PACK24_B_OPERANDS NAMED_CONSTRAINTS_ADD(mask1101,mask0110,mask0100,mask0010,mask1001)
+#define RGB_PACK24_B_OPERANDS NAMED_CONSTRAINTS_ARRAY_ADD(mask1101,mask0110,mask0100,mask0010,mask1001)
 #else
 #undef RGB_PACK24_B_OPERANDS
 #define RGB_PACK24_B_OPERANDS
...