Commit 51daafb0 authored by Ronald S. Bultje, committed by Janne Grunau

x86: videodsp: Properly mark sse2 instructions in emulated_edge_mc as such.

Should fix crashes or corrupt output on pre-SSE2 CPUs (e.g. AMD Athlon
XP 2400+ or Intel Pentium III) when they were using SSE2 code in the
hfix or hvar single-edge (left/right) extension functions.
Signed-off-by: Janne Grunau <janne-libav@jannau.net>
parent 64ba831d
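For context: shufps belongs to the original SSE instruction set, while pshufd, movdqu, and the movd forms that touch xmm registers are SSE2. Executing an SSE2 instruction on a CPU without SSE2 raises an invalid-opcode fault, which is the crash described above; hence the affected functions are renamed to _sse2 below and gated behind an SSE2 CPU-flag check. A minimal sketch of that kind of gating, using GCC's __builtin_cpu_supports() as a stand-in for libav's av_get_cpu_flags()/EXTERNAL_SSE2() machinery:

    #include <stdio.h>

    typedef void (*edge_fn)(void);

    static void edge_c(void)    { puts("plain C fallback"); }
    static void edge_sse2(void) { puts("SSE2 path"); }

    /* Never install the SSE2 path on a CPU that cannot decode it:
     * on an Athlon XP or Pentium III its first pshufd/movdqu would
     * fault with SIGILL. */
    static edge_fn select_edge_fn(void)
    {
        if (__builtin_cpu_supports("sse2"))
            return edge_sse2;
        return edge_c;
    }

    int main(void)
    {
        select_edge_fn()();
        return 0;
    }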
--- a/libavcodec/x86/videodsp.asm
+++ b/libavcodec/x86/videodsp.asm
@@ -102,8 +102,8 @@ cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
     imul             wd, 0x01010101        ; w *= 0x01010101
     movd             m0, wd
     mov              wq, n_wordsq          ; initialize w
-%if cpuflag(sse)
-    shufps           m0, m0, q0000         ; splat
+%if cpuflag(sse2)
+    pshufd           m0, m0, q0000         ; splat
 %else ; mmx
     punpckldq        m0, m0                ; splat
 %endif ; mmx/sse
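This first hunk fixes the splat in the hvar function: both shufps (SSE) and pshufd (SSE2) can broadcast dword 0 across an xmm register, but the routine depends on SSE2 anyway (movd between a general-purpose and an xmm register is itself an SSE2 form), so the cpuflag(sse) guard understated the real requirement. Roughly what the corrected splat computes, expressed with intrinsics as an illustration rather than code from the tree:

    #include <emmintrin.h> /* SSE2 */

    /* Broadcast the 32-bit pattern w (0x01010101 * edge pixel) into
     * all four dwords of an xmm register; _mm_cvtsi32_si128 maps to
     * movd and _mm_shuffle_epi32 to pshufd. */
    static __m128i splat_dword(int w)
    {
        __m128i v = _mm_cvtsi32_si128(w);  /* movd   m0, wd        */
        return _mm_shuffle_epi32(v, 0x00); /* pshufd m0, m0, q0000 */
    }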
@@ -124,7 +124,7 @@ INIT_MMX mmx
 hvar_fn
 %endif

-INIT_XMM sse
+INIT_XMM sse2
 hvar_fn

 ; macro to read/write a horizontal number of pixels (%2) to/from registers
@@ -137,42 +137,49 @@ hvar_fn
 ; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
 ; writing data out is in the same way
 %macro READ_NUM_BYTES 2
 %assign %%off 0     ; offset in source buffer
-%assign %%idx 0     ; mmx/xmm register index
+%assign %%mmx_idx 0 ; mmx register index
+%assign %%xmm_idx 0 ; xmm register index

 %rep %2/mmsize
-    movu     m %+ %%idx, [srcq+%%off]
+%if mmsize == 16
+    movu   xmm %+ %%xmm_idx, [srcq+%%off]
+%assign %%xmm_idx %%xmm_idx+1
+%else ; mmx
+    movu    mm %+ %%mmx_idx, [srcq+%%off]
+%assign %%mmx_idx %%mmx_idx+1
+%endif
 %assign %%off %%off+mmsize
-%assign %%idx %%idx+1
 %endrep ; %2/mmsize

 %if mmsize == 16
 %if (%2-%%off) >= 8
 %if %2 > 16 && (%2-%%off) > 8
-    movu     m %+ %%idx, [srcq+%2-16]
+    movu   xmm %+ %%xmm_idx, [srcq+%2-16]
+%assign %%xmm_idx %%xmm_idx+1
 %assign %%off %2
 %else
-    movq     m %+ %%idx, [srcq+%%off]
+    movq    mm %+ %%mmx_idx, [srcq+%%off]
+%assign %%mmx_idx %%mmx_idx+1
 %assign %%off %%off+8
 %endif
-%assign %%idx %%idx+1
 %endif ; (%2-%%off) >= 8
 %endif

 %if (%2-%%off) >= 4
 %if %2 > 8 && (%2-%%off) > 4
-    movq     m %+ %%idx, [srcq+%2-8]
+    movq    mm %+ %%mmx_idx, [srcq+%2-8]
 %assign %%off %2
 %else
-    movd     m %+ %%idx, [srcq+%%off]
+    movd    mm %+ %%mmx_idx, [srcq+%%off]
 %assign %%off %%off+4
 %endif
-%assign %%idx %%idx+1
+%assign %%mmx_idx %%mmx_idx+1
 %endif ; (%2-%%off) >= 4

 %if (%2-%%off) >= 1
 %if %2 >= 4
-    movd     m %+ %%idx, [srcq+%2-4]
+    movd    mm %+ %%mmx_idx, [srcq+%2-4]
 %elif (%2-%%off) == 1
     mov    valb, [srcq+%2-1]
 %elif (%2-%%off) == 2
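READ_NUM_BYTES unrolls, at assembly time, a load of exactly %2 bytes: whole registers first, then a tail that prefers one unaligned load overlapping already-read bytes over several narrow ones. With this change the full 16-byte chunks go through xmm registers while every narrower tail op uses mm registers, which is what the new %%xmm_idx/%%mmx_idx counters keep track of. A hypothetical C rendering of the same ladder (copy_fixed and its memcpy chunks are illustrative, not the generated code, and the sub-4-byte cases are simplified):

    #include <stdint.h>
    #include <string.h>

    static void copy_fixed(uint8_t *dst, const uint8_t *src, int n)
    {
        int off = 0;
        for (; off + 16 <= n; off += 16)          /* %rep %2/mmsize (xmm) */
            memcpy(dst + off, src + off, 16);
        if (n - off >= 8) {
            if (n > 16 && n - off > 8) {          /* overlapping 16-byte tail */
                memcpy(dst + n - 16, src + n - 16, 16);
                off = n;
            } else {
                memcpy(dst + off, src + off, 8);  /* movq, mm register */
                off += 8;
            }
        }
        if (n - off >= 4) {
            if (n > 8 && n - off > 4) {           /* overlapping 8-byte tail */
                memcpy(dst + n - 8, src + n - 8, 8);
                off = n;
            } else {
                memcpy(dst + off, src + off, 4);  /* movd, mm register */
                off += 4;
            }
        }
        if (n - off >= 1) {
            if (n >= 4) {                         /* overlapping 4-byte tail */
                memcpy(dst + n - 4, src + n - 4, 4);
            } else {                              /* 1-3 byte scalar cases */
                while (off < n) { dst[off] = src[off]; off++; }
            }
        }
    }

For %2 = 22, for example, this comes out as one 16-byte xmm load plus one 8-byte mm load of bytes 14-21 (overlapping bytes 14-15); the two register files are indexed independently so each stays densely packed.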
@@ -180,48 +187,55 @@ hvar_fn
 %elifidn %1, body
     mov    vald, [srcq+%2-3]
 %else
-    movd     m %+ %%idx, [srcq+%2-3]
+    movd    mm %+ %%mmx_idx, [srcq+%2-3]
 %endif
 %endif ; (%2-%%off) >= 1
 %endmacro ; READ_NUM_BYTES

 %macro WRITE_NUM_BYTES 2
 %assign %%off 0     ; offset in destination buffer
-%assign %%idx 0     ; mmx/xmm register index
+%assign %%mmx_idx 0 ; mmx register index
+%assign %%xmm_idx 0 ; xmm register index

 %rep %2/mmsize
-    movu   [dstq+%%off], m %+ %%idx
+%if mmsize == 16
+    movu   [dstq+%%off], xmm %+ %%xmm_idx
+%assign %%xmm_idx %%xmm_idx+1
+%else ; mmx
+    movu   [dstq+%%off], mm %+ %%mmx_idx
+%assign %%mmx_idx %%mmx_idx+1
+%endif
 %assign %%off %%off+mmsize
-%assign %%idx %%idx+1
 %endrep ; %2/mmsize

 %if mmsize == 16
 %if (%2-%%off) >= 8
 %if %2 > 16 && (%2-%%off) > 8
-    movu   [dstq+%2-16], m %+ %%idx
+    movu   [dstq+%2-16], xmm %+ %%xmm_idx
+%assign %%xmm_idx %%xmm_idx+1
 %assign %%off %2
 %else
-    movq   [dstq+%%off], m %+ %%idx
+    movq   [dstq+%%off], mm %+ %%mmx_idx
+%assign %%mmx_idx %%mmx_idx+1
 %assign %%off %%off+8
 %endif
-%assign %%idx %%idx+1
 %endif ; (%2-%%off) >= 8
 %endif

 %if (%2-%%off) >= 4
 %if %2 > 8 && (%2-%%off) > 4
-    movq   [dstq+%2-8], m %+ %%idx
+    movq   [dstq+%2-8], mm %+ %%mmx_idx
 %assign %%off %2
 %else
-    movd   [dstq+%%off], m %+ %%idx
+    movd   [dstq+%%off], mm %+ %%mmx_idx
 %assign %%off %%off+4
 %endif
-%assign %%idx %%idx+1
+%assign %%mmx_idx %%mmx_idx+1
 %endif ; (%2-%%off) >= 4

 %if (%2-%%off) >= 1
 %if %2 >= 4
-    movd   [dstq+%2-4], m %+ %%idx
+    movd   [dstq+%2-4], mm %+ %%mmx_idx
 %elif (%2-%%off) == 1
     mov    [dstq+%2-1], valb
 %elif (%2-%%off) == 2
@@ -231,7 +245,7 @@ hvar_fn
     shr    vald, 16
     mov    [dstq+%2-1], valb
 %else
-    movd   vald, m %+ %%idx
+    movd   vald, mm %+ %%mmx_idx
     mov    [dstq+%2-3], valw
     shr    vald, 16
     mov    [dstq+%2-1], valb
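At this point at most 3 bytes remain. When the block is too small for an overlapping 4-byte store, the hunk above routes the tail through a general-purpose register: movd pulls the low dword out of the mm register, then a word store and a byte store finish the row. The same trick in C (store3 is an illustrative helper; little-endian layout, as on x86, is assumed):

    #include <stdint.h>
    #include <string.h>

    static void store3(uint8_t *p, uint32_t v)
    {
        memcpy(p, &v, 2);   /* mov [dstq+n-3], valw */
        v >>= 16;           /* shr vald, 16         */
        p[2] = (uint8_t)v;  /* mov [dstq+n-1], valb */
    }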
@@ -339,7 +353,7 @@ VERTICAL_EXTEND 16, 22
 %if %1 >= 8
     movd       m0, vald
 %if mmsize == 16
-    shufps     m0, m0, q0000
+    pshufd     m0, m0, q0000
 %else
     punpckldq  m0, m0
 %endif
@@ -409,7 +423,7 @@ H_EXTEND 2, 14
 H_EXTEND 16, 22
 %endif

-INIT_XMM sse
+INIT_XMM sse2
 H_EXTEND 16, 22

 %macro PREFETCH_FN 1
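INIT_XMM sse2 does two things here: it selects 16-byte registers for the m# aliases and it suffixes every symbol that the following macros instantiate, which is where the _sse2 names imported by the C code below come from. A C-preprocessor analogy of that name generation (illustrative only; x86inc does this with NASM macros):

    #define SUFFIX sse2
    #define FN_PASTE(name, suf)  name ## _ ## suf
    #define FN_EXPAND(name, suf) FN_PASTE(name, suf)
    #define FN(name)             FN_EXPAND(name, SUFFIX)

    void FN(ff_emu_edge_hvar)(void); /* declares ff_emu_edge_hvar_sse2 */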
--- a/libavcodec/x86/videodsp_init.c
+++ b/libavcodec/x86/videodsp_init.c
@@ -115,17 +115,17 @@ static emu_edge_hfix_func *hfixtbl_mmx[11] = {
 };
 #endif

 extern emu_edge_hvar_func ff_emu_edge_hvar_mmx;
-extern emu_edge_hfix_func ff_emu_edge_hfix16_sse;
-extern emu_edge_hfix_func ff_emu_edge_hfix18_sse;
-extern emu_edge_hfix_func ff_emu_edge_hfix20_sse;
-extern emu_edge_hfix_func ff_emu_edge_hfix22_sse;
-static emu_edge_hfix_func *hfixtbl_sse[11] = {
+extern emu_edge_hfix_func ff_emu_edge_hfix16_sse2;
+extern emu_edge_hfix_func ff_emu_edge_hfix18_sse2;
+extern emu_edge_hfix_func ff_emu_edge_hfix20_sse2;
+extern emu_edge_hfix_func ff_emu_edge_hfix22_sse2;
+static emu_edge_hfix_func *hfixtbl_sse2[11] = {
     ff_emu_edge_hfix2_mmx,   ff_emu_edge_hfix4_mmx,   ff_emu_edge_hfix6_mmx,
     ff_emu_edge_hfix8_mmx,   ff_emu_edge_hfix10_mmx,  ff_emu_edge_hfix12_mmx,
-    ff_emu_edge_hfix14_mmx,  ff_emu_edge_hfix16_sse,  ff_emu_edge_hfix18_sse,
-    ff_emu_edge_hfix20_sse,  ff_emu_edge_hfix22_sse
+    ff_emu_edge_hfix14_mmx,  ff_emu_edge_hfix16_sse2, ff_emu_edge_hfix18_sse2,
+    ff_emu_edge_hfix20_sse2, ff_emu_edge_hfix22_sse2
 };
-extern emu_edge_hvar_func ff_emu_edge_hvar_sse;
+extern emu_edge_hvar_func ff_emu_edge_hvar_sse2;

 static av_always_inline void emulated_edge_mc(uint8_t *dst, const uint8_t *src,
                                               ptrdiff_t dst_stride,
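The renamed table keeps MMX entries for widths 2-14: a 16-byte register only pays off once at least 16 bytes per row are filled, so just the 16-22 entries get SSE2 versions. The hunk does not show how the table is indexed; the sketch below assumes the obvious mapping for 11 entries covering even widths 2..22 (index = w/2 - 1), and hfix_fn is a hypothetical stand-in rather than libav's real emu_edge_hfix_func prototype:

    #include <stddef.h>
    #include <stdint.h>

    typedef void hfix_fn(uint8_t *dst, ptrdiff_t stride, int height);

    /* Dispatch on the (even) number of bytes to fill per row. */
    static void fill_edge(hfix_fn *tbl[11], uint8_t *dst,
                          ptrdiff_t stride, int height, int w)
    {
        tbl[w / 2 - 1](dst, stride, height); /* assumed mapping */
    }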
@@ -211,7 +211,6 @@ static av_noinline void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src,
                      src_x, src_y, w, h, vfixtbl_mmx, &ff_emu_edge_vvar_mmx,
                      hfixtbl_mmx, &ff_emu_edge_hvar_mmx);
 }
-#endif

 static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src,
                                              ptrdiff_t buf_stride,
@@ -221,7 +220,20 @@ static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src,
 {
     emulated_edge_mc(buf, src, buf_stride, src_stride, block_w, block_h,
                      src_x, src_y, w, h, vfixtbl_sse, &ff_emu_edge_vvar_sse,
-                     hfixtbl_sse, &ff_emu_edge_hvar_sse);
+                     hfixtbl_mmx, &ff_emu_edge_hvar_mmx);
+}
+#endif
+
+static av_noinline void emulated_edge_mc_sse2(uint8_t *buf, const uint8_t *src,
+                                              ptrdiff_t buf_stride,
+                                              ptrdiff_t src_stride,
+                                              int block_w, int block_h,
+                                              int src_x, int src_y, int w,
+                                              int h)
+{
+    emulated_edge_mc(buf, src, buf_stride, src_stride, block_w, block_h, src_x,
+                     src_y, w, h, vfixtbl_sse, &ff_emu_edge_vvar_sse,
+                     hfixtbl_sse2, &ff_emu_edge_hvar_sse2);
 }
 #endif /* HAVE_YASM */
@@ -244,8 +256,13 @@ av_cold void ff_videodsp_init_x86(VideoDSPContext *ctx, int bpc)
     if (EXTERNAL_MMXEXT(cpu_flags)) {
         ctx->prefetch = ff_prefetch_mmxext;
     }
+#if ARCH_X86_32
     if (EXTERNAL_SSE(cpu_flags) && bpc <= 8) {
         ctx->emulated_edge_mc = emulated_edge_mc_sse;
     }
+#endif /* ARCH_X86_32 */
+    if (EXTERNAL_SSE2(cpu_flags) && bpc <= 8) {
+        ctx->emulated_edge_mc = emulated_edge_mc_sse2;
+    }
 #endif /* HAVE_YASM */
 }
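The init logic reads top to bottom, so the strongest passing check wins: on an SSE2-capable CPU the second assignment overwrites the first. The plain-SSE wrapper is now compiled only on 32-bit x86, since x86-64 guarantees SSE2 and that path would be dead code there. A stripped-down model of the idiom, with hypothetical flag names and stub functions standing in for EXTERNAL_SSE()/EXTERNAL_SSE2() and the emulated_edge_mc_* family:

    enum { HAS_SSE = 1 << 0, HAS_SSE2 = 1 << 1 };

    static void emc_c(void)    { /* portable fallback  */ }
    static void emc_sse(void)  { /* SSE, 32-bit builds */ }
    static void emc_sse2(void) { /* SSE2               */ }

    static void init_dispatch(void (**emc)(void), int cpu_flags)
    {
        *emc = emc_c;
    #if ARCH_X86_32
        if (cpu_flags & HAS_SSE)
            *emc = emc_sse;   /* may be overridden below */
    #endif
        if (cpu_flags & HAS_SSE2)
            *emc = emc_sse2;  /* later assignment wins   */
    }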