Commit 45549339 authored by Ronald S. Bultje

vp8: disable mmx functions with sse/sse2 counterparts on x86-64.

x86-64 is guaranteed to have at least SSE2; therefore, the MMX/MMX2
functions will never be used in practice.
parent bd66f073
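The reasoning hinges on how ff_vp8dsp_init_x86() installs functions: pointers are assigned tier by tier, so a later SSE/SSE2 assignment overwrites an earlier MMX one. The sketch below illustrates that pattern with hypothetical stand-in names (DSPContext, dsp_init_x86, CPU_FLAG_*), not the real FFmpeg API; on x86-64 the SSE2 branch always runs, so an MMX assignment with an SSE2 counterpart is dead code and can be compiled out behind ARCH_X86_32.

/* Minimal sketch, not the actual FFmpeg sources: each capability tier
 * overwrites the function pointer set by the tier below it. */
#include <stddef.h>
#include <stdint.h>

#define ARCH_X86_32   0      /* set by the build system in real FFmpeg */
#define CPU_FLAG_MMX  (1<<0) /* hypothetical flag values for this sketch */
#define CPU_FLAG_SSE2 (1<<1)

typedef struct DSPContext {
    void (*idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
} DSPContext;

static void idct_dc_add_mmx(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{ (void)dst; (void)block; (void)stride; /* ... */ }
static void idct_dc_add_sse2(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{ (void)dst; (void)block; (void)stride; /* ... */ }

static void dsp_init_x86(DSPContext *c, int cpu_flags)
{
#if ARCH_X86_32                       /* compiled out entirely on x86-64 */
    if (cpu_flags & CPU_FLAG_MMX)
        c->idct_dc_add = idct_dc_add_mmx;
#endif
    if (cpu_flags & CPU_FLAG_SSE2)    /* every x86-64 CPU has SSE2 */
        c->idct_dc_add = idct_dc_add_sse2;
}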
@@ -138,6 +138,7 @@ static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
dst + 4, dststride, src + 4, srcstride, height, mx, my); \
}
#if ARCH_X86_32
TAP_W8 (mmxext, epel, h4)
TAP_W8 (mmxext, epel, h6)
TAP_W16(mmxext, epel, h6)
@@ -148,6 +149,7 @@ TAP_W8 (mmxext, bilinear, h)
TAP_W16(mmxext, bilinear, h)
TAP_W8 (mmxext, bilinear, v)
TAP_W16(mmxext, bilinear, v)
#endif
TAP_W16(sse2, epel, h6)
TAP_W16(sse2, epel, v6)
@@ -173,15 +175,21 @@ static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT
dst, dststride, tmpptr, SIZE, height, mx, my); \
}
#if ARCH_X86_32
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8) \
HVTAP(mmxext, 8, x, y, 8, 16)
HVTAP(mmxext, 8, 6, 6, 16, 16)
#else
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8)
#endif
HVTAPMMX(4, 4)
HVTAPMMX(4, 6)
HVTAPMMX(6, 4)
HVTAPMMX(6, 6)
HVTAP(mmxext, 8, 6, 6, 16, 16)
#define HVTAPSSE2(x, y, w) \
HVTAP(sse2, 16, x, y, w, 16) \
@@ -211,8 +219,10 @@ static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
}
HVBILIN(mmxext, 8, 4, 8)
#if ARCH_X86_32
HVBILIN(mmxext, 8, 8, 16)
HVBILIN(mmxext, 8, 16, 16)
#endif
HVBILIN(sse2, 8, 8, 16)
HVBILIN(sse2, 8, 16, 16)
HVBILIN(ssse3, 8, 4, 8)
@@ -311,15 +321,18 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
if (mm_flags & AV_CPU_FLAG_MMX) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
#if ARCH_X86_32
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_add = ff_vp8_idct_add_mmx;
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx;
c->put_vp8_epel_pixels_tab[0][0][0] =
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
#endif
c->put_vp8_epel_pixels_tab[1][0][0] =
c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
#if ARCH_X86_32
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx;
@@ -332,17 +345,19 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx;
#endif
}
/* note that 4-tap width=16 functions are missing because w=16
 * is only used for luma, and luma is always a copy or sixtap. */
if (mm_flags & AV_CPU_FLAG_MMX2) {
VP8_MC_FUNC(2, 4, mmxext);
VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
#if ARCH_X86_32
VP8_LUMA_MC_FUNC(0, 16, mmxext);
VP8_MC_FUNC(1, 8, mmxext);
VP8_MC_FUNC(2, 4, mmxext);
VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
VP8_BILINEAR_MC_FUNC(1, 8, mmxext);
VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext;
@@ -356,6 +371,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSE) {
...
@@ -865,6 +865,7 @@ cglobal put_vp8_pixels8_mmx, 5,5
jg .nextrow
REP_RET
%if ARCH_X86_32
cglobal put_vp8_pixels16_mmx, 5,5
.nextrow:
movq mm0, [r2+r3*0+0]
@@ -880,6 +881,7 @@ cglobal put_vp8_pixels16_mmx, 5,5
sub r4d, 2
jg .nextrow
REP_RET
%endif
cglobal put_vp8_pixels16_sse, 5,5,2
.nextrow:
@@ -973,6 +975,7 @@ cglobal vp8_idct_dc_add_sse4, 3, 3, 6
; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------
%if ARCH_X86_32
INIT_MMX
cglobal vp8_idct_dc_add4y_mmx, 3, 3
; load data
@@ -1007,6 +1010,7 @@ cglobal vp8_idct_dc_add4y_mmx, 3, 3
ADD_DC m0, m6, 0, mova
ADD_DC m1, m7, 8, mova
RET
%endif
INIT_XMM
cglobal vp8_idct_dc_add4y_sse2, 3, 3, 6
@@ -1152,7 +1156,9 @@ cglobal vp8_idct_add_%1, 3, 3
RET
%endmacro
%if ARCH_X86_32
VP8_IDCT_ADD mmx
%endif
VP8_IDCT_ADD sse
;-----------------------------------------------------------------------------
@@ -1217,7 +1223,9 @@ cglobal vp8_luma_dc_wht_%1, 2,3
%endmacro
INIT_MMX
%if ARCH_X86_32
VP8_DC_WHT mmx
%endif
VP8_DC_WHT sse
;-----------------------------------------------------------------------------
@@ -1610,6 +1618,7 @@ cglobal vp8_%2_loop_filter_simple_%1, 3, %3, %4
%endif
%endmacro
%if ARCH_X86_32
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
SIMPLE_LOOPFILTER mmx, v, 4, 0
@@ -1617,6 +1626,8 @@ SIMPLE_LOOPFILTER mmx, h, 5, 0
%define SPLATB_REG SPLATB_REG_MMXEXT
SIMPLE_LOOPFILTER mmxext, v, 4, 0
SIMPLE_LOOPFILTER mmxext, h, 5, 0
%endif
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
%define WRITE_8W WRITE_8W_SSE2
@@ -2118,6 +2129,7 @@ cglobal vp8_%2_loop_filter16y_inner_%1, 5, %3, %5
RET
%endmacro
%if ARCH_X86_32
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
INNER_LOOPFILTER mmx, v, 6, 16, 0
@@ -2130,6 +2142,7 @@ INNER_LOOPFILTER mmxext, v, 6, 16, 0
INNER_LOOPFILTER mmxext, h, 6, 16, 0
INNER_LOOPFILTER mmxext, v, 6, 8, 0
INNER_LOOPFILTER mmxext, h, 6, 8, 0
%endif
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
@@ -2814,6 +2827,7 @@ cglobal vp8_%2_loop_filter16y_mbedge_%1, 5, %3, %5
RET
%endmacro
%if ARCH_X86_32
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
MBEDGE_LOOPFILTER mmx, v, 6, 16, 0
@@ -2826,6 +2840,7 @@ MBEDGE_LOOPFILTER mmxext, v, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, h, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, v, 6, 8, 0
MBEDGE_LOOPFILTER mmxext, h, 6, 8, 0
%endif
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
...