Commit c4148a66 authored by Clément Bœsch

x86/vp9mc: add vp9 namespace.

parent bcd5fd53
@@ -30,7 +30,7 @@
 #if HAVE_YASM
 #define fpel_func(avg, sz, opt) \
-void ff_##avg##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+void ff_vp9_##avg##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
                           const uint8_t *src, ptrdiff_t src_stride, \
                           int h, int mx, int my)
 fpel_func(put, 4, mmx);
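(Illustration, not part of the commit: with the renamed macro, fpel_func(put, 4, mmx) pastes the tokens ff_vp9_ + put + 4 + _mmx. Assumed expansion, shown only to make the rename concrete:)

    #include <stddef.h>
    #include <stdint.h>

    /* Assumed expansion of fpel_func(put, 4, mmx) after this commit:
     * the exported symbol gains the vp9_ infix (was ff_put4_mmx). */
    void ff_vp9_put4_mmx(uint8_t *dst, ptrdiff_t dst_stride,
                         const uint8_t *src, ptrdiff_t src_stride,
                         int h, int mx, int my);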
@@ -46,7 +46,7 @@ fpel_func(avg, 64, sse2);
 #undef fpel_func
 #define mc_func(avg, sz, dir, opt) \
-void ff_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+void ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
                                              const uint8_t *src, ptrdiff_t src_stride, \
                                              int h, const int8_t (*filter)[16])
 #define mc_funcs(sz) \
@@ -66,13 +66,13 @@ mc_funcs(16);
 #define mc_rep_func(avg, sz, hsz, dir, opt) \
 static av_always_inline void \
-ff_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
                                         const uint8_t *src, ptrdiff_t src_stride, \
                                         int h, const int8_t (*filter)[16]) \
 { \
-    ff_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst, dst_stride, src, \
+    ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst, dst_stride, src, \
                                              src_stride, h, filter); \
-    ff_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst + hsz, dst_stride, src + hsz, \
+    ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst + hsz, dst_stride, src + hsz, \
                                              src_stride, h, filter); \
 }
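(Aside, not from the commit: mc_rep_func synthesizes a full-width 8-tap call from two half-width ones offset by hsz pixels. A hedged concrete expansion for avg=put, sz=32, hsz=16, dir=h, opt=ssse3:)

    /* Assumed expansion of mc_rep_func(put, 32, 16, h, ssse3): the 32-wide
     * horizontal filter is two adjacent 16-wide calls; the real macro
     * marks this av_always_inline. */
    static void ff_vp9_put_8tap_1d_h_32_ssse3(uint8_t *dst, ptrdiff_t dst_stride,
                                              const uint8_t *src, ptrdiff_t src_stride,
                                              int h, const int8_t (*filter)[16])
    {
        ff_vp9_put_8tap_1d_h_16_ssse3(dst,      dst_stride, src,      src_stride,
                                      h, filter);
        ff_vp9_put_8tap_1d_h_16_ssse3(dst + 16, dst_stride, src + 16, src_stride,
                                      h, filter);
    }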
@@ -99,9 +99,9 @@ static void op##_8tap_##fname##_##sz##hv_ssse3(uint8_t *dst, ptrdiff_t dst_strid
                                     int h, int mx, int my) \
 { \
     LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); \
-    ff_put_8tap_1d_h_##sz##_ssse3(temp, 64, src - 3 * src_stride, src_stride, \
+    ff_vp9_put_8tap_1d_h_##sz##_ssse3(temp, 64, src - 3 * src_stride, src_stride, \
                                   h + 7, ff_filters_ssse3[f][mx - 1]); \
-    ff_##op##_8tap_1d_v_##sz##_ssse3(dst, dst_stride, temp + 3 * 64, 64, \
+    ff_vp9_##op##_8tap_1d_v_##sz##_ssse3(dst, dst_stride, temp + 3 * 64, 64, \
                                      h, ff_filters_ssse3[f][my - 1]); \
 }
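(Aside, not from the commit: this wrapper composes the two 1-D passes. The scratch buffer is 71 * 64 bytes because the widest block is 64 and the vertical 8-tap needs 7 extra rows of horizontal output: 3 above, reached via src - 3 * src_stride and temp + 3 * 64, and 4 below, covered by filtering h + 7 rows. A hedged expansion for op=put, fname=regular, sz=16, where f is assumed to be FILTER_8TAP_REGULAR:)

    /* Assumed expansion of the hv wrapper: h-pass into scratch, then
     * v-pass from the scratch buffer's block origin. */
    static void put_8tap_regular_16hv_ssse3(uint8_t *dst, ptrdiff_t dst_stride,
                                            const uint8_t *src, ptrdiff_t src_stride,
                                            int h, int mx, int my)
    {
        LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); /* 64 rows + 7 margin rows */
        ff_vp9_put_8tap_1d_h_16_ssse3(temp, 64, src - 3 * src_stride, src_stride,
                                      h + 7, ff_filters_ssse3[FILTER_8TAP_REGULAR][mx - 1]);
        ff_vp9_put_8tap_1d_v_16_ssse3(dst, dst_stride, temp + 3 * 64, 64,
                                      h, ff_filters_ssse3[FILTER_8TAP_REGULAR][my - 1]);
    }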
@@ -129,7 +129,7 @@ static void op##_8tap_##fname##_##sz##dir##_ssse3(uint8_t *dst, ptrdiff_t dst_st
                                     const uint8_t *src, ptrdiff_t src_stride, \
                                     int h, int mx, int my) \
 { \
-    ff_##op##_8tap_1d_##dir##_##sz##_ssse3(dst, dst_stride, src, src_stride, \
+    ff_vp9_##op##_8tap_1d_##dir##_##sz##_ssse3(dst, dst_stride, src, src_stride, \
                                            h, ff_filters_ssse3[f][dvar - 1]); \
 }
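(Same pattern for the single-direction wrapper; dvar names whichever of mx/my indexes the filter bank. Hedged expansion for op=put, fname=sharp, sz=8, dir=h, dvar=mx:)

    /* Assumed expansion: horizontal-only subpel, so my goes unused. */
    static void put_8tap_sharp_8h_ssse3(uint8_t *dst, ptrdiff_t dst_stride,
                                        const uint8_t *src, ptrdiff_t src_stride,
                                        int h, int mx, int my)
    {
        ff_vp9_put_8tap_1d_h_8_ssse3(dst, dst_stride, src, src_stride,
                                     h, ff_filters_ssse3[FILTER_8TAP_SHARP][mx - 1]);
    }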
@@ -256,7 +256,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
     dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
     dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
     dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][0][0] = \
-    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_##type##sz##_##opt
+    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_vp9_##type##sz##_##opt
 #define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type, opt) \
     dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = type##_8tap_smooth_##sz##dir##_##opt; \
...
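(Aside, not from the commit: the [0][0] subscript is the fullpel slot, where mx == my == 0 and no filtering happens, so init_fpel aliases every filter type to the same copy routine. A hedged expansion with type=put, sz=4, opt=mmx, idx1/idx2 left symbolic:)

    /* Assumed expansion of one init_fpel() use: all five filter entries
     * point at the same fullpel function, now carrying the vp9_ infix. */
    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] =
    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] =
    dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][0][0] =
    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_vp9_put4_mmx;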
@@ -87,7 +87,7 @@ SECTION .text
 %macro filter_h_fn 1
 %assign %%px mmsize/2
-cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
+cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
     mova        m6, [pw_256]
     mova        m7, [filteryq+ 0]
 %if ARCH_X86_64 && mmsize > 8
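(A hedged note on the naming, assumed from FFmpeg's x86inc conventions: cglobal prepends the project's private prefix ff_ and appends the SIMD suffix of the active INIT_* mode, so the vp9_ added here is what lines these symbols up with the ff_vp9_* prototypes in the C half of this commit. For example, with %1=put and mmsize=16 under ssse3, %%px is 8 and the emitted symbol matches:)

    /* Assumed correspondence: cglobal vp9_put_8tap_1d_h_8 in an ssse3 build
     * emits this symbol, matching mc_func(put, 8, h, ssse3) on the C side. */
    void ff_vp9_put_8tap_1d_h_8_ssse3(uint8_t *dst, ptrdiff_t dst_stride,
                                      const uint8_t *src, ptrdiff_t src_stride,
                                      int h, const int8_t (*filter)[16]);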
@@ -148,7 +148,7 @@ filter_h_fn avg
 %if ARCH_X86_64
 %macro filter_hx2_fn 1
 %assign %%px mmsize
-cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, dstride, src, sstride, h, filtery
+cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, dstride, src, sstride, h, filtery
     mova       m13, [pw_256]
     mova        m8, [filteryq+ 0]
     mova        m9, [filteryq+16]
@@ -204,9 +204,9 @@ filter_hx2_fn avg
 %macro filter_v_fn 1
 %assign %%px mmsize/2
 %if ARCH_X86_64
-cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
 %else
-cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
     mov   filteryq, r5mp
 %define hd r4mp
 %endif
@@ -278,7 +278,7 @@ filter_v_fn avg
 %macro filter_vx2_fn 1
 %assign %%px mmsize
-cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, dstride, src, sstride, h, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, dstride, src, sstride, h, filtery, src4, sstride3
     mova       m13, [pw_256]
     lea  sstride3q, [sstrideq*3]
     lea      src4q, [srcq+sstrideq]
@@ -348,11 +348,11 @@ filter_vx2_fn avg
 %endif
 %if %2 <= 16
-cglobal %1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
+cglobal vp9_%1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
     lea  sstride3q, [sstrideq*3]
     lea  dstride3q, [dstrideq*3]
 %else
-cglobal %1%2, 5, 5, 4, dst, dstride, src, sstride, h
+cglobal vp9_%1%2, 5, 5, 4, dst, dstride, src, sstride, h
 %endif
 .loop:
     %%srcfn     m0, [srcq]
...
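(To close, a hedged C model of what the renamed fullpel routines compute, for the put case; avg would round-average with the existing dst instead:)

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Assumed C model of vp9_put<sz>: a plain w x h block copy; the asm
     * loop above does the same with SIMD loads/stores per row. */
    static void vp9_put_fpel_c(uint8_t *dst, ptrdiff_t dst_stride,
                               const uint8_t *src, ptrdiff_t src_stride,
                               int w, int h)
    {
        while (h--) {
            memcpy(dst, src, w);
            dst += dst_stride;
            src += src_stride;
        }
    }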