Commit 726501a3 authored by Ronald S. Bultje

vp9: add 32x32 idct AVX2 implementation.

About 1.8x speedup compared to the AVX version for the full IDCT. Other
sub-IDCT scenarios also see speedups. Full --bench output for
idct_32x32_add_${bpp}_${subidct}_${opt} (50k cycles):

nop: 16.5
vp9_inv_dct_dct_32x32_add_8_1_c: 2284.4
vp9_inv_dct_dct_32x32_add_8_1_sse2: 145.0
vp9_inv_dct_dct_32x32_add_8_1_ssse3: 137.4
vp9_inv_dct_dct_32x32_add_8_1_avx: 137.1
vp9_inv_dct_dct_32x32_add_8_1_avx2: 73.2
vp9_inv_dct_dct_32x32_add_8_2_c: 14680.8
vp9_inv_dct_dct_32x32_add_8_2_sse2: 2617.2
vp9_inv_dct_dct_32x32_add_8_2_ssse3: 982.9
vp9_inv_dct_dct_32x32_add_8_2_avx: 958.5
vp9_inv_dct_dct_32x32_add_8_2_avx2: 704.2
vp9_inv_dct_dct_32x32_add_8_4_c: 14443.1
vp9_inv_dct_dct_32x32_add_8_4_sse2: 2717.1
vp9_inv_dct_dct_32x32_add_8_4_ssse3: 965.7
vp9_inv_dct_dct_32x32_add_8_4_avx: 1000.7
vp9_inv_dct_dct_32x32_add_8_4_avx2: 717.1
vp9_inv_dct_dct_32x32_add_8_8_c: 14436.4
vp9_inv_dct_dct_32x32_add_8_8_sse2: 2671.8
vp9_inv_dct_dct_32x32_add_8_8_ssse3: 1038.5
vp9_inv_dct_dct_32x32_add_8_8_avx: 983.0
vp9_inv_dct_dct_32x32_add_8_8_avx2: 729.4
vp9_inv_dct_dct_32x32_add_8_16_c: 14614.7
vp9_inv_dct_dct_32x32_add_8_16_sse2: 2701.7
vp9_inv_dct_dct_32x32_add_8_16_ssse3: 1334.4
vp9_inv_dct_dct_32x32_add_8_16_avx: 1276.7
vp9_inv_dct_dct_32x32_add_8_16_avx2: 719.5
vp9_inv_dct_dct_32x32_add_8_32_c: 14363.6
vp9_inv_dct_dct_32x32_add_8_32_sse2: 2575.6
vp9_inv_dct_dct_32x32_add_8_32_ssse3: 2633.9
vp9_inv_dct_dct_32x32_add_8_32_avx: 2539.6
vp9_inv_dct_dct_32x32_add_8_32_avx2: 1395.0
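
(The quoted 1.8x is the full-IDCT row above: 2539.6 / 1395.0 ≈ 1.82; the
dc-only case improves by a similar factor, 137.1 / 73.2 ≈ 1.87.)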
parent c6e900e9
@@ -115,6 +115,7 @@ itxfm_func(idct, idct, 32, ssse3);
itxfm_func(idct, idct, 32, avx);
itxfm_func(iwht, iwht, 4, mmx);
itxfm_func(idct, idct, 16, avx2);
itxfm_func(idct, idct, 32, avx2);
#undef itxfm_func
#undef itxfm_funcs
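
For context (not part of the diff): assuming the usual itxfm_func
declaration-helper pattern in vp9dsp_init.c, the added line declares a
prototype along these lines, with the argument list matching the cglobal
line in the asm below:

    #include <stddef.h>
    #include <stdint.h>

    /* hypothetical expansion of itxfm_func(idct, idct, 32, avx2) */
    void ff_vp9_idct_idct_32x32_add_avx2(uint8_t *dst, ptrdiff_t stride,
                                         int16_t *block, int eob);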
@@ -384,6 +385,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact)
if (ARCH_X86_64) {
#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_avx2;
dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_avx2;
init_subpel3_32_64(0, put, 8, avx2);
init_subpel3_32_64(1, avg, 8, avx2);
#endif
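
The decoder reaches the new code only through this table; an illustrative
call (not part of the diff) looks like:

    dsp->itxfm_add[TX_32X32][DCT_DCT](dst, stride, block, eob);

so the AVX2 entry simply overrides the pointer installed earlier by the
sse2/ssse3/avx initializers for 8 bpp.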
@@ -1539,13 +1539,12 @@ cglobal vp9_idct_idct_16x16_add, 4, 4, 16, dst, stride, block, eob
jg .idctfull
; dc-only
mova m1, [pw_11585x2]
vpbroadcastw m0, [blockq]
pmulhrsw m0, m1
pmulhrsw m0, m1
pxor m5, m5
pmulhrsw m0, [pw_512]
movd [blockq], xm5
DEFINE_ARGS dst, stride, stride3, cnt
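
Both dc-only paths (this one and the 32x32 one added below) compute the same
scalar math. Modeling pmulhrsw(x, c) as (x*c + 16384) >> 15, the three
multiplies are equivalent to this sketch (illustrative C, not part of the
commit):

    /* scalar model of the dc-only path: two rounded multiplies by
     * cospi_16_64 = 11585 (pw_11585x2), then a rounded >> 6 (pw_512) */
    static int vp9_dc_only_model(int dc)
    {
        dc = (dc * 11585 + (1 << 13)) >> 14; /* pmulhrsw m0, m1 */
        dc = (dc * 11585 + (1 << 13)) >> 14; /* pmulhrsw m0, m1 */
        return (dc + 32) >> 6;               /* pmulhrsw m0, [pw_512] */
    }

The result is broadcast to all lanes and added to every destination pixel.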
@@ -1993,7 +1992,12 @@ IADST16_FN iadst, IADST16, iadst, IADST16, avx
;---------------------------------------------------------------------------------------------
%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
%if %2 == 1
%assign %%str mmsize
%else
%assign %%str 64
%endif
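; %%str is the byte stride between row slots in the temp buffer: one register
; width (mmsize) per row in pass 1, a full 64-byte (32 x int16) row in pass 2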
; first do t0-15, this can be done identical to idct16x16
VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str, 1
@@ -2288,17 +2292,125 @@ IADST16_FN iadst, IADST16, iadst, IADST16, avx
mova m3, [tmpq+20*%%str] ; t5
mova m13, [tmpq+24*%%str] ; t6
SUMSUB_BA w, 6, 8, 10
mova [tmpq+ 3*%%str], m8 ; t15
SUMSUB_BA w, 0, 9, 8
SUMSUB_BA w, 15, 12, 8
SUMSUB_BA w, 14, 11, 8
SUMSUB_BA w, 1, 2, 8
SUMSUB_BA w, 7, 3, 8
SUMSUB_BA w, 5, 13, 8
mova m10, [tmpq+28*%%str] ; t7
SUMSUB_BA w, 4, 10, 8
%if cpuflag(avx2)
; the "shitty" about this idct is that the final pass does the outermost
; interleave sumsubs (t0/31, t1/30, etc) but the tN for the 16x16 need
; to be sequential, which means I need to load/store half of the sumsub
; intermediates back to/from memory to get a 16x16 transpose going...
; This would be easier if we had more (e.g. 32) YMM regs here.
mova [tmpq+ 7*%%str], m9
mova [tmpq+11*%%str], m12
mova [tmpq+15*%%str], m11
mova [tmpq+19*%%str], m2
mova [tmpq+23*%%str], m3
mova [tmpq+27*%%str], m13
mova [tmpq+31*%%str], m10
mova [tmpq+12*%%str], m5
mova m13, [tmpq+30*%%str] ; t8
mova m12, [tmpq+26*%%str] ; t9
mova m11, [tmpq+22*%%str] ; t10
mova m10, [tmpq+18*%%str] ; t11
mova m9, [tmpq+17*%%str] ; t20
mova m8, [tmpq+ 1*%%str] ; t21
mova m3, [tmpq+25*%%str] ; t22
mova m2, [tmpq+ 5*%%str] ; t23
SUMSUB_BA w, 9, 10, 5
SUMSUB_BA w, 8, 11, 5
SUMSUB_BA w, 3, 12, 5
SUMSUB_BA w, 2, 13, 5
mova [tmpq+ 1*%%str], m10
mova [tmpq+ 5*%%str], m11
mova [tmpq+17*%%str], m12
mova [tmpq+25*%%str], m13
mova m13, [tmpq+14*%%str] ; t12
mova m12, [tmpq+10*%%str] ; t13
mova m11, [tmpq+ 9*%%str] ; t18
mova m10, [tmpq+13*%%str] ; t19
SUMSUB_BA w, 11, 12, 5
SUMSUB_BA w, 10, 13, 5
mova [tmpq+ 9*%%str], m13
mova [tmpq+13*%%str], m12
mova [tmpq+10*%%str], m10
mova [tmpq+14*%%str], m11
mova m13, [tmpq+ 6*%%str] ; t14
mova m12, [tmpq+ 2*%%str] ; t15
mova m11, [tmpq+21*%%str] ; t16
mova m10, [tmpq+29*%%str] ; t17
SUMSUB_BA w, 11, 12, 5
SUMSUB_BA w, 10, 13, 5
mova [tmpq+21*%%str], m12
mova [tmpq+29*%%str], m13
mova m12, [tmpq+10*%%str]
mova m13, [tmpq+14*%%str]
TRANSPOSE16x16W 6, 0, 15, 14, 1, 7, 5, 4, \
2, 3, 8, 9, 12, 13, 10, 11, \
[tmpq+12*%%str], [tmpq+ 8*%%str], 1
mova [tmpq+ 0*%%str], m6
mova [tmpq+ 2*%%str], m0
mova [tmpq+ 4*%%str], m15
mova [tmpq+ 6*%%str], m14
mova [tmpq+10*%%str], m7
mova [tmpq+12*%%str], m5
mova [tmpq+14*%%str], m4
mova [tmpq+16*%%str], m2
mova [tmpq+18*%%str], m3
mova [tmpq+20*%%str], m8
mova [tmpq+22*%%str], m9
mova [tmpq+24*%%str], m12
mova [tmpq+26*%%str], m13
mova [tmpq+28*%%str], m10
mova [tmpq+30*%%str], m11
mova m0, [tmpq+21*%%str]
mova m1, [tmpq+29*%%str]
mova m2, [tmpq+13*%%str]
mova m3, [tmpq+ 9*%%str]
mova m4, [tmpq+ 1*%%str]
mova m5, [tmpq+ 5*%%str]
mova m7, [tmpq+25*%%str]
mova m8, [tmpq+31*%%str]
mova m9, [tmpq+27*%%str]
mova m10, [tmpq+23*%%str]
mova m11, [tmpq+19*%%str]
mova m12, [tmpq+15*%%str]
mova m13, [tmpq+11*%%str]
mova m14, [tmpq+ 7*%%str]
mova m15, [tmpq+ 3*%%str]
TRANSPOSE16x16W 0, 1, 2, 3, 4, 5, 6, 7, \
8, 9, 10, 11, 12, 13, 14, 15, \
[tmpq+17*%%str], [tmpq+ 9*%%str], 1
mova [tmpq+ 1*%%str], m0
mova [tmpq+ 3*%%str], m1
mova [tmpq+ 5*%%str], m2
mova [tmpq+ 7*%%str], m3
mova [tmpq+11*%%str], m5
mova [tmpq+13*%%str], m6
mova [tmpq+15*%%str], m7
mova [tmpq+17*%%str], m8
mova [tmpq+19*%%str], m9
mova [tmpq+21*%%str], m10
mova [tmpq+23*%%str], m11
mova [tmpq+25*%%str], m12
mova [tmpq+27*%%str], m13
mova [tmpq+29*%%str], m14
mova [tmpq+31*%%str], m15
%else ; !avx2
TRANSPOSE8x8W 6, 0, 15, 14, 1, 7, 5, 4, 8
mova [tmpq+ 0*%%str], m6
mova [tmpq+ 4*%%str], m0
@@ -2367,6 +2479,7 @@ IADST16_FN iadst, IADST16, iadst, IADST16, avx
mova [tmpq+22*%%str], m13
mova [tmpq+26*%%str], m14
mova [tmpq+30*%%str], m15
%endif ; avx2
%else
mova m2, [tmpq+24*%%str] ; t6
mova m3, [tmpq+28*%%str] ; t7
@@ -2815,3 +2928,106 @@ cglobal vp9_idct_idct_32x32_add, 0, 6 + ARCH_X86_64 * 3, 16, 2048, dst, stride,
VP9_IDCT_IDCT_32x32_ADD_XMM sse2
VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx
; this is almost identical to VP9_STORE_2X, but it does two rows
; for slightly improved interleaving, and it omits vpermq since the
; input is DC so all values are identical
%macro VP9_STORE_YMM_DC_2X2 6 ; reg, tmp1, tmp2, tmp3, tmp4, zero
mova m%2, [dstq]
mova m%4, [dstq+strideq]
punpckhbw m%3, m%2, m%6
punpcklbw m%2, m%6
punpckhbw m%5, m%4, m%6
punpcklbw m%4, m%6
paddw m%3, m%1
paddw m%2, m%1
paddw m%5, m%1
paddw m%4, m%1
packuswb m%2, m%3
packuswb m%4, m%5
mova [dstq+strideq*0], m%2
mova [dstq+strideq*1], m%4
%endmacro
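
In scalar terms, each invocation adds the broadcast dc value to two 32-pixel
rows with unsigned saturation; a per-row model (illustrative C, not part of
the commit):

    /* scalar model of one row handled by VP9_STORE_YMM_DC_2X2:
     * widen (punpck{l,h}bw with zero), add dc (paddw), clip (packuswb) */
    static void add_dc_row_model(unsigned char *dst, int dc)
    {
        for (int x = 0; x < 32; x++) {
            int v = dst[x] + dc;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
    }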
%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
INIT_YMM avx2
cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
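; pick a sub-IDCT from the last nonzero coefficient (scan order): eob > 135
; needs the full 32x32, 1 < eob <= 135 fits in the top-left 16x16, eob 1 is dc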
cmp eobd, 135
jg .idctfull
cmp eobd, 1
jg .idct16x16
; dc-only case
mova m1, [pw_11585x2]
vpbroadcastw m0, [blockq]
pmulhrsw m0, m1
pmulhrsw m0, m1
pxor m5, m5
pmulhrsw m0, [pw_512]
movd [blockq], xm5
DEFINE_ARGS dst, stride, cnt
mov cntd, 16
.loop_dc:
VP9_STORE_YMM_DC_2X2 0, 1, 2, 3, 4, 5
lea dstq, [dstq+2*strideq]
dec cntd
jg .loop_dc
RET
DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
.idct16x16:
mov tmpq, rsp
VP9_IDCT32_1D blockq, 1, 16
mov stride30q, strideq ; stride
lea stride2q, [strideq*2] ; stride*2
shl stride30q, 5 ; stride*32
mov cntd, 2
sub stride30q, stride2q ; stride*30
.loop2_16x16:
mov dstq, dst_bakq
lea dst_endq, [dstq+stride30q]
VP9_IDCT32_1D tmpq, 2, 16
add dst_bakq, 16
add tmpq, 32
dec cntd
jg .loop2_16x16
; at the end of the loop, m1 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 64, 16, m1
RET
.idctfull:
mov cntd, 2
mov tmpq, rsp
.loop1_full:
VP9_IDCT32_1D blockq, 1
add blockq, 32
add tmpq, 1024
dec cntd
jg .loop1_full
sub blockq, 64
mov stride30q, strideq ; stride
lea stride2q, [strideq*2] ; stride*2
shl stride30q, 5 ; stride*32
mov cntd, 2
mov tmpq, rsp
sub stride30q, stride2q ; stride*30
.loop2_full:
mov dstq, dst_bakq
lea dst_endq, [dstq+stride30q]
VP9_IDCT32_1D tmpq, 2
add dst_bakq, 16
add tmpq, 32
dec cntd
jg .loop2_full
; at the end of the loop, m1 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 64, 32, m1
RET
%endif