Commit e5c9de2a authored by Christophe Gisquet, committed by Diego Biurrun

rv40: x86 SIMD for biweight

Provide MMX, SSE2 and SSSE3 versions, with a fast path when the weights are
multiples of 512 (which is often the case when the values round up nicely).

*_TIMER report for the 16x16 and 8x8 cases:
C:
9015 decicycles in 16, 524257 runs, 31 skips
2656 decicycles in 8, 524271 runs, 17 skips
MMX:
4156 decicycles in 16, 262090 runs, 54 skips
1206 decicycles in 8, 262131 runs, 13 skips
MMX on fast-path:
2760 decicycles in 16, 524222 runs, 66 skips
995 decicycles in 8, 524252 runs, 36 skips
SSE2:
2163 decicycles in 16, 262131 runs, 13 skips
832 decicycles in 8, 262137 runs, 7 skips
SSE2 with fast path:
1783 decicycles in 16, 524276 runs, 12 skips
711 decicycles in 8, 524283 runs, 5 skips
SSSE3:
2117 decicycles in 16, 262136 runs, 8 skips
814 decicycles in 8, 262143 runs, 1 skips
SSSE3 with fast path:
1315 decicycles in 16, 524285 runs, 3 skips
578 decicycles in 8, 524286 runs, 2 skips

This means around a 4% speedup for some sequences.
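For reference, the scalar operation these routines vectorize is, per pixel, a weighted average of the two prediction blocks with 14-bit weights, a bias of 16 and a shift by 5. The sketch below is illustrative (it is not lifted from the Libav C reference; the helper name and block-size parameter are made up), with the weight/source pairing read off the assembly further down:

#include <stdint.h>

/* Plain C sketch of the bi-weighting, for a size x size block. */
static void weight_block_c(uint8_t *dst, const uint8_t *src1,
                           const uint8_t *src2, int w1, int w2,
                           int size, int stride)
{
    for (int j = 0; j < size; j++) {
        for (int i = 0; i < size; i++)
            dst[i] = (((w1 * src1[i]) >> 9) + ((w2 * src2[i]) >> 9) + 16) >> 5;
        dst  += stride;
        src1 += stride;
        src2 += stride;
    }
}

When both weights are multiples of 512, (w * px) >> 9 equals (w >> 9) * px exactly, so the two per-pixel shifts disappear and the remaining 5-bit weights fit a plain 16-bit multiply (or a single pmaddubsw on SSSE3); this is the fast path measured above.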
Signed-off-by: Diego Biurrun <diego@biurrun.de>
parent 91bafb52
@@ -28,7 +28,8 @@ MMX-OBJS-$(CONFIG_RV30_DECODER) += x86/rv34dsp_init.o
 YASM-OBJS-$(CONFIG_RV30_DECODER) += x86/rv34dsp.o
 MMX-OBJS-$(CONFIG_RV40_DECODER) += x86/rv34dsp_init.o \
                                    x86/rv40dsp_init.o
-YASM-OBJS-$(CONFIG_RV40_DECODER) += x86/rv34dsp.o
+YASM-OBJS-$(CONFIG_RV40_DECODER) += x86/rv34dsp.o \
+                                   x86/rv40dsp.o
 YASM-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_yasm.o
...
;******************************************************************************
;* MMX/SSE2-optimized functions for the RV40 decoder
;* Copyright (C) 2012 Christophe Gisquet <christophe.gisquet@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
%include "x86util.asm"
SECTION_RODATA
align 16
shift_round: times 8 dw 1 << (16 - 6)
cextern pw_16
SECTION .text
; %1 = 5-bit weights?, %2 = dst, %3 = src1, %4 = src2, %5 = stride (only passed in the 8x8 xmm case)
%macro RV40_WCORE 4-5
movh m4, [%3 + 0]
movh m5, [%4 + 0]
%if %0 == 4
%define OFFSET mmsize / 2
%else
; 8x8 block and sse2, stride was provided
%define OFFSET %5
%endif
movh m6, [%3 + OFFSET]
movh m7, [%4 + OFFSET]
%if %1 == 0
; 14-bit weights: expand the pixels to words and shift left by 7 so that
; pmulhw's implicit >> 16 yields (pixel * weight) >> 9
punpcklbw m4, m0
punpcklbw m5, m0
punpcklbw m6, m0
punpcklbw m7, m0
psllw m4, 7
psllw m5, 7
psllw m6, 7
psllw m7, 7
pmulhw m4, m3
pmulhw m5, m2
pmulhw m6, m3
pmulhw m7, m2
paddw m4, m5
paddw m6, m7
%else
; 5-bit weights (both weights were multiples of 512 and have already been
; shifted down by 9)
%if cpuflag(ssse3)
punpcklbw m4, m5
punpcklbw m6, m7
pmaddubsw m4, m3
pmaddubsw m6, m3
%else
punpcklbw m4, m0
punpcklbw m5, m0
punpcklbw m6, m0
punpcklbw m7, m0
pmullw m4, m3
pmullw m5, m2
pmullw m6, m3
pmullw m7, m2
paddw m4, m5
paddw m6, m7
%endif
%endif
; bias and shift down: pmulhrsw against 1 << (16 - 6) computes (x + 16) >> 5
; in one instruction; the MMX/SSE2 path does the same with paddw pw_16 and
; psrlw 5
%if cpuflag(ssse3)
pmulhrsw m4, m1
pmulhrsw m6, m1
%else
paddw m4, m1
paddw m6, m1
psrlw m4, 5
psrlw m6, 5
%endif
packuswb m4, m6
%if %0 == 5
; Only reached for 8x8 blocks in the xmm case: store two 8-pixel rows
movh [%2 + 0], m4
movhps [%2 + %5], m4
%else
mova [%2], m4
%endif
%endmacro
%macro MAIN_LOOP 2
%if mmsize == 8
RV40_WCORE %2, r0, r1, r2
%if %1 == 16
RV40_WCORE %2, r0 + 8, r1 + 8, r2 + 8
%endif
; Prepare for next loop
add r0, r5
add r1, r5
add r2, r5
%else
%ifidn %1, 8
RV40_WCORE %2, r0, r1, r2, r5
; Prepare the next two lines
lea r0, [r0 + 2 * r5]
lea r1, [r1 + 2 * r5]
lea r2, [r2 + 2 * r5]
%else
RV40_WCORE %2, r0, r1, r2
; Prepare the next line
add r0, r5
add r1, r5
add r2, r5
%endif
%endif
dec r6
%endmacro
; rv40_weight_func_%1(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, int stride)
; %1=size %2=num of xmm regs
%macro RV40_WEIGHT 2
cglobal rv40_weight_func_%1, 6, 7, %2
%if cpuflag(ssse3)
mova m1, [shift_round]
%else
mova m1, [pw_16]
%endif
pxor m0, m0
mov r6, r3
or r6, r4
; The weights are 0.14 fixed-point fractions whose values depend on the pts.
; For timebases without rounding error (e.g. PAL), the fractions can be
; simplified and several operations avoided. Therefore, check here whether
; both weights are multiples of 2^9, in which case those simplifications
; apply (fast path below).
and r6, 0x1FF
; Set the loop counter
%if mmsize == 8
mov r6, %1
%else
mov r6, (%1 * %1) / mmsize
%endif
; Use the result of the AND above (the movs do not modify the flags)
jz .loop_512
movd m2, r3
movd m3, r4
SPLATW m2, m2
SPLATW m3, m3
.loop:
MAIN_LOOP %1, 0
jnz .loop
REP_RET
; Both weights are multiples of 512, which allows some shortcuts
.loop_512:
sar r3, 9
sar r4, 9
movd m2, r3
movd m3, r4
%if cpuflag(ssse3)
punpcklbw m3, m2
SPLATW m3, m3
%else
SPLATW m2, m2
SPLATW m3, m3
%endif
.loop2:
MAIN_LOOP %1, 1
jnz .loop2
REP_RET
%endmacro
INIT_MMX mmx
RV40_WEIGHT 8, 0
RV40_WEIGHT 16, 0
INIT_XMM sse2
RV40_WEIGHT 8, 8
RV40_WEIGHT 16, 8
INIT_XMM ssse3
RV40_WEIGHT 8, 8
RV40_WEIGHT 16, 8
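Three pieces of arithmetic in the file above are worth spelling out: the pmulhw scaling used for 14-bit weights, the pmulhrsw bias/shift used on SSSE3, and the multiples-of-512 test in RV40_WEIGHT. Below is a small C sketch of what these compute per 16-bit lane (the helper names are made up; value ranges assumed as in the macros):

#include <assert.h>
#include <stdint.h>

/* 14-bit weight path: the pixel is widened to 16 bits and shifted left by 7,
 * then pmulhw keeps the high 16 bits of the product. Since 128 * 512 == 65536,
 * this is exactly (px * w) >> 9. */
static int16_t weight14(uint8_t px, int w /* 0..16383 */)
{
    return (int16_t)(((px << 7) * w) >> 16);
}

/* SSSE3 bias/shift: pmulhrsw against shift_round = 1 << (16 - 6) computes
 * ((x * 1024 >> 14) + 1) >> 1, which for the non-negative sums produced here
 * equals (x + 16) >> 5 -- the same as paddw pw_16 followed by psrlw 5. */
static int16_t bias_shift(int x)
{
    return (int16_t)((((x * (1 << 10)) >> 14) + 1) >> 1);
}

/* Fast-path test from RV40_WEIGHT: both weights are multiples of 512 iff the
 * OR of their low 9 bits is zero. */
static int weights_are_512_multiples(int w1, int w2)
{
    return ((w1 | w2) & 0x1FF) == 0;
}

int main(void)
{
    for (int px = 0; px < 256; px++)
        for (int w = 0; w < 16384; w += 37)
            assert(weight14((uint8_t)px, w) == (px * w) >> 9);
    for (int x = 0; x < 8192; x++)
        assert(bias_shift(x) == (x + 16) >> 5);
    assert(weights_are_512_multiples(8192, 1024));
    assert(!weights_are_512_multiples(8192, 1000));
    return 0;
}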
@@ -40,6 +40,15 @@ void ff_avg_rv40_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
 void ff_avg_rv40_chroma_mc4_3dnow(uint8_t *dst, uint8_t *src,
                                   int stride, int h, int x, int y);
 
+#define DECLARE_WEIGHT(opt) \
+void ff_rv40_weight_func_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
+                                  int w1, int w2, int stride); \
+void ff_rv40_weight_func_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
+                                  int w1, int w2, int stride);
+DECLARE_WEIGHT(mmx)
+DECLARE_WEIGHT(sse2)
+DECLARE_WEIGHT(ssse3)
+
 void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp)
 {
 #if HAVE_YASM
@@ -48,6 +57,8 @@ void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp)
     if (mm_flags & AV_CPU_FLAG_MMX) {
         c->put_chroma_pixels_tab[0]  = ff_put_rv40_chroma_mc8_mmx;
         c->put_chroma_pixels_tab[1]  = ff_put_rv40_chroma_mc4_mmx;
+        c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_mmx;
+        c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_mmx;
     }
     if (mm_flags & AV_CPU_FLAG_MMX2) {
         c->avg_chroma_pixels_tab[0]  = ff_avg_rv40_chroma_mc8_mmx2;
@@ -56,5 +67,13 @@ void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp)
         c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow;
         c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow;
     }
+    if (mm_flags & AV_CPU_FLAG_SSE2) {
+        c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_sse2;
+        c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_sse2;
+    }
+    if (mm_flags & AV_CPU_FLAG_SSSE3) {
+        c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_ssse3;
+        c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_ssse3;
+    }
 #endif
 }
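Once registered, the decoder reaches the fastest available routine through the function-pointer table: index 0 holds the 16x16 variant and index 1 the 8x8 variant. A hypothetical call site (the real decoder supplies the pointers, weights and stride from its motion-compensation state):

/* Hypothetical usage: blend two predictions into dst with the routine
 * selected above for this CPU. */
c->rv40_weight_pixels_tab[0](dst, src1, src2, w1, w2, stride);   /* 16x16 */
c->rv40_weight_pixels_tab[1](dst, src1, src2, w1, w2, stride);   /*  8x8  */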