;*****************************************************************************
;* SIMD-optimized MPEG encoding functions
;*****************************************************************************
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

cextern pw_1

SECTION .text

; int ff_pix_sum16_mmx(uint8_t *pix, int line_size)
; %1 = number of loops
; %2 = number of GPRs used
; Sums all 256 bytes of a 16x16 pixel block.
; In:  r0 = pix (block pointer), r1 = line_size (row stride in bytes)
; Out: eax = sum of the 256 pixel values (max 255*256 = 65280, fits in 16 bits)
; %1 = number of loop iterations
; %2 = number of GPRs used
; %3 = rows consumed per iteration (pointer advance multiplier for mmxext+)
%macro PIX_SUM16 3
cglobal pix_sum16, 2, %2, 6
    movsxdifnidn r1, r1d
    mov          r2, %1            ; r2 = loop counter
%if mmsize == 16
    lea          r3, [r1*3]        ; r3 = 3*stride, lets one iteration touch 4 rows
%endif
%if notcpuflag(xop)
    pxor         m5, m5            ; m5 = 0, used as psadbw/unpack zero operand
%endif
    pxor         m4, m4            ; m4 = running sum accumulator
.loop:
%if cpuflag(xop)
    ; XOP horizontal-add of unsigned bytes directly from memory, 4 rows/iter
    vphaddubq    m0, [r0]
    vphaddubq    m1, [r0+r1]
    vphaddubq    m2, [r0+r1*2]
    vphaddubq    m3, [r0+r3]
%else
    mova         m0, [r0]
%if mmsize == 8
    mova         m1, [r0+8]        ; mmx regs are 8 bytes: two loads per 16-byte row
%if cpuflag(mmxext)
    mova         m2, [r0+r1]       ; mmxext also handles a second row per iteration
    mova         m3, [r0+r1+8]
%endif
%else ; sse2
    mova         m1, [r0+r1]       ; 16-byte rows: one load each, 4 rows per iteration
    mova         m2, [r0+r1*2]
    mova         m3, [r0+r3]
%endif
%if cpuflag(mmxext)
    ; psadbw against zero = horizontal sum of 8 unsigned bytes into each 64-bit lane
    psadbw       m0, m5
    psadbw       m1, m5
    psadbw       m2, m5
    psadbw       m3, m5
%else ; mmx
    ; no psadbw on plain mmx: widen bytes to words, sum as words
    punpckhbw    m2, m0, m5
    punpcklbw    m0, m5
    punpckhbw    m3, m1, m5
    punpcklbw    m1, m5
%endif ; cpuflag(mmxext)
%endif ; cpuflag(xop)
    paddw        m1, m0
    paddw        m3, m2
    paddw        m3, m1
    paddw        m4, m3            ; fold this iteration into the accumulator
%if cpuflag(mmxext)
    lea          r0, [r0+r1*%3]    ; advance %3 rows
%else
    add          r0, r1            ; plain mmx processes one row per iteration
%endif
    dec r2
    jne .loop
%if mmsize == 16
    ; reduce the two 64-bit psadbw lanes of an xmm register
    pshufd       m0, m4, q0032
    paddd        m4, m0
%elif notcpuflag(mmxext)
    HADDW        m4, m5            ; horizontal add of the word accumulator (mmx path)
%endif
    movd        eax, m4
    RET
%endmacro
; Instantiate pix_sum16 per instruction set:
; args = (iterations, GPRs, rows advanced per iteration)
%if ARCH_X86_32
INIT_MMX mmx
PIX_SUM16 16, 3, 0
INIT_MMX mmxext
PIX_SUM16  8, 4, 2
%endif
INIT_XMM sse2
PIX_SUM16  4, 4, 4
%if HAVE_XOP_EXTERNAL
INIT_XMM xop
PIX_SUM16  4, 4, 4
%endif

; int ff_pix_norm1_mmx(uint8_t *pix, int line_size)
; %1 = number of xmm registers used
; %2 = number of loops
%macro PIX_NORM1 2
cglobal pix_norm1, 2, 3, %1
115
    movsxdifnidn r1, r1d
116
    mov          r2, %2
117
    pxor         m0, m0
118
    pxor         m5, m5
119 120
.loop:
    mova         m2, [r0+0]
121
%if mmsize == 8
122
    mova         m3, [r0+8]
123 124 125 126
%else
    mova         m3, [r0+r1]
%endif
    punpckhbw    m1, m2, m0
127
    punpcklbw    m2, m0
128 129
    punpckhbw    m4, m3, m0
    punpcklbw    m3, m0
130 131 132 133 134 135
    pmaddwd      m1, m1
    pmaddwd      m2, m2
    pmaddwd      m3, m3
    pmaddwd      m4, m4
    paddd        m2, m1
    paddd        m4, m3
136 137 138
    paddd        m5, m2
    paddd        m5, m4
%if mmsize == 8
139
    add          r0, r1
140 141 142
%else
    lea          r0, [r0+r1*2]
%endif
143 144
    dec r2
    jne .loop
145 146
    HADDD        m5, m1
    movd        eax, m5
147
    RET
148 149 150 151 152 153
%endmacro

; Instantiate pix_norm1: args = (xmm registers used, iterations)
INIT_MMX mmx
PIX_NORM1 0, 16
INIT_XMM sse2
PIX_NORM1 6, 8