;******************************************************************************
;* SIMD-optimized SVQ1 encoder functions
;* Copyright (c) 2007 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

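; ssd_int8_vs_int16(pix1, pix2, size): sum of squared differences between a
; block of int8 samples (pix1) and a block of int16 samples (pix2), returned
; in eax.  size is assumed to be a positive multiple of 8, as each loop
; iteration consumes 8 samples.  Rough scalar equivalent:
;
;     score = 0;
;     for (i = 0; i < size; i++)
;         score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
;     return score;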
%macro SSD_INT8_VS_INT16 0
cglobal ssd_int8_vs_int16, 3, 3, 3, pix1, pix2, size
    pxor      m0, m0               ; m0 accumulates dword partial sums
.loop:
    sub       sizeq, 8             ; step 8 samples per iteration; 'jg' below reuses these flags
    movq      m1, [pix1q + sizeq]  ; load 8 int8 samples from pix1
    mova      m2, [pix2q + sizeq*2]          ; int16 samples from pix2 (4 for MMX, 8 for SSE2)
%if mmsize == 8
    ; MMX: the 8 int16 samples span two registers, so the int8 samples are
    ; sign-extended in two halves and both halves are accumulated.
    mova      m3, [pix2q + sizeq*2 + mmsize]
    punpckhbw m4, m1               ; high 4 samples into the high byte of each word
    punpcklbw m1, m1               ; low 4 samples into the high byte of each word
    psraw     m4, 8                ; sign-extend int8 -> int16 (stale low bytes shift out)
    psraw     m1, 8
    psubw     m3, m4               ; pix2 - pix1
    psubw     m2, m1
    pmaddwd   m3, m3               ; square and pairwise-add to dword sums
    pmaddwd   m2, m2
    paddd     m0, m3
    paddd     m0, m2
%else
    ; SSE2: all 8 samples fit in a single xmm register
    punpcklbw m1, m1               ; samples into the high byte of each word
    psraw     m1, 8                ; sign-extend int8 -> int16
    psubw     m2, m1               ; pix2 - pix1
    pmaddwd   m2, m2               ; square and pairwise-add to dword sums
    paddd     m0, m2
%endif
    jg .loop                       ; flags from the 'sub' above; SIMD ops leave EFLAGS untouched
    HADDD     m0, m1               ; horizontal sum of the dword partial sums
    movd      eax, m0              ; return value
    RET
%endmacro

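; Instantiate once per instruction set: INIT_MMX/INIT_XMM (from x86inc) set
; mmsize to 8 or 16, which picks the matching branch above and suffixes the
; symbol name accordingly.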
INIT_MMX mmx
SSD_INT8_VS_INT16
INIT_XMM sse2
SSD_INT8_VS_INT16