; /*
; * SIMD optimized idct functions for HEVC decoding
; * Copyright (c) 2014 Pierre-Edouard LEPERE
; * Copyright (c) 2014 James Almer
; *
; * This file is part of FFmpeg.
; *
; * FFmpeg is free software; you can redistribute it and/or
; * modify it under the terms of the GNU Lesser General Public
; * License as published by the Free Software Foundation; either
; * version 2.1 of the License, or (at your option) any later version.
; *
; * FFmpeg is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
; * Lesser General Public License for more details.
; *
; * You should have received a copy of the GNU Lesser General Public
; * License along with FFmpeg; if not, write to the Free Software
; * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
; */
%include "libavutil/x86/x86util.asm"

SECTION_TEXT 32

; void ff_hevc_idctHxW_dc_{8,10,12}_<opt>(int16_t *coeffs)
; %1 = HxW
; %2 = number of loops
; %3 = bitdepth
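;
; If only the DC coefficient of a block is non-zero, every output sample of
; the inverse transform is the same value, so the whole HxW block can be
; filled with one splatted constant.  The value computed below is
;     dc = (coeffs[0] + (1 << (14 - bitdepth)) + 1) >> (15 - bitdepth)
; which folds the two rounding/shift stages of the scalar DC-only path
; (roughly (((coeffs[0] + 1) >> 1) + (1 << (13 - bitdepth))) >> (14 - bitdepth))
; into a single add and arithmetic shift.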
%macro IDCT_DC 3
cglobal hevc_idct%1x%1_dc_%3, 1, 2, 1, coeff, tmp
    movsx             tmpq, word [coeffq]
    add               tmpw, ((1 << 14-%3) + 1)
    sar               tmpw, (15-%3)
    movd               xm0, tmpd
    SPLATW              m0, xm0
    DEFINE_ARGS coeff, cnt
    mov               cntd, %2
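; Each iteration of the loop stores 8 * mmsize bytes of the splatted DC value,
; so %2 is expected to equal %1 * %1 * 2 / (8 * mmsize) -- e.g. 16 iterations
; for a 32x32 block of int16_t with 16-byte XMM stores.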
.loop:
    mova [coeffq+mmsize*0], m0
    mova [coeffq+mmsize*1], m0
    mova [coeffq+mmsize*2], m0
    mova [coeffq+mmsize*3], m0
    mova [coeffq+mmsize*4], m0
    mova [coeffq+mmsize*5], m0
    mova [coeffq+mmsize*6], m0
    mova [coeffq+mmsize*7], m0
    add  coeffq, mmsize*8
    dec  cntd
    jg  .loop
    RET
%endmacro

; %1 = HxW
; %2 = bitdepth
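;
; No-loop variant for the blocks that fit in a fixed number of stores: four
; mmsize-byte stores cover a 4x4 block of int16_t with MMX registers, and the
; %if mmsize == 16 block below adds four more so that eight XMM stores cover
; an 8x8 block.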
%macro IDCT_DC_NL 2 ; No loop
cglobal hevc_idct%1x%1_dc_%2, 1, 2, 1, coeff, tmp
    movsx             tmpq, word [coeffq]
    add               tmpw, ((1 << 14-%2) + 1)
    sar               tmpw, (15-%2)
    movd                m0, tmpd
    SPLATW              m0, xm0
    mova [coeffq+mmsize*0], m0
    mova [coeffq+mmsize*1], m0
    mova [coeffq+mmsize*2], m0
    mova [coeffq+mmsize*3], m0
%if mmsize == 16
    mova [coeffq+mmsize*4], m0
    mova [coeffq+mmsize*5], m0
    mova [coeffq+mmsize*6], m0
    mova [coeffq+mmsize*7], m0
%endif
    RET
%endmacro

; 8-bit
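; (The same pattern is repeated for 10-bit and 12-bit below: MMXEXT for 4x4
; and a looped 8x8, SSE2 for 8x8/16x16/32x32, and AVX2 for 16x16/32x32 when
; HAVE_AVX2_EXTERNAL, the wider stores halving the SSE2 loop counts.)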
INIT_MMX mmxext
IDCT_DC_NL  4,      8
IDCT_DC     8,  2,  8

INIT_XMM sse2
IDCT_DC_NL  8,      8
IDCT_DC    16,  4,  8
IDCT_DC    32, 16,  8

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
IDCT_DC    16,  2,  8
IDCT_DC    32,  8,  8
%endif ;HAVE_AVX2_EXTERNAL

; 10-bit
INIT_MMX mmxext
IDCT_DC_NL  4,     10
IDCT_DC     8,  2, 10

INIT_XMM sse2
IDCT_DC_NL  8,     10
IDCT_DC    16,  4, 10
IDCT_DC    32, 16, 10

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
IDCT_DC    16,  2, 10
IDCT_DC    32,  8, 10
%endif ;HAVE_AVX2_EXTERNAL

; 12-bit
INIT_MMX mmxext
IDCT_DC_NL  4,     12
IDCT_DC     8,  2, 12

INIT_XMM sse2
IDCT_DC_NL  8,     12
IDCT_DC    16,  4, 12
IDCT_DC    32, 16, 12

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
IDCT_DC    16,  2, 12
IDCT_DC    32,  8, 12
%endif ;HAVE_AVX2_EXTERNAL