;******************************************************************************
;* x86-optimized vertical line scaling functions
;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
;*                    Kieran Kunhya <kieran@kunhya.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

minshort:      times 8 dw 0x8000
yuv2yuvX_16_start:  times 4 dd 0x4000 - 0x40000000
yuv2yuvX_10_start:  times 4 dd 0x10000
yuv2yuvX_9_start:   times 4 dd 0x20000
yuv2yuvX_10_upper:  times 8 dw 0x3ff
yuv2yuvX_9_upper:   times 8 dw 0x1ff
pd_4:          times 4 dd 4
pd_4min0x40000:times 4 dd 4 - (0x40000)
pw_16:         times 8 dw 16
pw_32:         times 8 dw 32
pw_512:        times 8 dw 512
pw_1024:       times 8 dw 1024

SECTION .text

;-----------------------------------------------------------------------------
; vertical line scaling
;
; void yuv2plane1_<output_size>_<opt>(const int16_t *src, uint8_t *dst, int dstW,
;                                     const uint8_t *dither, int offset)
; and
; void yuv2planeX_<output_size>_<opt>(const int16_t *filter, int filterSize,
;                                     const int16_t **src, uint8_t *dst, int dstW,
;                                     const uint8_t *dither, int offset)
;
; Scale one or $filterSize lines of source data to generate one line of output
; data. The input is 15 bits in int16_t if $output_size is [8,10] and 19 bits in
; int32_t if $output_size is 16. $filter is 12 bits. $filterSize is a multiple
; of 2. $offset is either 0 or 3. $dither holds 8 values.
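;
; As a rough, illustrative C-style sketch (derived from the description above;
; not part of the build), the 8-bit yuv2planeX path computes, per output pixel:
;
;     int val = dither[(x + offset) & 7] << 12;    // seed accumulator with dither
;     for (j = 0; j < filterSize; j++)
;         val += src[j][x] * filter[j];            // 15-bit sample * 12-bit coeff
;     dst[x] = av_clip_uint8(val >> 19);           // 27 significant bits -> 8
;
; The 9/10/16-bit variants seed with a rounding constant instead of the dither
; and clip to the corresponding output range.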
;-----------------------------------------------------------------------------
%macro yuv2planeX_mainloop 2
.pixelloop_%2:
%assign %%i 0
    ; the rep here is for the 8-bit output MMX case, where dither covers
    ; 8 pixels but we can only handle 2 pixels per register, and thus 4
    ; pixels per iteration. In order to not have to keep track of where
    ; we are w.r.t. dithering, we unroll the MMX/8-bit loop x2.
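    ; (with 8-bit output, %%repcnt below evaluates to 16/8 = 2 for MMX and
    ; 16/16 = 1 for SSE2 and wider, so only the MMX case is actually unrolled)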
%if %1 == 8
%assign %%repcnt 16/mmsize
%else
%assign %%repcnt 1
%endif

%rep %%repcnt

%if %1 == 8
%if ARCH_X86_32
    mova            m2, [rsp+mmsize*(0+%%i)]
    mova            m1, [rsp+mmsize*(1+%%i)]
%else ; x86-64
    mova            m2,  m8
    mova            m1,  m_dith
%endif ; x86-32/64
%else ; %1 == 9/10/16
    mova            m1, [yuv2yuvX_%1_start]
    mova            m2,  m1
%endif ; %1 == 8/9/10/16
    movsx     cntr_reg,  fltsizem
.filterloop_%2_ %+ %%i:
    ; input pixels
    mov             r6, [srcq+gprsize*cntr_reg-2*gprsize]
%if %1 == 16
    mova            m3, [r6+r5*4]
    mova            m5, [r6+r5*4+mmsize]
%else ; %1 == 8/9/10
    mova            m3, [r6+r5*2]
%endif ; %1 == 8/9/10/16
    mov             r6, [srcq+gprsize*cntr_reg-gprsize]
%if %1 == 16
    mova            m4, [r6+r5*4]
    mova            m6, [r6+r5*4+mmsize]
%else ; %1 == 8/9/10
    mova            m4, [r6+r5*2]
%endif ; %1 == 8/9/10/16

    ; coefficients
    movd            m0, [filterq+2*cntr_reg-4] ; coeff[0], coeff[1]
%if %1 == 16
    pshuflw         m7,  m0,  0          ; coeff[0]
    pshuflw         m0,  m0,  0x55       ; coeff[1]
    pmovsxwd        m7,  m7              ; word -> dword
    pmovsxwd        m0,  m0              ; word -> dword

    pmulld          m3,  m7
    pmulld          m5,  m7
    pmulld          m4,  m0
    pmulld          m6,  m0

    paddd           m2,  m3
    paddd           m1,  m5
    paddd           m2,  m4
    paddd           m1,  m6
%else ; %1 == 10/9/8
    punpcklwd       m5,  m3,  m4
    punpckhwd       m3,  m4
    SPLATD          m0

    pmaddwd         m5,  m0
    pmaddwd         m3,  m0

    paddd           m2,  m5
    paddd           m1,  m3
%endif ; %1 == 8/9/10/16

    sub       cntr_reg,  2
    jg .filterloop_%2_ %+ %%i

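    ; scale the accumulator back down to %1 output bits: it holds
    ; 15+12 = 27 significant bits for 8-10 bit output and 19+12 = 31 bits
    ; for 16-bit output (see the description at the top of the file)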
%if %1 == 16
    psrad           m2,  31 - %1
    psrad           m1,  31 - %1
%else ; %1 == 10/9/8
    psrad           m2,  27 - %1
    psrad           m1,  27 - %1
%endif ; %1 == 8/9/10/16

%if %1 == 8
    packssdw        m2,  m1
    packuswb        m2,  m2
    movh   [dstq+r5*1],  m2
%else ; %1 == 9/10/16
%if %1 == 16
    packssdw        m2,  m1
    paddw           m2, [minshort]
%else ; %1 == 9/10
%if cpuflag(sse4)
    packusdw        m2,  m1
%else ; mmxext/sse2
    packssdw        m2,  m1
    pmaxsw          m2,  m6
%endif ; mmxext/sse2/sse4/avx
    pminsw          m2, [yuv2yuvX_%1_upper]
%endif ; %1 == 9/10/16
    mov%2   [dstq+r5*2],  m2
%endif ; %1 == 8/9/10/16

    add             r5,  mmsize/2
    sub             wd,  mmsize/2

%assign %%i %%i+2
%endrep
    jg .pixelloop_%2
%endmacro

%macro yuv2planeX_fn 3

%if ARCH_X86_32
%define cntr_reg fltsizeq
%define movsx mov
%else
%define cntr_reg r7
%define movsx movsxd
%endif

cglobal yuv2planeX_%1, %3, 8, %2, filter, fltsize, src, dst, w, dither, offset
%if %1 == 8 || %1 == 9 || %1 == 10
    pxor            m6,  m6
%endif ; %1 == 8/9/10

%if %1 == 8
%if ARCH_X86_32
%assign pad 0x2c - (stack_offset & 15)
    SUB             rsp, pad
%define m_dith m7
%else ; x86-64
%define m_dith m9
%endif ; x86-32

    ; create registers holding dither
    movq        m_dith, [ditherq]        ; dither
    test        offsetd, offsetd
    jz              .no_rot
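    ; offset is either 0 or 3 (see the description above); for the non-zero
    ; case, rotate the 8-byte dither pattern by 3 so it stays aligned with
    ; the output position of this slice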
%if mmsize == 16
    punpcklqdq  m_dith,  m_dith
%endif ; mmsize == 16
    PALIGNR     m_dith,  m_dith,  3,  m0
.no_rot:
%if mmsize == 16
    punpcklbw   m_dith,  m6
%if ARCH_X86_64
    punpcklwd       m8,  m_dith,  m6
    pslld           m8,  12
%else ; x86-32
    punpcklwd       m5,  m_dith,  m6
    pslld           m5,  12
%endif ; x86-32/64
    punpckhwd   m_dith,  m6
    pslld       m_dith,  12
%if ARCH_X86_32
    mova      [rsp+ 0],  m5
    mova      [rsp+16],  m_dith
%endif
%else ; mmsize == 8
    punpcklbw       m5,  m_dith,  m6
    punpckhbw   m_dith,  m6
    punpcklwd       m4,  m5,  m6
    punpckhwd       m5,  m6
    punpcklwd       m3,  m_dith,  m6
    punpckhwd   m_dith,  m6
    pslld           m4,  12
    pslld           m5,  12
    pslld           m3,  12
    pslld       m_dith,  12
    mova      [rsp+ 0],  m4
    mova      [rsp+ 8],  m5
    mova      [rsp+16],  m3
    mova      [rsp+24],  m_dith
%endif ; mmsize == 8/16
%endif ; %1 == 8

    xor             r5,  r5

%if mmsize == 8 || %1 == 8
    yuv2planeX_mainloop %1, a
%else ; mmsize == 16
    test          dstq, 15
    jnz .unaligned
    yuv2planeX_mainloop %1, a
    REP_RET
.unaligned:
    yuv2planeX_mainloop %1, u
%endif ; mmsize == 8/16

%if %1 == 8
%if ARCH_X86_32
    ADD             rsp, pad
    RET
%else ; x86-64
    REP_RET
%endif ; x86-32/64
%else ; %1 == 9/10/16
    REP_RET
%endif ; %1 == 8/9/10/16
%endmacro

%if ARCH_X86_32
INIT_MMX mmxext
yuv2planeX_fn  8,  0, 7
yuv2planeX_fn  9,  0, 5
yuv2planeX_fn 10,  0, 5
%endif

INIT_XMM sse2
yuv2planeX_fn  8, 10, 7
yuv2planeX_fn  9,  7, 5
yuv2planeX_fn 10,  7, 5

INIT_XMM sse4
yuv2planeX_fn  8, 10, 7
yuv2planeX_fn  9,  7, 5
yuv2planeX_fn 10,  7, 5
yuv2planeX_fn 16,  8, 5

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
yuv2planeX_fn  8, 10, 7
yuv2planeX_fn  9,  7, 5
yuv2planeX_fn 10,  7, 5
%endif
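
; yuv2plane1 handles the single-input-line case: add the dither (8-bit output)
; or a rounding constant and shift down to the output depth. As a rough,
; illustrative C-style sketch of the 8-bit path (not part of the build):
;
;     dst[x] = av_clip_uint8((src[x] + dither[(x + offset) & 7]) >> 7);
;
; For 9/10-bit output the shift is 15 - bpc with a clip to the valid range;
; for 16-bit output the 19-bit input is rounded and shifted right by 3.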

; %1=output-bpc, %2=alignment (u/a)
%macro yuv2plane1_mainloop 2
.loop_%2:
%if %1 == 8
    paddsw          m0, m2, [srcq+wq*2+mmsize*0]
    paddsw          m1, m3, [srcq+wq*2+mmsize*1]
    psraw           m0, 7
    psraw           m1, 7
    packuswb        m0, m1
    mov%2    [dstq+wq], m0
%elif %1 == 16
    paddd           m0, m4, [srcq+wq*4+mmsize*0]
    paddd           m1, m4, [srcq+wq*4+mmsize*1]
    paddd           m2, m4, [srcq+wq*4+mmsize*2]
    paddd           m3, m4, [srcq+wq*4+mmsize*3]
    psrad           m0, 3
    psrad           m1, 3
    psrad           m2, 3
    psrad           m3, 3
%if cpuflag(sse4) ; avx/sse4
    packusdw        m0, m1
    packusdw        m2, m3
%else ; mmx/sse2
    packssdw        m0, m1
    packssdw        m2, m3
    paddw           m0, m5
    paddw           m2, m5
%endif ; mmx/sse2/sse4/avx
    mov%2    [dstq+wq*2+mmsize*0], m0
    mov%2    [dstq+wq*2+mmsize*1], m2
%else ; %1 == 9/10
    paddsw          m0, m2, [srcq+wq*2+mmsize*0]
    paddsw          m1, m2, [srcq+wq*2+mmsize*1]
    psraw           m0, 15 - %1
    psraw           m1, 15 - %1
    pmaxsw          m0, m4
    pmaxsw          m1, m4
    pminsw          m0, m3
    pminsw          m1, m3
    mov%2    [dstq+wq*2+mmsize*0], m0
    mov%2    [dstq+wq*2+mmsize*1], m1
%endif
    add             wq, mmsize
    jl .loop_%2
%endmacro

%macro yuv2plane1_fn 3
cglobal yuv2plane1_%1, %3, %3, %2, src, dst, w, dither, offset
    movsxdifnidn    wq, wd
    add             wq, mmsize - 1
    and             wq, ~(mmsize - 1)
%if %1 == 8
    add           dstq, wq
%else ; %1 != 8
    lea           dstq, [dstq+wq*2]
%endif ; %1 == 8
%if %1 == 16
    lea           srcq, [srcq+wq*4]
%else ; %1 != 16
    lea           srcq, [srcq+wq*2]
%endif ; %1 == 16
    neg             wq

%if %1 == 8
    pxor            m4, m4               ; zero

    ; create registers holding dither
    movq            m3, [ditherq]        ; dither
    test       offsetd, offsetd
    jz              .no_rot
%if mmsize == 16
    punpcklqdq      m3, m3
%endif ; mmsize == 16
    PALIGNR         m3, m3, 3, m2
.no_rot:
%if mmsize == 8
    mova            m2, m3
    punpckhbw       m3, m4               ; byte->word
    punpcklbw       m2, m4               ; byte->word
%else
    punpcklbw       m3, m4
    mova            m2, m3
%endif
%elif %1 == 9
    pxor            m4, m4
    mova            m3, [pw_512]
    mova            m2, [pw_32]
%elif %1 == 10
    pxor            m4, m4
    mova            m3, [pw_1024]
    mova            m2, [pw_16]
%else ; %1 == 16
%if cpuflag(sse4) ; sse4/avx
    mova            m4, [pd_4]
%else ; mmx/sse2
    mova            m4, [pd_4min0x40000]
    mova            m5, [minshort]
%endif ; mmx/sse2/sse4/avx
%endif ; %1 == ..

    ; actual pixel scaling
%if mmsize == 8
    yuv2plane1_mainloop %1, a
%else ; mmsize == 16
    test          dstq, 15
    jnz .unaligned
    yuv2plane1_mainloop %1, a
    REP_RET
.unaligned:
    yuv2plane1_mainloop %1, u
%endif ; mmsize == 8/16
    REP_RET
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
yuv2plane1_fn  8, 0, 5
yuv2plane1_fn 16, 0, 3

INIT_MMX mmxext
yuv2plane1_fn  9, 0, 3
yuv2plane1_fn 10, 0, 3
%endif

INIT_XMM sse2
yuv2plane1_fn  8, 5, 5
yuv2plane1_fn  9, 5, 3
yuv2plane1_fn 10, 5, 3
yuv2plane1_fn 16, 6, 3

INIT_XMM sse4
yuv2plane1_fn 16, 5, 3

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
yuv2plane1_fn  8, 5, 5
yuv2plane1_fn  9, 5, 3
yuv2plane1_fn 10, 5, 3
yuv2plane1_fn 16, 5, 3
%endif