;******************************************************************************
;* x86-optimized vertical line scaling functions
;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
;*                    Kieran Kunhya <kieran@kunhya.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION_RODATA
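; A reading of the constants below: yuv2yuvX_<N>_start is the rounding bias
; folded into the accumulator before the final shift, yuv2yuvX_<N>_upper is
; the clip limit ((1 << N) - 1), and minshort re-biases signed words to
; unsigned in the 16-bit output paths; the pw_*/pd_* values are rounding
; biases for the yuv2plane1 loops further below.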

minshort:      times 8 dw 0x8000
yuv2yuvX_16_start:  times 4 dd 0x4000 - 0x40000000
yuv2yuvX_10_start:  times 4 dd 0x10000
yuv2yuvX_9_start:   times 4 dd 0x20000
yuv2yuvX_10_upper:  times 8 dw 0x3ff
yuv2yuvX_9_upper:   times 8 dw 0x1ff
pd_4:          times 4 dd 4
pd_4min0x40000:times 4 dd 4 - (0x40000)
pw_16:         times 8 dw 16
pw_32:         times 8 dw 32
pw_512:        times 8 dw 512
pw_1024:       times 8 dw 1024

SECTION .text

;-----------------------------------------------------------------------------
; vertical line scaling
;
; void yuv2plane1_<output_size>_<opt>(const int16_t *src, uint8_t *dst, int dstW,
;                                     const uint8_t *dither, int offset)
; and
; void yuv2planeX_<output_size>_<opt>(const int16_t *filter, int filterSize,
;                                     const int16_t **src, uint8_t *dst, int dstW,
;                                     const uint8_t *dither, int offset)
;
; Scale one or $filterSize lines of source data to generate one line of output
; data. The input is 15-bit in int16_t if $output_size is in [8,10] and 19-bit
; in int32_t if $output_size is 16. $filter is 12-bit. $filterSize is a
; multiple of 2. $offset is either 0 or 3. $dither holds 8 values.
;-----------------------------------------------------------------------------
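; In fixed-point terms, each yuv2planeX output pixel is
;   dst[i] = clip((sum_j src[j][i] * filter[j] + bias) >> (27 - N))
; for N-bit output (the shift is 31 - N in the 16-bit dword path); e.g. for
; 10-bit output the bias is 0x10000, half of 1 << 17. The 8-bit path seeds
; the accumulator with the dither values shifted left by 12 instead of a
; constant bias.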

%macro yuv2planeX_fn 3
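; %1 = output bit depth, %2 = number of xmm registers used, %3 = number of
; arguments loaded into registers (the 9/10/16-bit variants load only five,
; since they never read dither/offset)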

%if ARCH_X86_32
%define cntr_reg fltsizeq
%define movsx mov
%else
%define cntr_reg r7
%define movsx movsxd
%endif

cglobal yuv2planeX_%1, %3, 8, %2, filter, fltsize, src, dst, w, dither, offset
%if %1 == 8 || %1 == 9 || %1 == 10
    pxor            m6,  m6
%endif ; %1 == 8/9/10

%if %1 == 8
%if ARCH_X86_32
%assign pad 0x2c - (stack_offset & 15)
    SUB             rsp, pad
%define m_dith m7
%else ; x86-64
%define m_dith m9
%endif ; x86-32

    ; create registers holding dither
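    ; (when offset is 3, the dither bytes are rotated by 3 below so that
    ; those lines use a shifted phase of the dither pattern)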
    movq        m_dith, [ditherq]        ; dither
    test        offsetd, offsetd
    jz              .no_rot
%if mmsize == 16
    punpcklqdq  m_dith,  m_dith
%endif ; mmsize == 16
    PALIGNR     m_dith,  m_dith,  3,  m0
.no_rot:
%if mmsize == 16
    punpcklbw   m_dith,  m6
%if ARCH_X86_64
    punpcklwd       m8,  m_dith,  m6
    pslld           m8,  12
%else ; x86-32
    punpcklwd       m5,  m_dith,  m6
    pslld           m5,  12
%endif ; x86-32/64
    punpckhwd   m_dith,  m6
    pslld       m_dith,  12
%if ARCH_X86_32
    mova      [rsp+ 0],  m5
    mova      [rsp+16],  m_dith
%endif
%else ; mmsize == 8
    punpcklbw       m5,  m_dith,  m6
    punpckhbw   m_dith,  m6
    punpcklwd       m4,  m5,  m6
    punpckhwd       m5,  m6
    punpcklwd       m3,  m_dith,  m6
    punpckhwd   m_dith,  m6
    pslld           m4,  12
    pslld           m5,  12
    pslld           m3,  12
    pslld       m_dith,  12
    mova      [rsp+ 0],  m4
    mova      [rsp+ 8],  m5
    mova      [rsp+16],  m3
    mova      [rsp+24],  m_dith
%endif ; mmsize == 8/16
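    ; the dither bytes are now zero-extended dwords shifted left by 12,
    ; i.e. in the same fixed-point domain as the coeff*sample products
    ; they get added to in the filter loop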
%endif ; %1 == 8

    xor             r5,  r5

.pixelloop:
%assign %%i 0
    ; the %rep below is for the 8-bit output MMX case, where dither covers
    ; 8 pixels but each register only accumulates 2 pixels, i.e. 4 pixels
    ; per iteration; to avoid having to track where we are w.r.t. dithering,
    ; we unroll the MMX/8-bit loop x2.
%if %1 == 8
%assign %%repcnt 16/mmsize
%else
%assign %%repcnt 1
%endif

%rep %%repcnt

%if %1 == 8
%if ARCH_X86_32
    mova            m2, [rsp+mmsize*(0+%%i)]
    mova            m1, [rsp+mmsize*(1+%%i)]
%else ; x86-64
    mova            m2,  m8
    mova            m1,  m_dith
%endif ; x86-32/64
%else ; %1 == 9/10/16
    mova            m1, [yuv2yuvX_%1_start]
    mova            m2,  m1
%endif ; %1 == 8/9/10/16
    movsx     cntr_reg,  fltsizem
.filterloop_ %+ %%i:
    ; input pixels
    mov             r6, [srcq+gprsize*cntr_reg-2*gprsize]
%if %1 == 16
    mova            m3, [r6+r5*4]
    mova            m5, [r6+r5*4+mmsize]
%else ; %1 == 8/9/10
    mova            m3, [r6+r5*2]
%endif ; %1 == 8/9/10/16
    mov             r6, [srcq+gprsize*cntr_reg-gprsize]
%if %1 == 16
    mova            m4, [r6+r5*4]
    mova            m6, [r6+r5*4+mmsize]
%else ; %1 == 8/9/10
    mova            m4, [r6+r5*2]
%endif ; %1 == 8/9/10/16

    ; coefficients
    movd            m0, [filterq+2*cntr_reg-4] ; coeff[0], coeff[1]
%if %1 == 16
    pshuflw         m7,  m0,  0          ; coeff[0]
    pshuflw         m0,  m0,  0x55       ; coeff[1]
    pmovsxwd        m7,  m7              ; word -> dword
    pmovsxwd        m0,  m0              ; word -> dword

    pmulld          m3,  m7
    pmulld          m5,  m7
    pmulld          m4,  m0
    pmulld          m6,  m0

    paddd           m2,  m3
    paddd           m1,  m5
    paddd           m2,  m4
    paddd           m1,  m6
%else ; %1 == 10/9/8
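    ; interleave the two taps' samples word-wise so a single pmaddwd per
    ; register computes src[j][i]*coeff[j] + src[j+1][i]*coeff[j+1] as
    ; one dword per pixel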
    punpcklwd       m5,  m3,  m4
    punpckhwd       m3,  m4
    SPLATD          m0,  m0

    pmaddwd         m5,  m0
    pmaddwd         m3,  m0

    paddd           m2,  m5
    paddd           m1,  m3
%endif ; %1 == 8/9/10/16

    sub       cntr_reg,  2
    jg .filterloop_ %+ %%i

%if %1 == 16
    psrad           m2,  31 - %1
    psrad           m1,  31 - %1
%else ; %1 == 10/9/8
    psrad           m2,  27 - %1
    psrad           m1,  27 - %1
%endif ; %1 == 8/9/10/16
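    ; the shifts above work because 15-bit samples times 12-bit
    ; coefficients occupy 27 significant bits (19 + 12 = 31 in the dword
    ; path), so dropping 27 - %1 (resp. 31 - %1) bits leaves %1-bit output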

%if %1 == 8
    packssdw        m2,  m1
    packuswb        m2,  m2
    movh   [dstq+r5*1],  m2
%else ; %1 == 9/10/16
%if %1 == 16
    packssdw        m2,  m1
    paddw           m2, [minshort]
%else ; %1 == 9/10
%if cpuflag(sse4)
    packusdw        m2,  m1
%else ; mmx2/sse2
    packssdw        m2,  m1
    pmaxsw          m2,  m6
%endif ; mmx2/sse2/sse4/avx
    pminsw          m2, [yuv2yuvX_%1_upper]
%endif ; %1 == 9/10/16
    mova   [dstq+r5*2],  m2
%endif ; %1 == 8/9/10/16

    add             r5,  mmsize/2
    sub             wd,  mmsize/2
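    ; each %rep iteration emits mmsize/2 pixels: half a register of
    ; packed bytes for 8-bit output, a full register of words otherwise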

%assign %%i %%i+2
%endrep
    jg .pixelloop

%if %1 == 8
%if ARCH_X86_32
    ADD             rsp, pad
    RET
%else ; x86-64
    REP_RET
%endif ; x86-32/64
%else ; %1 == 9/10/16
    REP_RET
%endif ; %1 == 8/9/10/16
%endmacro

%define PALIGNR PALIGNR_MMX
%if ARCH_X86_32
INIT_MMX mmx2
yuv2planeX_fn  8,  0, 7
yuv2planeX_fn  9,  0, 5
yuv2planeX_fn 10,  0, 5
%endif

INIT_XMM sse2
yuv2planeX_fn  8, 10, 7
yuv2planeX_fn  9,  7, 5
yuv2planeX_fn 10,  7, 5

%define PALIGNR PALIGNR_SSSE3
INIT_XMM sse4
yuv2planeX_fn  8, 10, 7
yuv2planeX_fn  9,  7, 5
yuv2planeX_fn 10,  7, 5
yuv2planeX_fn 16,  8, 5

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
yuv2planeX_fn  8, 10, 7
yuv2planeX_fn  9,  7, 5
yuv2planeX_fn 10,  7, 5
%endif

; %1 = output bpc, %2 = alignment (u/a)
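;
; For 8-bit output the loop adds the pre-expanded dither words, shifts the
; sum right by 7 and packs to unsigned bytes; for 9/10-bit it adds a rounding
; bias, shifts right by 15 - bpc and clamps to the valid range; for 16-bit it
; adds a dword rounding bias, shifts right by 3 and packs to unsigned words,
; using a sign-bias trick on pre-SSE4 CPUs that lack packusdw.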
%macro yuv2plane1_mainloop 2
.loop_%2:
%if %1 == 8
    paddsw          m0, m2, [srcq+wq*2+mmsize*0]
    paddsw          m1, m3, [srcq+wq*2+mmsize*1]
    psraw           m0, 7
    psraw           m1, 7
    packuswb        m0, m1
    mov%2    [dstq+wq], m0
%elif %1 == 16
    paddd           m0, m4, [srcq+wq*4+mmsize*0]
    paddd           m1, m4, [srcq+wq*4+mmsize*1]
    paddd           m2, m4, [srcq+wq*4+mmsize*2]
    paddd           m3, m4, [srcq+wq*4+mmsize*3]
    psrad           m0, 3
    psrad           m1, 3
    psrad           m2, 3
    psrad           m3, 3
%if cpuflag(sse4) ; avx/sse4
    packusdw        m0, m1
    packusdw        m2, m3
%else ; mmx/sse2
    packssdw        m0, m1
    packssdw        m2, m3
    paddw           m0, m5
    paddw           m2, m5
%endif ; mmx/sse2/sse4/avx
    mov%2    [dstq+wq*2+mmsize*0], m0
    mov%2    [dstq+wq*2+mmsize*1], m2
%else ; %1 == 9/10
    paddsw          m0, m2, [srcq+wq*2+mmsize*0]
    paddsw          m1, m2, [srcq+wq*2+mmsize*1]
    psraw           m0, 15 - %1
    psraw           m1, 15 - %1
    pmaxsw          m0, m4
    pmaxsw          m1, m4
    pminsw          m0, m3
    pminsw          m1, m3
    mov%2    [dstq+wq*2+mmsize*0], m0
    mov%2    [dstq+wq*2+mmsize*1], m1
%endif
    add             wq, mmsize
    jl .loop_%2
%endmacro

%macro yuv2plane1_fn 3
cglobal yuv2plane1_%1, %3, %3, %2, src, dst, w, dither, offset
    movsxdifnidn    wq, wd
    add             wq, mmsize - 1
    and             wq, ~(mmsize - 1)
%if %1 == 8
    add           dstq, wq
%else ; %1 != 8
    lea           dstq, [dstq+wq*2]
%endif ; %1 == 8
%if %1 == 16
    lea           srcq, [srcq+wq*4]
%else ; %1 != 16
    lea           srcq, [srcq+wq*2]
%endif ; %1 == 16
    neg             wq
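    ; w has been rounded up to a whole number of registers and src/dst
    ; advanced to the end of the line; negating wq lets the main loop
    ; count upwards from a negative offset and stop once wq reaches zero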

%if %1 == 8
    pxor            m4, m4               ; zero

    ; create registers holding dither
    movq            m3, [ditherq]        ; dither
    test       offsetd, offsetd
    jz              .no_rot
%if mmsize == 16
    punpcklqdq      m3, m3
%endif ; mmsize == 16
    PALIGNR_MMX     m3, m3, 3, m2
.no_rot:
%if mmsize == 8
    mova            m2, m3
    punpckhbw       m3, m4               ; byte->word
    punpcklbw       m2, m4               ; byte->word
%else
    punpcklbw       m3, m4
    mova            m2, m3
%endif
%elif %1 == 9
    pxor            m4, m4
    mova            m3, [pw_512]
    mova            m2, [pw_32]
%elif %1 == 10
    pxor            m4, m4
    mova            m3, [pw_1024]
    mova            m2, [pw_16]
%else ; %1 == 16
%if cpuflag(sse4) ; sse4/avx
    mova            m4, [pd_4]
%else ; mmx/sse2
    mova            m4, [pd_4min0x40000]
    mova            m5, [minshort]
%endif ; mmx/sse2/sse4/avx
%endif ; %1 == ..
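    ; each rounding bias is half of the final shift step: pw_32 before
    ; psraw 6 for 9-bit, pw_16 before psraw 5 for 10-bit, pd_4 before
    ; psrad 3 for 16-bit; the 8-bit path adds its dither words instead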

    ; actual pixel scaling
%if mmsize == 8
    yuv2plane1_mainloop %1, a
%else ; mmsize == 16
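    ; check dst alignment once and dispatch to an aligned (mova) or
    ; unaligned (movu) store loop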
    test          dstq, 15
    jnz .unaligned
    yuv2plane1_mainloop %1, a
    REP_RET
.unaligned:
    yuv2plane1_mainloop %1, u
%endif ; mmsize == 8/16
    REP_RET
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
yuv2plane1_fn  8, 0, 5
yuv2plane1_fn 16, 0, 3

INIT_MMX mmx2
yuv2plane1_fn  9, 0, 3
yuv2plane1_fn 10, 0, 3
%endif

INIT_XMM sse2
yuv2plane1_fn  8, 5, 5
yuv2plane1_fn  9, 5, 3
yuv2plane1_fn 10, 5, 3
yuv2plane1_fn 16, 6, 3

INIT_XMM sse4
yuv2plane1_fn 16, 5, 3

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
yuv2plane1_fn  8, 5, 5
yuv2plane1_fn  9, 5, 3
yuv2plane1_fn 10, 5, 3
yuv2plane1_fn 16, 5, 3
%endif