;******************************************************************************
;* Core video DSP functions
;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

; slow vertical extension loop function. Works with variable-width blocks,
; and does per-line reading/writing of source data

%macro V_COPY_ROW 2 ; type (top/body/bottom), h
.%1_y_loop:                                     ; do {
    mov              wq, r7mp                   ;   initialize w (r7mp = wmp)
.%1_x_loop:                                     ;   do {
    movu             m0, [srcq+wq]              ;     m0 = read($mmsize)
    movu      [dstq+wq], m0                     ;     write(m0, $mmsize)
    add              wq, mmsize                 ;     w -= $mmsize
    cmp              wq, -mmsize                ;   } while (w > $mmsize);
    jl .%1_x_loop
    movu             m0, [srcq-mmsize]          ;     m0 = read($mmsize)
    movu  [dstq-mmsize], m0                     ;     write(m0, $mmsize)
%ifidn %1, body                                 ;   if ($type == body) {
    add            srcq, src_strideq            ;     src += src_stride
%endif                                          ;   }
    add            dstq, dst_strideq            ;   dst += dst_stride
    dec              %2                         ; } while (--$h);
    jnz .%1_y_loop
%endmacro
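
; a rough C sketch of one V_COPY_ROW expansion (illustrative only; srcq and
; dstq have already been advanced past the row end by the caller, so the
; negative index w walks the row from its start):
;   do {
;       w = -width;
;       do {                                         // whole vectors
;           copy mmsize bytes from src+w to dst+w;
;           w += mmsize;
;       } while (w < -mmsize);
;       copy mmsize bytes from src-mmsize to dst-mmsize; // overlapping tail
;       if (type == body) src += src_stride;
;       dst += dst_stride;
;   } while (--h);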

%macro vvar_fn 0
; .----. <- zero
; |    |    <- top is copied from first line in body of source
; |----| <- start_y
; |    |    <- body is copied verbatim (line-by-line) from source
; |----| <- end_y
; |    |    <- bottom is copied from last line in body of source
; '----' <- bh
%if ARCH_X86_64
cglobal emu_edge_vvar, 7, 8, 1, dst, dst_stride, src, src_stride, \
                                start_y, end_y, bh, w
%else ; x86-32
cglobal emu_edge_vvar, 1, 6, 1, dst, src, start_y, end_y, bh, w
%define src_strideq r3mp
%define dst_strideq r1mp
    mov            srcq, r2mp
    mov        start_yq, r4mp
    mov          end_yq, r5mp
    mov             bhq, r6mp
%endif
    sub             bhq, end_yq                 ; bh    -= end_y
    sub          end_yq, start_yq               ; end_y -= start_y
    add            srcq, r7mp                   ; (r7mp = wmp)
    add            dstq, r7mp                   ; (r7mp = wmp)
    neg            r7mp                         ; (r7mp = wmp)
    test       start_yq, start_yq               ; if (start_q) {
    jz .body
    V_COPY_ROW      top, start_yq               ;   v_copy_row(top, start_yq)
.body:                                          ; }
    V_COPY_ROW     body, end_yq                 ; v_copy_row(body, end_yq)
    test            bhq, bhq                    ; if (bh) {
    jz .end
    sub            srcq, src_strideq            ;   src -= src_stride
    V_COPY_ROW   bottom, bhq                    ;   v_copy_row(bottom, bh)
.end:                                           ; }
    RET
%endmacro
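
; illustrative C model of emu_edge_vvar (a sketch, not the actual C
; prototype; names follow the cglobal arguments above):
;   bh    -= end_y;                     // rows to synthesize below the body
;   end_y -= start_y;                   // body rows actually read from src
;   if (start_y) repeat the first body row start_y times;  // top
;   copy end_y rows line-by-line from src;                  // body
;   if (bh)      repeat the last body row bh times;         // bottom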

%if ARCH_X86_32
INIT_MMX mmx
vvar_fn
%endif

INIT_XMM sse
vvar_fn

%macro hvar_fn 0
cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
    lea            dstq, [dstq+n_wordsq*2]
    neg        n_wordsq
    lea        start_xq, [start_xq+n_wordsq*2]
.y_loop:                                        ; do {
    ; FIXME also write a ssse3 version using pshufb
    movzx            wd, byte [dstq+start_xq]   ;   w = read(1)
    imul             wd, 0x01010101             ;   w *= 0x01010101
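    ; e.g. w = 0x000000ab becomes 0xabababab: the edge byte is
    ; replicated into all four byte lanes of the dword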
    movd             m0, wd
    mov              wq, n_wordsq               ;   initialize w
%if cpuflag(sse2)
    pshufd           m0, m0, q0000              ;   splat
%else ; mmx
    punpckldq        m0, m0                     ;   splat
%endif ; mmx/sse
.x_loop:                                        ;   do {
    movu    [dstq+wq*2], m0                     ;     write($reg, $mmsize)
    add              wq, mmsize/2               ;     w -= $mmsize/2
    cmp              wq, -mmsize/2              ;   } while (w > $mmsize/2)
    jl .x_loop
    movu  [dstq-mmsize], m0                     ;   write($reg, $mmsize)
    add            dstq, dst_strideq            ;   dst += dst_stride
    dec              hq                         ; } while (--h)
    jnz .y_loop
    RET
%endmacro
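
; rough per-row C model of emu_edge_hvar (sketch only; dstq was advanced by
; n_words*2 above, so negative word offsets walk the area to fill):
;   pix4 = dst[start_x] * 0x01010101;   // splat the edge byte
;   w = -n_words;
;   do {
;       store mmsize bytes of pix4 at dst + w*2;
;       w += mmsize/2;
;   } while (w < -mmsize/2);
;   store mmsize bytes of pix4 at dst - mmsize;  // overlapping tail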

%if ARCH_X86_32
INIT_MMX mmx
hvar_fn
%endif

INIT_XMM sse2
hvar_fn

; macro to read/write a horizontal number of pixels (%2) to/from registers
; on sse, - fills xmm0-15 for consecutive sets of 16 pixels
;         - if (%2 & 8)  fills 8 bytes into xmm$next
;         - if (%2 & 4)  fills 4 bytes into xmm$next
;         - if (%2 & 3)  fills 1, 2 or 4 bytes in eax
; on mmx, - fills mm0-7 for consecutive sets of 8 pixels
;         - if (%2 & 4)  fills 4 bytes into mm$next
;         - if (%2 & 3)  fills 1, 2 or 4 bytes in eax
; writing data out is in the same way
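; for example (illustrative expansion), "READ_NUM_BYTES body, 10" with
; mmsize == 8 produces:
;     movu mm0, [srcq+0]   ; bytes 0-7
;     movd mm1, [srcq+6]   ; bytes 6-9, overlapping the previous read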
%macro READ_NUM_BYTES 2
%assign %%off 0     ; offset in source buffer
%assign %%mmx_idx 0 ; mmx register index
%assign %%xmm_idx 0 ; xmm register index

%rep %2/mmsize
%if mmsize == 16
    movu   xmm %+ %%xmm_idx, [srcq+%%off]
%assign %%xmm_idx %%xmm_idx+1
%else ; mmx
    movu    mm %+ %%mmx_idx, [srcq+%%off]
%assign %%mmx_idx %%mmx_idx+1
%endif
%assign %%off %%off+mmsize
%endrep ; %2/mmsize

%if mmsize == 16
%if (%2-%%off) >= 8
%if %2 > 16 && (%2-%%off) > 8
    movu   xmm %+ %%xmm_idx, [srcq+%2-16]
%assign %%xmm_idx %%xmm_idx+1
%assign %%off %2
%else
    movq    mm %+ %%mmx_idx, [srcq+%%off]
%assign %%mmx_idx %%mmx_idx+1
%assign %%off %%off+8
%endif
%endif ; (%2-%%off) >= 8
%endif

%if (%2-%%off) >= 4
%if %2 > 8 && (%2-%%off) > 4
    movq    mm %+ %%mmx_idx, [srcq+%2-8]
%assign %%off %2
%else
    movd    mm %+ %%mmx_idx, [srcq+%%off]
%assign %%off %%off+4
%endif
%assign %%mmx_idx %%mmx_idx+1
%endif ; (%2-%%off) >= 4

%if (%2-%%off) >= 1
%if %2 >= 4
    movd mm %+ %%mmx_idx, [srcq+%2-4]
%elif (%2-%%off) == 1
    mov            valb, [srcq+%2-1]
%elif (%2-%%off) == 2
    mov            valw, [srcq+%2-2]
%elifidn %1, body
    mov            vald, [srcq+%2-3]
%else
    movd mm %+ %%mmx_idx, [srcq+%2-3]
%endif
%endif ; (%2-%%off) >= 1
%endmacro ; READ_NUM_BYTES

%macro WRITE_NUM_BYTES 2
%assign %%off 0     ; offset in destination buffer
%assign %%mmx_idx 0 ; mmx register index
%assign %%xmm_idx 0 ; xmm register index

%rep %2/mmsize
%if mmsize == 16
    movu   [dstq+%%off], xmm %+ %%xmm_idx
%assign %%xmm_idx %%xmm_idx+1
%else ; mmx
    movu   [dstq+%%off], mm %+ %%mmx_idx
%assign %%mmx_idx %%mmx_idx+1
%endif
%assign %%off %%off+mmsize
%endrep ; %2/mmsize

%if mmsize == 16
%if (%2-%%off) >= 8
%if %2 > 16 && (%2-%%off) > 8
    movu   [dstq+%2-16], xmm %+ %%xmm_idx
%assign %%xmm_idx %%xmm_idx+1
%assign %%off %2
%else
    movq   [dstq+%%off], mm %+ %%mmx_idx
%assign %%mmx_idx %%mmx_idx+1
%assign %%off %%off+8
%endif
%endif ; (%2-%%off) >= 8
%endif

%if (%2-%%off) >= 4
%if %2 > 8 && (%2-%%off) > 4
    movq    [dstq+%2-8], mm %+ %%mmx_idx
%assign %%off %2
%else
    movd   [dstq+%%off], mm %+ %%mmx_idx
%assign %%off %%off+4
%endif
%assign %%mmx_idx %%mmx_idx+1
%endif ; (%2-%%off) >= 4

%if (%2-%%off) >= 1
%if %2 >= 4
    movd    [dstq+%2-4], mm %+ %%mmx_idx
%elif (%2-%%off) == 1
    mov     [dstq+%2-1], valb
%elif (%2-%%off) == 2
    mov     [dstq+%2-2], valw
%elifidn %1, body
    mov     [dstq+%2-3], valw
    shr            vald, 16
    mov     [dstq+%2-1], valb
%else
    movd           vald, mm %+ %%mmx_idx
    mov     [dstq+%2-3], valw
    shr            vald, 16
    mov     [dstq+%2-1], valb
%endif
%endif ; (%2-%%off) >= 1
%endmacro ; WRITE_NUM_BYTES
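
; mirroring the read example above, "WRITE_NUM_BYTES body, 10" with
; mmsize == 8 stores mm0 at [dstq+0] and mm1 at [dstq+6] (overlapping tail)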

; vertical top/bottom extend and body copy fast loops
; these are function pointers to set-width line copy functions, i.e.
; they read a fixed number of pixels into set registers, and write
; those out into the destination buffer
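; the two macro arguments give the inclusive range of widths to generate;
; e.g. "VERTICAL_EXTEND 1, 15" below emits emu_edge_vfix1 through
; emu_edge_vfix15, one specialized function per block width in bytes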
%macro VERTICAL_EXTEND 2
%assign %%n %1
%rep 1+%2-%1
%if %%n <= 3
%if ARCH_X86_64
cglobal emu_edge_vfix %+ %%n, 6, 8, 0, dst, dst_stride, src, src_stride, \
                                       start_y, end_y, val, bh
    mov             bhq, r6mp                   ; r6mp = bhmp
%else ; x86-32
cglobal emu_edge_vfix %+ %%n, 0, 6, 0, val, dst, src, start_y, end_y, bh
    mov            dstq, r0mp
    mov            srcq, r2mp
    mov        start_yq, r4mp
    mov          end_yq, r5mp
    mov             bhq, r6mp
%define dst_strideq r1mp
%define src_strideq r3mp
%endif ; x86-64/32
%else
%if ARCH_X86_64
cglobal emu_edge_vfix %+ %%n, 7, 7, 1, dst, dst_stride, src, src_stride, \
                                       start_y, end_y, bh
%else ; x86-32
cglobal emu_edge_vfix %+ %%n, 1, 5, 1, dst, src, start_y, end_y, bh
    mov            srcq, r2mp
    mov        start_yq, r4mp
    mov          end_yq, r5mp
    mov             bhq, r6mp
%define dst_strideq r1mp
%define src_strideq r3mp
%endif ; x86-64/32
%endif
    ; FIXME move this to c wrapper?
    sub             bhq, end_yq                 ; bh    -= end_y
    sub          end_yq, start_yq               ; end_y -= start_y

    ; extend pixels above body
    test       start_yq, start_yq               ; if (start_y) {
    jz .body_loop
    READ_NUM_BYTES  top, %%n                    ;   $variable_regs = read($n)
.top_loop:                                      ;   do {
    WRITE_NUM_BYTES top, %%n                    ;     write($variable_regs, $n)
    add            dstq, dst_strideq            ;     dst += linesize
    dec        start_yq                         ;   } while (--start_y)
    jnz .top_loop                               ; }

    ; copy body pixels
.body_loop:                                     ; do {
    READ_NUM_BYTES  body, %%n                   ;   $variable_regs = read($n)
    WRITE_NUM_BYTES body, %%n                   ;   write($variable_regs, $n)
    add            dstq, dst_strideq            ;   dst += dst_stride
    add            srcq, src_strideq            ;   src += src_stride
    dec          end_yq                         ; } while (--end_y)
    jnz .body_loop

    ; copy bottom pixels
    test            bhq, bhq                    ; if (block_h) {
    jz .end
    sub            srcq, src_strideq            ;   src -= linesize
    READ_NUM_BYTES  bottom, %%n                 ;   $variable_regs = read($n)
.bottom_loop:                                   ;   do {
    WRITE_NUM_BYTES bottom, %%n                 ;     write($variable_regs, $n)
    add            dstq, dst_strideq            ;     dst += linesize
    dec             bhq                         ;   } while (--bh)
    jnz .bottom_loop                            ; }

.end:
    RET
%assign %%n %%n+1
%endrep ; 1+%2-%1
%endmacro ; VERTICAL_EXTEND

INIT_MMX mmx
VERTICAL_EXTEND 1, 15
%if ARCH_X86_32
VERTICAL_EXTEND 16, 22
%endif

INIT_XMM sse
VERTICAL_EXTEND 16, 22

; left/right (horizontal) fast extend functions
; these are essentially identical to the vertical extend ones above,
; just left/right separated because the number of pixels to extend is
; obviously not the same on both sides.
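; note that %%n below advances by 2 per iteration, so e.g. "H_EXTEND 2, 14"
; emits emu_edge_hfix2, emu_edge_hfix4, ..., emu_edge_hfix14 (even widths)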

%macro READ_V_PIXEL 2
    movzx          vald, byte %2
    imul           vald, 0x01010101
%if %1 >= 8
    movd             m0, vald
%if mmsize == 16
    pshufd           m0, m0, q0000
%else
    punpckldq        m0, m0
%endif ; mmsize == 16
%endif ; %1 >= 8
%endmacro ; READ_V_PIXEL

%macro WRITE_V_PIXEL 2
%assign %%off 0

%if %1 >= 8

%rep %1/mmsize
    movu     [%2+%%off], m0
%assign %%off %%off+mmsize
%endrep ; %1/mmsize

%if mmsize == 16
%if %1-%%off >= 8
%if %1 > 16 && %1-%%off > 8
    movu     [%2+%1-16], m0
%assign %%off %1
%else
    movq     [%2+%%off], m0
%assign %%off %%off+8
%endif
%endif ; %1-%%off >= 8
%endif ; mmsize == 16

%if %1-%%off >= 4
%if %1 > 8 && %1-%%off > 4
    movq      [%2+%1-8], m0
%assign %%off %1
%else
    movd     [%2+%%off], m0
%assign %%off %%off+4
%endif
%endif ; %1-%%off >= 4

%else ; %1 < 8

%rep %1/4
    mov      [%2+%%off], vald
%assign %%off %%off+4
%endrep ; %1/4

%endif ; %1 >=/< 8

%if %1-%%off == 2
    mov      [%2+%%off], valw
%endif ; %1-%%off == 2
%endmacro ; WRITE_V_PIXEL

%macro H_EXTEND 2
%assign %%n %1
%rep 1+(%2-%1)/2
cglobal emu_edge_hfix %+ %%n, 4, 5, 1, dst, dst_stride, start_x, bh, val
.loop_y:                                        ; do {
    READ_V_PIXEL    %%n, [dstq+start_xq]        ;   $variable_regs = read($n)
    WRITE_V_PIXEL   %%n, dstq                   ;   write($variable_regs, $n)
    add            dstq, dst_strideq            ;   dst += dst_stride
    dec             bhq                         ; } while (--bh)
    jnz .loop_y
    RET
%assign %%n %%n+2
%endrep ; 1+(%2-%1)/2
%endmacro ; H_EXTEND
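
; as a concrete example (illustrative), emu_edge_hfix4 performs per row:
;   vald = dst[start_x] * 0x01010101;  // READ_V_PIXEL 4: splat the edge byte
;   store vald at dst;                 // WRITE_V_PIXEL 4: one dword store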

INIT_MMX mmx
H_EXTEND 2, 14
%if ARCH_X86_32
H_EXTEND 16, 22
%endif

INIT_XMM sse2
H_EXTEND 16, 22

%macro PREFETCH_FN 1
cglobal prefetch, 3, 3, 0, buf, stride, h
.loop:
    %1      [bufq]
    add      bufq, strideq
    dec        hd
    jg .loop
    REP_RET
%endmacro
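
; cglobal should expand these to ff_prefetch_mmxext (using prefetcht0) and,
; on x86-32, ff_prefetch_3dnow (3dnow!'s prefetch); each walks h lines,
; touching one cache line per line of the buffer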

INIT_MMX mmxext
PREFETCH_FN prefetcht0
%if ARCH_X86_32
INIT_MMX 3dnow
PREFETCH_FN prefetch
%endif