;*****************************************************************************
;* MMX optimized DSP utils
;*****************************************************************************
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

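; DIFF_PIXELS_1 %1(dst/pix1 bytes), %2(scratch/pix2 bytes), %3(pix1 addr), %4(pix2 addr)
; computes the signed word differences pix1[i] - pix2[i] into %1.  Interleaving
; both operands with the pix1 bytes makes the duplicated high bytes cancel in
; the word subtraction, so no zero register is needed for the widening.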
%macro DIFF_PIXELS_1 4
    movh            %1, %3
    movh            %2, %4
    punpcklbw       %2, %1
    punpcklbw       %1, %1
    psubw           %1, %2
%endmacro

; %1=uint8_t *pix1, %2=uint8_t *pix2, %3=static offset, %4=stride, %5=stride*3
; %6=temporary storage location
; this macro requires mmsize bytes of aligned stack space at %6 (except on SSE+x86-64)
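; on return, m0-m7 each hold one row of word differences (4 columns per call
; with MMX, 8 with SSE2); %3 selects the starting column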
%macro DIFF_PIXELS_8 6
    DIFF_PIXELS_1   m0, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m1, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m2, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    add             %1, %5
    add             %2, %5
    DIFF_PIXELS_1   m3, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m4, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m5, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    DIFF_PIXELS_1   m6, m7, [%1+%5  +%3], [%2+%5  +%3]
%ifdef m8
    DIFF_PIXELS_1   m7, m8, [%1+%4*4+%3], [%2+%4*4+%3]
%else
    mova          [%6], m0
    DIFF_PIXELS_1   m7, m0, [%1+%4*4+%3], [%2+%4*4+%3]
    mova            m0, [%6]
%endif
    sub             %1, %5
    sub             %2, %5
%endmacro

%macro HADAMARD8 0
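    ; 8-point Hadamard butterfly across m0-m7 (three SUMSUB_BADC stages); each
    ; register holds one of the 8 transform inputs, with the word lanes acting
    ; as independent columns.  Run once on rows and once on columns (after a
    ; transpose) to get the 2-D transform used for SATD.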
    SUMSUB_BADC       w, 0, 1, 2, 3
    SUMSUB_BADC       w, 4, 5, 6, 7
    SUMSUB_BADC       w, 0, 2, 1, 3
    SUMSUB_BADC       w, 4, 6, 5, 7
    SUMSUB_BADC       w, 0, 4, 1, 5
    SUMSUB_BADC       w, 2, 6, 3, 7
%endmacro

%macro ABS1_SUM 3
    ABS1            %1, %2
    paddusw         %3, %1
%endmacro

%macro ABS2_SUM 6
    ABS2            %1, %2, %3, %4
    paddusw         %5, %1
    paddusw         %6, %2
%endmacro

%macro ABS_SUM_8x8_64 1
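    ; |m0|+...+|m7| accumulated into m0 with saturating word adds; m8/m9 are
    ; scratch, so this variant needs x86-64.  The %1 scratch address is unused
    ; and only kept for signature compatibility with ABS_SUM_8x8_32.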
    ABS2            m0, m1, m8, m9
    ABS2_SUM        m2, m3, m8, m9, m0, m1
    ABS2_SUM        m4, m5, m8, m9, m0, m1
    ABS2_SUM        m6, m7, m8, m9, m0, m1
    paddusw         m0, m1
%endmacro

%macro ABS_SUM_8x8_32 1
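    ; same result as ABS_SUM_8x8_64, but m7 is spilled to [%1] so everything
    ; fits into the 8 registers available with MMX / x86-32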
    mova          [%1], m7
    ABS1            m0, m7
    ABS1            m1, m7
    ABS1_SUM        m2, m7, m0
    ABS1_SUM        m3, m7, m1
    ABS1_SUM        m4, m7, m0
    ABS1_SUM        m5, m7, m1
    ABS1_SUM        m6, m7, m0
    mova            m2, [%1]
    ABS1_SUM        m2, m7, m1
    paddusw         m0, m1
%endmacro

; FIXME: HSUM saturates at 64k, while an 8x8 hadamard or dct block can get up to
; about 100k on extreme inputs. But that is very unlikely to occur in natural
; video, and it is even less likely that no alternative MVs/modes with a lower
; cost would be available.
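; HSUM %1(input, clobbered), %2(scratch), %3(GPR destination):
; horizontal sum of the words of %1 using saturating adds; the total ends up in
; the low word of %1 and is copied to %3 (callers mask it with 0xFFFF)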
%macro HSUM 3
%if cpuflag(sse2)
    movhlps         %2, %1
    paddusw         %1, %2
    pshuflw         %2, %1, 0xE
    paddusw         %1, %2
    pshuflw         %2, %1, 0x1
    paddusw         %1, %2
    movd            %3, %1
%elif cpuflag(mmxext)
    pshufw          %2, %1, 0xE
    paddusw         %1, %2
    pshufw          %2, %1, 0x1
    paddusw         %1, %2
    movd            %3, %1
%elif cpuflag(mmx)
    mova            %2, %1
    psrlq           %1, 32
    paddusw         %1, %2
    mova            %2, %1
    psrlq           %1, 16
    paddusw         %1, %2
    movd            %3, %1
%endif
%endmacro

%macro STORE4 5
    mova [%1+mmsize*0], %2
    mova [%1+mmsize*1], %3
    mova [%1+mmsize*2], %4
    mova [%1+mmsize*3], %5
%endmacro

%macro LOAD4 5
    mova            %2, [%1+mmsize*0]
    mova            %3, [%1+mmsize*1]
    mova            %4, [%1+mmsize*2]
    mova            %5, [%1+mmsize*3]
%endmacro

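; hadamard8_16_wrapper %1(xmm registers used), %2(mmsize-blocks of stack
; scratch needed when m8 is unavailable): emits hadamard8_diff, which scores a
; single 8x8 block, and hadamard8_diff16, which sums the per-8x8 results over a
; 16x8 area, or over 16x16 when h (r4d) == 16.  Both call the hadamard8x8_diff
; worker emitted by HADAMARD8_DIFF below.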
%macro hadamard8_16_wrapper 2
cglobal hadamard8_diff, 4, 4, %1
%ifndef m8
    %assign pad %2*mmsize-(4+stack_offset&(mmsize-1))
    SUB            rsp, pad
%endif
    call hadamard8x8_diff %+ SUFFIX
%ifndef m8
    ADD            rsp, pad
%endif
    RET

cglobal hadamard8_diff16, 5, 6, %1
%ifndef m8
    %assign pad %2*mmsize-(4+stack_offset&(mmsize-1))
    SUB            rsp, pad
%endif

    call hadamard8x8_diff %+ SUFFIX
    mov            r5d, eax

    add             r1, 8
    add             r2, 8
    call hadamard8x8_diff %+ SUFFIX
    add            r5d, eax

    cmp            r4d, 16
    jne .done

    lea             r1, [r1+r3*8-8]
    lea             r2, [r2+r3*8-8]
    call hadamard8x8_diff %+ SUFFIX
    add            r5d, eax

    add             r1, 8
    add             r2, 8
    call hadamard8x8_diff %+ SUFFIX
    add            r5d, eax

.done:
    mov            eax, r5d
%ifndef m8
    ADD            rsp, pad
%endif
    RET
%endmacro

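; HADAMARD8_DIFF [%1=xmm registers for the wrapper]: emits the hadamard8x8_diff
; worker for the current instruction set and instantiates the public entry
; points through hadamard8_16_wrapper.  The worker computes the 8x8 SATD of
; pix1-pix2: pixel differences -> row transform -> transpose -> column
; transform -> sum of absolute coefficients -> horizontal sum, masked to 16 bits.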
%macro HADAMARD8_DIFF 0-1
%if cpuflag(sse2)
hadamard8x8_diff %+ SUFFIX:
    lea                          r0, [r3*3]
    DIFF_PIXELS_8                r1, r2,  0, r3, r0, rsp+gprsize
    HADAMARD8
%if ARCH_X86_64
    TRANSPOSE8x8W                 0,  1,  2,  3,  4,  5,  6,  7,  8
%else
    TRANSPOSE8x8W                 0,  1,  2,  3,  4,  5,  6,  7, [rsp+gprsize], [rsp+mmsize+gprsize]
%endif
    HADAMARD8
    ABS_SUM_8x8         rsp+gprsize
    HSUM                        m0, m1, eax
    and                         eax, 0xFFFF
    ret

hadamard8_16_wrapper %1, 3
%elif cpuflag(mmx)
ALIGN 16
; int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2,
;                          int stride, int h)
; r0 = void *s = unused, int h = unused (always 8)
; note how r1, r2 and r3 are not clobbered in this function, so the 16x16
; version can simply call this 2x2 times (and that's why we access rsp+gprsize
; everywhere, which is the rsp of the calling function)
hadamard8x8_diff %+ SUFFIX:
    lea                          r0, [r3*3]

    ; first 4x8 pixels
    DIFF_PIXELS_8                r1, r2,  0, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova         [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W                 0,  1,  2,  3,  7
    STORE4              rsp+gprsize, m0, m1, m2, m3
    mova                         m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W                 4,  5,  6,  7,  0
    STORE4         rsp+gprsize+0x40, m4, m5, m6, m7

    ; second 4x8 pixels
    DIFF_PIXELS_8                r1, r2,  4, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova         [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W                 0,  1,  2,  3,  7
    STORE4         rsp+gprsize+0x20, m0, m1, m2, m3
    mova                         m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W                 4,  5,  6,  7,  0

    LOAD4          rsp+gprsize+0x40, m0, m1, m2, m3
    HADAMARD8
    ABS_SUM_8x8_32 rsp+gprsize+0x60
    mova         [rsp+gprsize+0x60], m0

    LOAD4          rsp+gprsize     , m0, m1, m2, m3
    LOAD4          rsp+gprsize+0x20, m4, m5, m6, m7
    HADAMARD8
    ABS_SUM_8x8_32 rsp+gprsize
    paddusw                      m0, [rsp+gprsize+0x60]

    HSUM                         m0, m1, eax
    and                         rax, 0xFFFF
    ret

hadamard8_16_wrapper 0, 14
%endif
%endmacro

INIT_MMX mmx
HADAMARD8_DIFF

INIT_MMX mmxext
HADAMARD8_DIFF

INIT_XMM sse2
%if ARCH_X86_64
%define ABS_SUM_8x8 ABS_SUM_8x8_64
%else
%define ABS_SUM_8x8 ABS_SUM_8x8_32
%endif
HADAMARD8_DIFF 10

INIT_XMM ssse3
%define ABS_SUM_8x8 ABS_SUM_8x8_64
HADAMARD8_DIFF 9

INIT_XMM sse2
; sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
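; scalar equivalent of the value this kernel returns (reference only, not
; assembled):
;     int sum = 0;
;     for (int y = 0; y < h; y++)
;         for (int x = 0; x < 16; x++) {
;             int d = pix1[y * line_size + x] - pix2[y * line_size + x];
;             sum += d * d;
;         }
;     return sum;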
cglobal sse16, 5, 5, 8
    shr      r4d, 1
    pxor      m0, m0         ; mm0 = 0
    pxor      m7, m7         ; mm7 holds the sum

.next2lines: ; FIXME why are these unaligned movs? pix1[] is aligned
    movu      m1, [r1   ]    ; mm1 = pix1[0][0-15]
    movu      m2, [r2   ]    ; mm2 = pix2[0][0-15]
    movu      m3, [r1+r3]    ; mm3 = pix1[1][0-15]
    movu      m4, [r2+r3]    ; mm4 = pix2[1][0-15]

    ; todo: mm1-mm2, mm3-mm4
    ; algo: subtract mm1 from mm2 with saturation and vice versa
    ;       OR the result to get the absolute difference
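    ;       (for unsigned bytes, |a - b| == (a -sat b) | (b -sat a), since one
    ;       of the two saturating differences is always zero)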
    mova      m5, m1
    mova      m6, m3
    psubusb   m1, m2
    psubusb   m3, m4
    psubusb   m2, m5
    psubusb   m4, m6

    por       m2, m1
    por       m4, m3

    ; now convert to 16-bit vectors so we can square them
    mova      m1, m2
    mova      m3, m4

    punpckhbw m2, m0
    punpckhbw m4, m0
    punpcklbw m1, m0         ; mm1 now spread over (mm1,mm2)
    punpcklbw m3, m0         ; mm4 now spread over (mm3,mm4)

    pmaddwd   m2, m2
    pmaddwd   m4, m4
    pmaddwd   m1, m1
    pmaddwd   m3, m3

    lea       r1, [r1+r3*2]  ; pix1 += 2*line_size
    lea       r2, [r2+r3*2]  ; pix2 += 2*line_size

    paddd     m1, m2
    paddd     m3, m4
    paddd     m7, m1
    paddd     m7, m3

    dec       r4
    jnz .next2lines

    mova      m1, m7
    psrldq    m7, 8          ; shift hi qword to lo
    paddd     m7, m1
    mova      m1, m7
    psrldq    m7, 4          ; shift hi dword to lo
    paddd     m7, m1
    movd     eax, m7         ; return value
    RET

INIT_MMX mmx
; get_pixels_mmx(int16_t *block, const uint8_t *pixels, int line_size)
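; scalar equivalent (reference only): widen an 8x8 pixel block to int16
;     for (int i = 0; i < 8; i++)
;         for (int j = 0; j < 8; j++)
;             block[i * 8 + j] = pixels[i * line_size + j];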
cglobal get_pixels, 3,4
    movsxdifnidn r2, r2d
    add          r0, 128
    mov          r3, -128
    pxor         m7, m7
.loop:
    mova         m0, [r1]
    mova         m2, [r1+r2]
    mova         m1, m0
    mova         m3, m2
    punpcklbw    m0, m7
    punpckhbw    m1, m7
    punpcklbw    m2, m7
    punpckhbw    m3, m7
    mova [r0+r3+ 0], m0
    mova [r0+r3+ 8], m1
    mova [r0+r3+16], m2
    mova [r0+r3+24], m3
    lea          r1, [r1+r2*2]
    add          r3, 32
    js .loop
    REP_RET

INIT_XMM sse2
cglobal get_pixels, 3, 4
    movsxdifnidn r2, r2d
    lea          r3, [r2*3]
    pxor         m4, m4
    movh         m0, [r1]
    movh         m1, [r1+r2]
    movh         m2, [r1+r2*2]
    movh         m3, [r1+r3]
    lea          r1, [r1+r2*4]
    punpcklbw    m0, m4
    punpcklbw    m1, m4
    punpcklbw    m2, m4
    punpcklbw    m3, m4
    mova       [r0], m0
    mova  [r0+0x10], m1
    mova  [r0+0x20], m2
    mova  [r0+0x30], m3
    movh         m0, [r1]
    movh         m1, [r1+r2*1]
    movh         m2, [r1+r2*2]
    movh         m3, [r1+r3]
    punpcklbw    m0, m4
    punpcklbw    m1, m4
    punpcklbw    m2, m4
    punpcklbw    m3, m4
    mova  [r0+0x40], m0
    mova  [r0+0x50], m1
    mova  [r0+0x60], m2
    mova  [r0+0x70], m3
    RET

INIT_MMX mmx
; diff_pixels_mmx(int16_t *block, const uint8_t *s1, const uint8_t *s2, int stride)
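; scalar equivalent (reference only):
;     for (int i = 0; i < 8; i++)
;         for (int j = 0; j < 8; j++)
;             block[i * 8 + j] = s1[i * stride + j] - s2[i * stride + j];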
cglobal diff_pixels, 4,5
    movsxdifnidn r3, r3d
    pxor         m7, m7
    add          r0,  128
    mov          r4, -128
.loop:
    mova         m0, [r1]
    mova         m2, [r2]
    mova         m1, m0
    mova         m3, m2
    punpcklbw    m0, m7
    punpckhbw    m1, m7
    punpcklbw    m2, m7
    punpckhbw    m3, m7
    psubw        m0, m2
    psubw        m1, m3
    mova  [r0+r4+0], m0
    mova  [r0+r4+8], m1
    add          r1, r3
    add          r2, r3
    add          r4, 16
    jne .loop
    REP_RET

INIT_MMX mmx
; pix_sum16_mmx(uint8_t * pix, int line_size)
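; sums the 256 pixels of a 16x16 block; the result is at most 255*256, so it
; fits in the 16 bits kept by the final mask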
cglobal pix_sum16, 2, 3
    movsxdifnidn r1, r1d
    mov          r2, r1
    neg          r2
    shl          r2, 4
    sub          r0, r2
    pxor         m7, m7
    pxor         m6, m6
.loop:
    mova         m0, [r0+r2+0]
    mova         m1, [r0+r2+0]
    mova         m2, [r0+r2+8]
    mova         m3, [r0+r2+8]
    punpcklbw    m0, m7
    punpckhbw    m1, m7
    punpcklbw    m2, m7
    punpckhbw    m3, m7
    paddw        m1, m0
    paddw        m3, m2
    paddw        m3, m1
    paddw        m6, m3
    add          r2, r1
    js .loop
    mova         m5, m6
    psrlq        m6, 32
    paddw        m6, m5
    mova         m5, m6
    psrlq        m6, 16
    paddw        m6, m5
    movd        eax, m6
    and         eax, 0xffff
    RET

INIT_MMX mmx
; pix_norm1_mmx(uint8_t *pix, int line_size)
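; returns the sum of squared pixel values over a 16x16 block
; (pmaddwd squares the widened pixels and adds neighbouring pairs)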
cglobal pix_norm1, 2, 4
    movsxdifnidn r1, r1d
    mov          r2, 16
    pxor         m0, m0
    pxor         m7, m7
.loop:
    mova         m2, [r0+0]
    mova         m3, [r0+8]
    mova         m1, m2
    punpckhbw    m1, m0
    punpcklbw    m2, m0
    mova         m4, m3
    punpckhbw    m3, m0
    punpcklbw    m4, m0
    pmaddwd      m1, m1
    pmaddwd      m2, m2
    pmaddwd      m3, m3
    pmaddwd      m4, m4
    paddd        m2, m1
    paddd        m4, m3
    paddd        m7, m2
    add          r0, r1
    paddd        m7, m4
    dec          r2
    jne .loop
    mova         m1, m7
    psrlq        m7, 32
    paddd        m1, m7
    movd        eax, m1
    RET