;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 iDCT code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pd_32:        times 4 dd 32

SECTION .text

cextern pw_1023
%define pw_pixel_max pw_1023

;-----------------------------------------------------------------------------
; void ff_h264_idct_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
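; %1/%2: two rows of four 32-bit coefficients, %3: tmp, %4: zero (clip floor),
; %5: dst pointer, %6: stride in bytes. Per sample this is, as a reference
; sketch (the +32 rounding bias is added to the coefficients beforehand):
;   dst[i] = av_clip(dst[i] + (coef[i] >> 6), 0, pixel_max)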
%macro STORE_DIFFx2 6
    psrad       %1, 6
    psrad       %2, 6
    packssdw    %1, %2
    movq        %3, [%5]
    movhps      %3, [%5+%6]
    paddsw      %1, %3
    CLIPW       %1, %4, [pw_pixel_max]
    movq      [%5], %1
    movhps [%5+%6], %1
%endmacro

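; packs two coefficient registers and adds them to one mmreg-wide row of
; pixels at %5; %3: zero (clip floor), %4: pw_pixel_max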
%macro STORE_DIFF16 5
    psrad       %1, 6
    psrad       %2, 6
    packssdw    %1, %2
    paddsw      %1, [%5]
    CLIPW       %1, %3, %4
    mova      [%5], %1
%endmacro

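; two-pass 4x4 iDCT: 1D transform down the columns, transpose, add the +32
; rounding bias, 1D transform across the rows; the final >>6 and the clip to
; [0, pixel_max] happen in STORE_DIFFx2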
;dst, in, stride
%macro IDCT4_ADD_10 3
    mova  m0, [%2+ 0]
    mova  m1, [%2+16]
    mova  m2, [%2+32]
    mova  m3, [%2+48]
    IDCT4_1D d,0,1,2,3,4,5
    TRANSPOSE4x4D 0,1,2,3,4
    paddd m0, [pd_32]
    IDCT4_1D d,0,1,2,3,4,5
    pxor  m5, m5
    mova [%2+ 0], m5
    mova [%2+16], m5
    mova [%2+32], m5
    mova [%2+48], m5
    STORE_DIFFx2 m0, m1, m4, m5, %1, %3
    lea   %1, [%1+%3*2]
    STORE_DIFFx2 m2, m3, m4, m5, %1, %3
%endmacro

%macro IDCT_ADD_10 0
cglobal h264_idct_add_10, 3,3
    IDCT4_ADD_10 r0, r1, r2
    RET
%endmacro

INIT_XMM sse2
IDCT_ADD_10
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD_10
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct_add16_10(pixel *dst, const int *block_offset,
;                            int16_t *block, int stride,
;                            const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
;;;;;;; NO FATE SAMPLES TRIGGER THIS
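; in: r0 = dst base, r2 = coefficient block (cleared on return), r3 = stride,
;     r5 = offset of this 4x4 block within dst (taken from block_offset)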
%macro ADD4x4IDCT 0
add4x4_idct %+ SUFFIX:
    add   r5, r0
    mova  m0, [r2+ 0]
    mova  m1, [r2+16]
    mova  m2, [r2+32]
    mova  m3, [r2+48]
    IDCT4_1D d,0,1,2,3,4,5
    TRANSPOSE4x4D 0,1,2,3,4
    paddd m0, [pd_32]
    IDCT4_1D d,0,1,2,3,4,5
    pxor  m5, m5
    mova  [r2+ 0], m5
    mova  [r2+16], m5
    mova  [r2+32], m5
    mova  [r2+48], m5
    STORE_DIFFx2 m0, m1, m4, m5, r5, r3
    lea   r5, [r5+r3*2]
    STORE_DIFFx2 m2, m3, m4, m5, r5, r3
    ret
%endmacro

INIT_XMM sse2
ALIGN 16
ADD4x4IDCT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
ALIGN 16
ADD4x4IDCT
%endif

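; %1: 4x4 block index (0-15), %2: byte offset of that block in the
; nnzc[6*8] scan table; blocks with no nonzero coefficients are skipped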
%macro ADD16_OP 2
    cmp          byte [r4+%2], 0
    jz .skipblock%1
    mov         r5d, [r1+%1*4]
    call add4x4_idct %+ SUFFIX
.skipblock%1:
%if %1<15
    add          r2, 64
%endif
%endmacro

%macro IDCT_ADD16_10 0
cglobal h264_idct_add16_10, 5,6
    ADD16_OP 0, 4+1*8
    ADD16_OP 1, 5+1*8
    ADD16_OP 2, 4+2*8
    ADD16_OP 3, 5+2*8
    ADD16_OP 4, 6+1*8
    ADD16_OP 5, 7+1*8
    ADD16_OP 6, 6+2*8
    ADD16_OP 7, 7+2*8
    ADD16_OP 8, 4+3*8
    ADD16_OP 9, 5+3*8
    ADD16_OP 10, 4+4*8
    ADD16_OP 11, 5+4*8
    ADD16_OP 12, 6+3*8
    ADD16_OP 13, 7+3*8
    ADD16_OP 14, 6+4*8
    ADD16_OP 15, 7+4*8
    REP_RET
%endmacro

INIT_XMM sse2
IDCT_ADD16_10
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD16_10
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct_dc_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
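; add the DC value broadcast in m0 to four rows of pixels and clip;
; %1: dst, %2: stride, %3: 3*stride; the caller loads m6 = pw_pixel_max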
%macro IDCT_DC_ADD_OP_10 3
    pxor      m5, m5
%if avx_enabled
    paddw     m1, m0, [%1+0   ]
    paddw     m2, m0, [%1+%2  ]
    paddw     m3, m0, [%1+%2*2]
    paddw     m4, m0, [%1+%3  ]
%else
    mova      m1, [%1+0   ]
    mova      m2, [%1+%2  ]
    mova      m3, [%1+%2*2]
    mova      m4, [%1+%3  ]
    paddw     m1, m0
    paddw     m2, m0
    paddw     m3, m0
    paddw     m4, m0
%endif
    CLIPW     m1, m5, m6
    CLIPW     m2, m5, m6
    CLIPW     m3, m5, m6
    CLIPW     m4, m5, m6
    mova [%1+0   ], m1
    mova [%1+%2  ], m2
    mova [%1+%2*2], m3
    mova [%1+%3  ], m4
%endmacro
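; reference sketch of the DC-only path in C:
;   int dc = (block[0] + 32) >> 6;
;   for each pixel of the block: dst[x] = av_clip(dst[x] + dc, 0, pixel_max);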

INIT_MMX mmxext
cglobal h264_idct_dc_add_10,3,3
    movd      m0, [r1]
    mov dword [r1], 0
    paddd     m0, [pd_32]
    psrad     m0, 6
    lea       r1, [r2*3]
    pshufw    m0, m0, 0
    mova      m6, [pw_pixel_max]
    IDCT_DC_ADD_OP_10 r0, r2, r1
    RET

;-----------------------------------------------------------------------------
; void ff_h264_idct8_dc_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
%macro IDCT8_DC_ADD 0
cglobal h264_idct8_dc_add_10,3,4,7
    movd      m0, [r1]
    mov dword [r1], 0
    paddd     m0, [pd_32]
    psrad     m0, 6
    lea       r1, [r2*3]
    SPLATW    m0, m0, 0
    mova      m6, [pw_pixel_max]
    IDCT_DC_ADD_OP_10 r0, r2, r1
    lea       r0, [r0+r2*4]
    IDCT_DC_ADD_OP_10 r0, r2, r1
    RET
%endmacro

INIT_XMM sse2
IDCT8_DC_ADD
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_DC_ADD
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct_add16intra_10(pixel *dst, const int *block_offset,
;                                 int16_t *block, int stride,
;                                 const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
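; .ac%1: full iDCT of the adjacent block pair %1 and %1+1; entered when
; either block of the pair has AC coefficients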
%macro AC 1
.ac%1:
    mov  r5d, [r1+(%1+0)*4]
    call add4x4_idct %+ SUFFIX
    mov  r5d, [r1+(%1+1)*4]
    add  r2, 64
    call add4x4_idct %+ SUFFIX
    add  r2, 64
    jmp .skipadd%1
%endmacro

%assign last_block 16
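; %1: even 4x4 block index, %2: nnzc offset; the word compare tests the nnzc
; bytes of two adjacent blocks at once: any AC coefficients take the .ac%1
; path, otherwise the two DC terms (if nonzero) go through idct_dc_add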
%macro ADD16_OP_INTRA 2
    cmp      word [r4+%2], 0
    jnz .ac%1
    mov      r5d, [r2+ 0]
    or       r5d, [r2+64]
    jz .skipblock%1
    mov      r5d, [r1+(%1+0)*4]
    call idct_dc_add %+ SUFFIX
.skipblock%1:
%if %1<last_block-2
    add       r2, 128
%endif
.skipadd%1:
%endmacro

%macro IDCT_ADD16INTRA_10 0
idct_dc_add %+ SUFFIX:
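    ; adds the DC terms of two adjacent 4x4 blocks in one pass:
    ; low half of m0 from [r2], high half from [r2+64]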
    add       r5, r0
    movq      m0, [r2+ 0]
    movhps    m0, [r2+64]
    mov dword [r2+ 0], 0
    mov dword [r2+64], 0
    paddd     m0, [pd_32]
    psrad     m0, 6
    pshufhw   m0, m0, 0
    pshuflw   m0, m0, 0
    lea       r6, [r3*3]
    mova      m6, [pw_pixel_max]
    IDCT_DC_ADD_OP_10 r5, r3, r6
    ret

cglobal h264_idct_add16intra_10,5,7,8
    ADD16_OP_INTRA 0, 4+1*8
    ADD16_OP_INTRA 2, 4+2*8
    ADD16_OP_INTRA 4, 6+1*8
    ADD16_OP_INTRA 6, 6+2*8
    ADD16_OP_INTRA 8, 4+3*8
    ADD16_OP_INTRA 10, 4+4*8
    ADD16_OP_INTRA 12, 6+3*8
    ADD16_OP_INTRA 14, 6+4*8
    REP_RET
    AC 8
    AC 10
    AC 12
    AC 14
    AC 0
    AC 2
    AC 4
    AC 6
%endmacro

INIT_XMM sse2
IDCT_ADD16INTRA_10
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD16INTRA_10
%endif

%assign last_block 36
;-----------------------------------------------------------------------------
; void ff_h264_idct_add8_10(pixel **dst, const int *block_offset,
;                           int16_t *block, int stride,
;                           const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
%macro IDCT_ADD8 0
cglobal h264_idct_add8_10,5,8,7
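    ; r0 points to the two chroma plane pointers; keep a copy so the second
    ; plane can be fetched once the first is done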
%if ARCH_X86_64
    mov      r7, r0
%endif
    add      r2, 1024
    mov      r0, [r0]
    ADD16_OP_INTRA 16, 4+ 6*8
    ADD16_OP_INTRA 18, 4+ 7*8
    add      r2, 1024-128*2
%if ARCH_X86_64
    mov      r0, [r7+gprsize]
%else
    mov      r0, r0m
    mov      r0, [r0+gprsize]
%endif
    ADD16_OP_INTRA 32, 4+11*8
    ADD16_OP_INTRA 34, 4+12*8
    REP_RET
    AC 16
    AC 18
    AC 32
    AC 34

%endmacro ; IDCT_ADD8

INIT_XMM sse2
IDCT_ADD8
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD8
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct8_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
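; in: m1-m3/m5-m7 = coefficient rows 1-3 and 5-7, %1/%2 = memory operands
; holding rows 0 and 4; out: m0-m7 = the eight transformed rows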
%macro IDCT8_1D 2
    SWAP      0, 1
    psrad     m4, m5, 1
    psrad     m1, m0, 1
    paddd     m4, m5
    paddd     m1, m0
    paddd     m4, m7
    paddd     m1, m5
    psubd     m4, m0
    paddd     m1, m3

    psubd     m0, m3
    psubd     m5, m3
    paddd     m0, m7
    psubd     m5, m7
    psrad     m3, 1
    psrad     m7, 1
    psubd     m0, m3
    psubd     m5, m7

    SWAP      1, 7
    psrad     m1, m7, 2
    psrad     m3, m4, 2
    paddd     m3, m0
    psrad     m0, 2
    paddd     m1, m5
    psrad     m5, 2
    psubd     m0, m4
    psubd     m7, m5

    SWAP      5, 6
    psrad     m4, m2, 1
    psrad     m6, m5, 1
    psubd     m4, m5
    paddd     m6, m2

    mova      m2, %1
    mova      m5, %2
    SUMSUB_BA d, 5, 2
    SUMSUB_BA d, 6, 5
    SUMSUB_BA d, 4, 2
    SUMSUB_BA d, 7, 6
    SUMSUB_BA d, 0, 4
    SUMSUB_BA d, 3, 2
    SUMSUB_BA d, 1, 5
    SWAP      7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
%endmacro

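; load rows 1-3 and 5-7 of the 8x8 coefficient block at %1 and run one
; 1D pass; rows 0 and 4 are passed to IDCT8_1D as memory operands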
%macro IDCT8_1D_FULL 1
    mova         m7, [%1+112*2]
    mova         m6, [%1+ 96*2]
    mova         m5, [%1+ 80*2]
    mova         m3, [%1+ 48*2]
    mova         m2, [%1+ 32*2]
    mova         m1, [%1+ 16*2]
    IDCT8_1D   [%1], [%1+ 64*2]
%endmacro

; %1=int16_t *block, %2=int16_t *dstblock
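; (one 1D pass plus 4x4 transposes: on x86_64 most rows stay in registers,
; on x86_32 all eight rows are spilled to %2)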
%macro IDCT8_ADD_SSE_START 2
    IDCT8_1D_FULL %1
%if ARCH_X86_64
    TRANSPOSE4x4D  0,1,2,3,8
    mova    [%2    ], m0
    TRANSPOSE4x4D  4,5,6,7,8
    mova    [%2+8*2], m4
%else
    mova         [%1], m7
    TRANSPOSE4x4D   0,1,2,3,7
    mova           m7, [%1]
    mova    [%2     ], m0
    mova    [%2+16*2], m1
    mova    [%2+32*2], m2
    mova    [%2+48*2], m3
    TRANSPOSE4x4D   4,5,6,7,3
    mova    [%2+ 8*2], m4
    mova    [%2+24*2], m5
    mova    [%2+40*2], m6
    mova    [%2+56*2], m7
%endif
%endmacro

; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_SSE_END 3
    IDCT8_1D_FULL %2
    mova  [%2     ], m6
    mova  [%2+16*2], m7

    pxor         m7, m7
    STORE_DIFFx2 m0, m1, m6, m7, %1, %3
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m2, m3, m6, m7, %1, %3
    mova         m0, [%2     ]
    mova         m1, [%2+16*2]
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m4, m5, m6, m7, %1, %3
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m0, m1, m6, m7, %1, %3
%endmacro

%macro IDCT8_ADD 0
cglobal h264_idct8_add_10, 3,4,16
%if UNIX64 == 0
    %assign pad 16-gprsize-(stack_offset&15)
    sub  rsp, pad
    call h264_idct8_add1_10 %+ SUFFIX
    add  rsp, pad
    RET
%endif

ALIGN 16
; TODO: does not need to use stack
h264_idct8_add1_10 %+ SUFFIX:
%assign pad 256+16-gprsize
    sub          rsp, pad
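    ; add the +32 rounding bias to the DC coefficient: it reaches every
    ; output sample with weight 1, so the bias survives both passes and
    ; turns the final >>6 into (x+32)>>6 rounding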
    add   dword [r1], 32

%if ARCH_X86_64
    IDCT8_ADD_SSE_START r1, rsp
    SWAP 1,  9
    SWAP 2, 10
    SWAP 3, 11
    SWAP 5, 13
    SWAP 6, 14
    SWAP 7, 15
    IDCT8_ADD_SSE_START r1+16, rsp+128
    PERMUTE 1,9, 2,10, 3,11, 5,1, 6,2, 7,3, 9,13, 10,14, 11,15, 13,5, 14,6, 15,7
    IDCT8_1D [rsp], [rsp+128]
    SWAP 0,  8
    SWAP 1,  9
    SWAP 2, 10
    SWAP 3, 11
    SWAP 4, 12
    SWAP 5, 13
    SWAP 6, 14
    SWAP 7, 15
    IDCT8_1D [rsp+16], [rsp+144]
    psrad         m8, 6
    psrad         m0, 6
    packssdw      m8, m0
    paddsw        m8, [r0]
    pxor          m0, m0
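    ; clear the whole coefficient block; callers rely on the idct leaving
    ; the block zeroed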
    mova    [r1+  0], m0
    mova    [r1+ 16], m0
    mova    [r1+ 32], m0
    mova    [r1+ 48], m0
    mova    [r1+ 64], m0
    mova    [r1+ 80], m0
    mova    [r1+ 96], m0
    mova    [r1+112], m0
    mova    [r1+128], m0
    mova    [r1+144], m0
    mova    [r1+160], m0
    mova    [r1+176], m0
    mova    [r1+192], m0
    mova    [r1+208], m0
    mova    [r1+224], m0
    mova    [r1+240], m0
    CLIPW         m8, m0, [pw_pixel_max]
    mova        [r0], m8
    mova          m8, [pw_pixel_max]
    STORE_DIFF16  m9, m1, m0, m8, r0+r2
    lea           r0, [r0+r2*2]
    STORE_DIFF16 m10, m2, m0, m8, r0
    STORE_DIFF16 m11, m3, m0, m8, r0+r2
    lea           r0, [r0+r2*2]
    STORE_DIFF16 m12, m4, m0, m8, r0
    STORE_DIFF16 m13, m5, m0, m8, r0+r2
    lea           r0, [r0+r2*2]
    STORE_DIFF16 m14, m6, m0, m8, r0
    STORE_DIFF16 m15, m7, m0, m8, r0+r2
%else
    IDCT8_ADD_SSE_START r1,    rsp
    IDCT8_ADD_SSE_START r1+16, rsp+128
    lea           r3, [r0+8]
    IDCT8_ADD_SSE_END r0, rsp,    r2
    IDCT8_ADD_SSE_END r3, rsp+16, r2
    mova    [r1+  0], m7
    mova    [r1+ 16], m7
    mova    [r1+ 32], m7
    mova    [r1+ 48], m7
    mova    [r1+ 64], m7
    mova    [r1+ 80], m7
    mova    [r1+ 96], m7
    mova    [r1+112], m7
    mova    [r1+128], m7
    mova    [r1+144], m7
    mova    [r1+160], m7
    mova    [r1+176], m7
    mova    [r1+192], m7
    mova    [r1+208], m7
    mova    [r1+224], m7
    mova    [r1+240], m7
%endif ; ARCH_X86_64

    add          rsp, pad
    ret
%endmacro

INIT_XMM sse2
IDCT8_ADD
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_ADD
%endif

;-----------------------------------------------------------------------------
; void ff_h264_idct8_add4_10(pixel **dst, const int *block_offset,
;                            int16_t *block, int stride,
;                            const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
;;;;;;; NO FATE SAMPLES TRIGGER THIS
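; %1: 8x8 block index (0/4/8/12), %2: byte offset of the block in nnzc[6*8]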
%macro IDCT8_ADD4_OP 2
    cmp       byte [r4+%2], 0
    jz .skipblock%1
    mov      r0d, [r6+%1*4]
    add       r0, r5
    call h264_idct8_add1_10 %+ SUFFIX
.skipblock%1:
%if %1<12
    add       r1, 256
%endif
%endmacro

%macro IDCT8_ADD4 0
cglobal h264_idct8_add4_10, 0,7,16
    %assign pad 16-gprsize-(stack_offset&15)
    SUB      rsp, pad
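    ; fetch the arguments by hand (cglobal declares 0 auto-loaded args) and
    ; remap them so r0 stays free for the per-block dst address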
    mov       r5, r0mp
    mov       r6, r1mp
    mov       r1, r2mp
    mov      r2d, r3m
    movifnidn r4, r4mp
    IDCT8_ADD4_OP  0, 4+1*8
    IDCT8_ADD4_OP  4, 6+1*8
    IDCT8_ADD4_OP  8, 4+3*8
    IDCT8_ADD4_OP 12, 6+3*8
    ADD       rsp, pad
    RET
%endmacro ; IDCT8_ADD4

INIT_XMM sse2
IDCT8_ADD4
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_ADD4
%endif