;*****************************************************************************
;* MMX/SSE2/AVX-optimized H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*          Oskar Arvidsson <oskar@irock.se>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pb_3_1: times 4 db 3, 1

SECTION .text

cextern pb_0
cextern pb_1
cextern pb_3
cextern pb_A1

; expands to [base],...,[base+7*stride]
%define PASS8ROWS(base, base3, stride, stride3) \
    [base], [base+stride], [base+stride*2], [base3], \
    [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]

%define PASS8ROWS(base, base3, stride, stride3, offset) \
    PASS8ROWS(base+offset, base3+offset, stride, stride3)
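; (with base3 = base+3*stride the last entry, [base3+stride*4], equals
;  [base+7*stride]; the 5-argument form just offsets both base pointers first)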

; in: 8 rows of 4 bytes in %4..%11
; out: 4 rows of 8 bytes in m0..m3
%macro TRANSPOSE4x8_LOAD 11
    movh       m0, %4
    movh       m2, %5
    movh       m1, %6
    movh       m3, %7
    punpckl%1  m0, m2
    punpckl%1  m1, m3
    mova       m2, m0
    punpckl%2  m0, m1
    punpckh%2  m2, m1

    movh       m4, %8
    movh       m6, %9
    movh       m5, %10
    movh       m7, %11
    punpckl%1  m4, m6
    punpckl%1  m5, m7
    mova       m6, m4
    punpckl%2  m4, m5
    punpckh%2  m6, m5

    punpckh%3  m1, m0, m4
    punpckh%3  m3, m2, m6
    punpckl%3  m0, m4
    punpckl%3  m2, m6
%endmacro

; in: 4 rows of 8 bytes in m0..m3
; out: 8 rows of 4 bytes in %1..%8
%macro TRANSPOSE8x4B_STORE 8
    punpckhdq  m4, m0, m0
    punpckhdq  m5, m1, m1
    punpckhdq  m6, m2, m2

    punpcklbw  m0, m1
    punpcklbw  m2, m3
    punpcklwd  m1, m0, m2
    punpckhwd  m0, m2
    movh       %1, m1
    punpckhdq  m1, m1
    movh       %2, m1
    movh       %3, m0
    punpckhdq  m0, m0
    movh       %4, m0

    punpckhdq  m3, m3
    punpcklbw  m4, m5
    punpcklbw  m6, m3
    punpcklwd  m5, m4, m6
    punpckhwd  m4, m6
    movh       %5, m5
    punpckhdq  m5, m5
    movh       %6, m5
    movh       %7, m4
    punpckhdq  m4, m4
    movh       %8, m4
%endmacro

%macro TRANSPOSE4x8B_LOAD 8
    TRANSPOSE4x8_LOAD bw, wd, dq, %1, %2, %3, %4, %5, %6, %7, %8
%endmacro

%macro SBUTTERFLY3 4
    punpckh%1  %4, %2, %3
    punpckl%1  %2, %3
%endmacro

; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8
; out: 6 rows of 8 in [%9+0*16] .. [%9+5*16]
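; (the normal luma filter only reads p2..q2 and writes p1..q1, so only these six
;  columns of the transposed block need to be kept in the temporary buffer)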
%macro TRANSPOSE6x8_MEM 9
    RESET_MM_PERMUTATION
    movq  m0, %1
    movq  m1, %2
    movq  m2, %3
    movq  m3, %4
    movq  m4, %5
    movq  m5, %6
    movq  m6, %7
    SBUTTERFLY bw, 0, 1, 7
    SBUTTERFLY bw, 2, 3, 7
    SBUTTERFLY bw, 4, 5, 7
    movq  [%9+0x10], m3
    SBUTTERFLY3 bw, m6, %8, m7
    SBUTTERFLY wd, 0, 2, 3
    SBUTTERFLY wd, 4, 6, 3
    punpckhdq m0, m4
    movq  [%9+0x00], m0
    SBUTTERFLY3 wd, m1, [%9+0x10], m3
    SBUTTERFLY wd, 5, 7, 0
    SBUTTERFLY dq, 1, 5, 0
    SBUTTERFLY dq, 2, 6, 0
    punpckldq m3, m7
    movq  [%9+0x10], m2
    movq  [%9+0x20], m6
    movq  [%9+0x30], m1
    movq  [%9+0x40], m5
    movq  [%9+0x50], m3
    RESET_MM_PERMUTATION
%endmacro

; in: 8 rows of 8 in %1..%8
; out: 8 rows of 8 in %9..%16
%macro TRANSPOSE8x8_MEM 16
    RESET_MM_PERMUTATION
    movq  m0, %1
    movq  m1, %2
    movq  m2, %3
    movq  m3, %4
    movq  m4, %5
    movq  m5, %6
    movq  m6, %7
    SBUTTERFLY bw, 0, 1, 7
    SBUTTERFLY bw, 2, 3, 7
    SBUTTERFLY bw, 4, 5, 7
    SBUTTERFLY3 bw, m6, %8, m7
    movq  %9,  m5
    SBUTTERFLY wd, 0, 2, 5
    SBUTTERFLY wd, 4, 6, 5
    SBUTTERFLY wd, 1, 3, 5
    movq  %11, m6
    movq  m6,  %9
    SBUTTERFLY wd, 6, 7, 5
    SBUTTERFLY dq, 0, 4, 5
    SBUTTERFLY dq, 1, 6, 5
    movq  %9,  m0
    movq  %10, m4
    movq  %13, m1
    movq  %14, m6
    SBUTTERFLY3 dq, m2, %11, m0
    SBUTTERFLY dq, 3, 7, 4
    movq  %11, m2
    movq  %12, m0
    movq  %15, m3
    movq  %16, m7
    RESET_MM_PERMUTATION
%endmacro

; out: %4 = |%1-%2|>%3
; clobbers: %5
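; (|%1-%2| is formed as psubusb(%1,%2) | psubusb(%2,%1); the final psubusb against
;  %3 leaves a nonzero byte exactly where the difference exceeds the threshold)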
%macro DIFF_GT 5
%if avx_enabled == 0
    mova    %5, %2
    mova    %4, %1
    psubusb %5, %1
    psubusb %4, %2
%else
    psubusb %5, %2, %1
    psubusb %4, %1, %2
%endif
    por     %4, %5
    psubusb %4, %3
%endmacro

; out: %4 = 0xFF where |%1-%2| <= %3 (i.e. where the test |%1-%2| > %3 fails)
; clobbers: %5
%macro DIFF_GT2 5
%if ARCH_X86_64
    psubusb %5, %2, %1
    psubusb %4, %1, %2
%else
    mova    %5, %2
    mova    %4, %1
    psubusb %5, %1
    psubusb %4, %2
%endif
    psubusb %5, %3
    psubusb %4, %3
    pcmpeqb %4, %5
%endmacro

; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha-1 %2=beta-1
; out: m5=beta-1, m7=mask, %3=alpha-1
; clobbers: m4,m6
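; m7 ends up 0xFF exactly where |p0-q0| <= alpha-1, |p1-p0| <= beta-1 and
; |q1-q0| <= beta-1, i.e. where the spec's per-sample filtering condition holds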
%macro LOAD_MASK 2-3
    movd     m4, %1
    movd     m5, %2
    SPLATW   m4, m4
    SPLATW   m5, m5
    packuswb m4, m4  ; 16x alpha-1
    packuswb m5, m5  ; 16x beta-1
%if %0>2
    mova     %3, m4
%endif
    DIFF_GT  m1, m2, m4, m7, m6 ; |p0-q0| > alpha-1
    DIFF_GT  m0, m1, m5, m4, m6 ; |p1-p0| > beta-1
    por      m7, m4
    DIFF_GT  m3, m2, m5, m4, m6 ; |q1-q0| > beta-1
    por      m7, m4
    pxor     m6, m6
    pcmpeqb  m7, m6
%endmacro

; in: m0=p1 m1=p0 m2=q0 m3=q1 m7=(tc&mask)
; out: m1=p0' m2=q0'
; clobbers: m0,3-6
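; Reference form of this step, per the spec:
;   delta = clip3( -tc, tc, (((q0-p0)<<2) + (p1-q1) + 4) >> 3 ),  p0 += delta,  q0 -= delta
; It is evaluated below purely with byte averages (pavgb computes (a+b+1)>>1):
; the delta is carried with a bias of 0xA1 (= 128+33, see "d+128+33" below), so
; the two psubusb against pb_A1 split it into its positive and negative halves,
; which are then clamped against tc (m7) and applied with saturating adds/subs.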
%macro DEBLOCK_P0_Q0 0
    pcmpeqb m4, m4
    pxor    m5, m1, m2   ; p0^q0
    pxor    m3, m4
    pand    m5, [pb_1]   ; (p0^q0)&1
    pavgb   m3, m0       ; (p1 - q1 + 256)>>1
    pxor    m4, m1
    pavgb   m3, [pb_3]   ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2
    pavgb   m4, m2       ; (q0 - p0 + 256)>>1
    pavgb   m3, m5
    mova    m6, [pb_A1]
    paddusb m3, m4       ; d+128+33
    psubusb m6, m3
    psubusb m3, [pb_A1]
    pminub  m6, m7
    pminub  m3, m7
    psubusb m1, m6
    psubusb m2, m3
    paddusb m1, m3
    paddusb m2, m6
%endmacro

; in: m1=p0 m2=q0
;     %1=p1 %2=p2 %3=[p2] %4=[p1] %5=tc0 %6=tmp
; out: [p1] = clip( (p2+((p0+q0+1)>>1))>>1, p1-tc0, p1+tc0 )
; clobbers: p2, tmp, tc0
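; (equivalently, in the spec's form: [p1] = p1 + clip3( -tc0, tc0, (p2 + ((p0+q0+1)>>1) - 2*p1) >> 1 ))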
%macro LUMA_Q1 6
    pavgb   %6, m1, m2
    pavgb   %2, %6       ; avg(p2,avg(p0,q0))
    pxor    %6, %3
    pand    %6, [pb_1]   ; (p2^avg(p0,q0))&1
    psubusb %2, %6       ; (p2+((p0+q0+1)>>1))>>1
    psubusb %6, %1, %5
    paddusb %5, %1
    pmaxub  %2, %6
    pminub  %2, %5
    mova    %4, %2
%endmacro

%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_v_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
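; pix points at the first q0 sample, so the edge lies between pix-stride and pix;
; tc0 holds four clipping values, each of which is broadcast below to cover its
; group of 4 columns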
%macro DEBLOCK_LUMA 0
cglobal deblock_v_luma_8, 5,5,10
    movd    m8, [r4] ; tc0
    lea     r4, [r1*3]
    dec     r2d        ; alpha-1
    neg     r4
    dec     r3d        ; beta-1
    add     r4, r0     ; pix-3*stride

    mova    m0, [r4+r1]   ; p1
    mova    m1, [r4+2*r1] ; p0
    mova    m2, [r0]      ; q0
    mova    m3, [r0+r1]   ; q1
    LOAD_MASK r2d, r3d

    punpcklbw m8, m8
    punpcklbw m8, m8 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
    pcmpeqb m9, m9
    pcmpeqb m9, m8
    pandn   m9, m7
    pand    m8, m9

    movdqa  m3, [r4] ; p2
    DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
    pand    m6, m9
    psubb   m7, m8, m6
    pand    m6, m8
    LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4

    movdqa  m4, [r0+2*r1] ; q2
    DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1
    pand    m6, m9
    pand    m8, m6
    psubb   m7, m6
    mova    m3, [r0+r1]
    LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m8, m6

    DEBLOCK_P0_Q0
    mova    [r4+2*r1], m1
    mova    [r0], m2
    RET

;-----------------------------------------------------------------------------
; void deblock_h_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
INIT_MMX cpuname
cglobal deblock_h_luma_8, 5,9
    movsxd r7,  r1d
    lea    r8,  [r7+r7*2]
    lea    r6,  [r0-4]
    lea    r5,  [r0-4+r8]
%if WIN64
    sub    rsp, 0x98
    %define pix_tmp rsp+0x30
%else
    sub    rsp, 0x68
    %define pix_tmp rsp
%endif

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM  PASS8ROWS(r6, r5, r7, r8), pix_tmp
    lea    r6, [r6+r7*8]
    lea    r5, [r5+r7*8]
    TRANSPOSE6x8_MEM  PASS8ROWS(r6, r5, r7, r8), pix_tmp+8

    ; vertical filter
    ; alpha, beta, tc0 are still in r2d, r3d, r4
    ; don't backup r6, r5, r7, r8 because deblock_v_luma_sse2 doesn't use them
    lea    r0, [pix_tmp+0x30]
    mov    r1d, 0x10
%if WIN64
    mov    [rsp+0x20], r4
%endif
    call   deblock_v_luma_8

    ; transpose 16x4 -> original space  (only the middle 4 rows were changed by the filter)
    add    r6, 2
    add    r5, 2
    movq   m0, [pix_tmp+0x18]
    movq   m1, [pix_tmp+0x28]
    movq   m2, [pix_tmp+0x38]
    movq   m3, [pix_tmp+0x48]
    TRANSPOSE8x4B_STORE  PASS8ROWS(r6, r5, r7, r8)

    shl    r7,  3
    sub    r6,  r7
    sub    r5,  r7
    shr    r7,  3
    movq   m0, [pix_tmp+0x10]
    movq   m1, [pix_tmp+0x20]
    movq   m2, [pix_tmp+0x30]
    movq   m3, [pix_tmp+0x40]
    TRANSPOSE8x4B_STORE  PASS8ROWS(r6, r5, r7, r8)

%if WIN64
    add    rsp, 0x98
%else
    add    rsp, 0x68
%endif
    RET
%endmacro

INIT_XMM sse2
DEBLOCK_LUMA
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA
%endif

%else

%macro DEBLOCK_LUMA 2
;-----------------------------------------------------------------------------
; void deblock_v8_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_%1_luma_8, 5,5,8,2*%2
    lea     r4, [r1*3]
    dec     r2     ; alpha-1
    neg     r4
    dec     r3     ; beta-1
    add     r4, r0 ; pix-3*stride

    mova    m0, [r4+r1]   ; p1
    mova    m1, [r4+2*r1] ; p0
    mova    m2, [r0]      ; q0
    mova    m3, [r0+r1]   ; q1
    LOAD_MASK r2, r3

    mov     r3, r4mp
    pcmpeqb m3, m3
    movd    m4, [r3] ; tc0
    punpcklbw m4, m4
    punpcklbw m4, m4 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
    mova   [esp+%2], m4 ; tc
    pcmpgtb m4, m3
    mova    m3, [r4] ; p2
    pand    m4, m7
    mova   [esp], m4 ; mask

    DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
    pand    m6, m4
    pand    m4, [esp+%2] ; tc
    psubb   m7, m4, m6
    pand    m6, m4
    LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4

    mova    m4, [r0+2*r1] ; q2
    DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1
    pand    m6, [esp] ; mask
    mova    m5, [esp+%2] ; tc
    psubb   m7, m6
    pand    m5, m6
    mova    m3, [r0+r1]
    LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m5, m6

    DEBLOCK_P0_Q0
    mova    [r4+2*r1], m1
    mova    [r0], m2
    RET

;-----------------------------------------------------------------------------
; void deblock_h_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
INIT_MMX cpuname
cglobal deblock_h_luma_8, 0,5,8,0x60+HAVE_ALIGNED_STACK*12
    mov    r0, r0mp
    mov    r3, r1m
    lea    r4, [r3*3]
    sub    r0, 4
    lea    r1, [r0+r4]
%define pix_tmp esp+12*HAVE_ALIGNED_STACK

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM  PASS8ROWS(r0, r1, r3, r4), pix_tmp
    lea    r0, [r0+r3*8]
    lea    r1, [r1+r3*8]
    TRANSPOSE6x8_MEM  PASS8ROWS(r0, r1, r3, r4), pix_tmp+8

    ; vertical filter
    lea    r0, [pix_tmp+0x30]
    PUSH   dword r4m
    PUSH   dword r3m
    PUSH   dword r2m
    PUSH   dword 16
    PUSH   dword r0
    call   deblock_%1_luma_8
%ifidn %1, v8
    add    dword [esp   ], 8 ; pix_tmp+0x38
    add    dword [esp+16], 2 ; tc0+2
    call   deblock_%1_luma_8
%endif
    ADD    esp, 20

    ; transpose 16x4 -> original space  (only the middle 4 rows were changed by the filter)
    mov    r0, r0mp
    sub    r0, 2

    movq   m0, [pix_tmp+0x10]
    movq   m1, [pix_tmp+0x20]
    lea    r1, [r0+r4]
    movq   m2, [pix_tmp+0x30]
    movq   m3, [pix_tmp+0x40]
    TRANSPOSE8x4B_STORE  PASS8ROWS(r0, r1, r3, r4)

    lea    r0, [r0+r3*8]
    lea    r1, [r1+r3*8]
    movq   m0, [pix_tmp+0x18]
    movq   m1, [pix_tmp+0x28]
    movq   m2, [pix_tmp+0x38]
    movq   m3, [pix_tmp+0x48]
    TRANSPOSE8x4B_STORE  PASS8ROWS(r0, r1, r3, r4)

    RET
%endmacro ; DEBLOCK_LUMA

INIT_MMX mmxext
DEBLOCK_LUMA v8, 8
INIT_XMM sse2
DEBLOCK_LUMA v, 16
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA v, 16
%endif

%endif ; ARCH



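; Strong (intra) luma filter for the p side.  Where mask1p is set it produces
;   p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;   p1' = (p2 + p1 + p0 + q0 + 2) >> 2
;   p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
; and elsewhere (within mask0) it falls back to p0' = (2*p1 + p0 + q1 + 2) >> 2.
; %1..%3 receive the filtered p0,p1,p2; %4 holds p3.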
%macro LUMA_INTRA_P012 4 ; p0..p3 in memory
%if ARCH_X86_64
    pavgb t0, p2, p1
    pavgb t1, p0, q0
%else
    mova  t0, p2
    mova  t1, p0
    pavgb t0, p1
    pavgb t1, q0
%endif
    pavgb t0, t1 ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
    mova  t5, t1
%if ARCH_X86_64
    paddb t2, p2, p1
    paddb t3, p0, q0
%else
    mova  t2, p2
    mova  t3, p0
    paddb t2, p1
    paddb t3, q0
%endif
    paddb t2, t3
    mova  t3, t2
    mova  t4, t2
    psrlw t2, 1
    pavgb t2, mpb_0
    pxor  t2, t0
    pand  t2, mpb_1
    psubb t0, t2 ; p1' = (p2+p1+p0+q0+2)/4;

%if ARCH_X86_64
    pavgb t1, p2, q1
    psubb t2, p2, q1
%else
    mova  t1, p2
    mova  t2, p2
    pavgb t1, q1
    psubb t2, q1
%endif
    paddb t3, t3
    psubb t3, t2 ; p2+2*p1+2*p0+2*q0+q1
    pand  t2, mpb_1
    psubb t1, t2
    pavgb t1, p1
    pavgb t1, t5 ; (((p2+q1)/2 + p1+1)/2 + (p0+q0+1)/2 + 1)/2
    psrlw t3, 2
    pavgb t3, mpb_0
    pxor  t3, t1
    pand  t3, mpb_1
    psubb t1, t3 ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8

    pxor  t3, p0, q1
    pavgb t2, p0, q1
    pand  t3, mpb_1
    psubb t2, t3
    pavgb t2, p1 ; p0'b = (2*p1+p0+q1+2)/4

    pxor  t1, t2
    pxor  t2, p0
    pand  t1, mask1p
    pand  t2, mask0
    pxor  t1, t2
    pxor  t1, p0
    mova  %1, t1 ; store p0

    mova  t1, %4 ; p3
    paddb t2, t1, p2
    pavgb t1, p2
    pavgb t1, t0 ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4
    paddb t2, t2
    paddb t2, t4 ; 2*p3+3*p2+p1+p0+q0
    psrlw t2, 2
    pavgb t2, mpb_0
    pxor  t2, t1
    pand  t2, mpb_1
    psubb t1, t2 ; p2' = (2*p3+3*p2+p1+p0+q0+4)/8

    pxor  t0, p1
    pxor  t1, p2
    pand  t0, mask1p
    pand  t1, mask1p
    pxor  t0, p1
    pxor  t1, p2
    mova  %2, t0 ; store p1
    mova  %3, t1 ; store p2
%endmacro

%macro LUMA_INTRA_SWAP_PQ 0
    %define q1 m0
    %define q0 m1
    %define p0 m2
    %define p1 m3
    %define p2 q2
    %define mask1p mask1q
%endmacro

%macro DEBLOCK_LUMA_INTRA 1
    %define p1 m0
    %define p0 m1
    %define q0 m2
    %define q1 m3
    %define t0 m4
    %define t1 m5
    %define t2 m6
    %define t3 m7
%if ARCH_X86_64
    %define p2 m8
    %define q2 m9
    %define t4 m10
    %define t5 m11
    %define mask0 m12
    %define mask1p m13
    %define mask1q [rsp-24]
    %define mpb_0 m14
    %define mpb_1 m15
%else
    %define spill(x) [esp+16*x]
    %define p2 [r4+r1]
    %define q2 [r0+2*r1]
    %define t4 spill(0)
    %define t5 spill(1)
    %define mask0 spill(2)
    %define mask1p spill(3)
    %define mask1q spill(4)
    %define mpb_0 [pb_0]
    %define mpb_1 [pb_1]
%endif

;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_%1_luma_intra_8, 4,6,16,ARCH_X86_64*0x50-0x50
    lea     r4, [r1*4]
    lea     r5, [r1*3] ; 3*stride
    dec     r2d        ; alpha-1
    jl .end
    neg     r4
    dec     r3d        ; beta-1
    jl .end
    add     r4, r0     ; pix-4*stride
    mova    p1, [r4+2*r1]
    mova    p0, [r4+r5]
    mova    q0, [r0]
    mova    q1, [r0+r1]
%if ARCH_X86_64
    pxor    mpb_0, mpb_0
    mova    mpb_1, [pb_1]
    LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
    SWAP    7, 12 ; m12=mask0
    pavgb   t5, mpb_0
    pavgb   t5, mpb_1 ; alpha/4+1
    movdqa  p2, [r4+r1]
    movdqa  q2, [r0+2*r1]
    DIFF_GT2 p0, q0, t5, t0, t3 ; t0 = |p0-q0| > alpha/4+1
    DIFF_GT2 p0, p2, m5, t2, t5 ; mask1 = |p2-p0| > beta-1
    DIFF_GT2 q0, q2, m5, t4, t5 ; t4 = |q2-q0| > beta-1
    pand    t0, mask0
    pand    t4, t0
    pand    t2, t0
    mova    mask1q, t4
    mova    mask1p, t2
%else
    LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
    mova    m4, t5
    mova    mask0, m7
    pavgb   m4, [pb_0]
    pavgb   m4, [pb_1] ; alpha/4+1
    DIFF_GT2 p0, q0, m4, m6, m7 ; m6 = |p0-q0| > alpha/4+1
    pand    m6, mask0
    DIFF_GT2 p0, p2, m5, m4, m7 ; m4 = |p2-p0| > beta-1
    pand    m4, m6
    mova    mask1p, m4
    DIFF_GT2 q0, q2, m5, m4, m7 ; m4 = |q2-q0| > beta-1
    pand    m4, m6
    mova    mask1q, m4
%endif
    LUMA_INTRA_P012 [r4+r5], [r4+2*r1], [r4+r1], [r4]
    LUMA_INTRA_SWAP_PQ
    LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5]
.end:
    RET

INIT_MMX cpuname
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_8, 4,9
    movsxd r7,  r1d
    lea    r8,  [r7*3]
    lea    r6,  [r0-4]
    lea    r5,  [r0-4+r8]
    sub    rsp, 0x88
    %define pix_tmp rsp

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM  PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    lea    r6, [r6+r7*8]
    lea    r5, [r5+r7*8]
    TRANSPOSE8x8_MEM  PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea    r0,  [pix_tmp+0x40]
    mov    r1,  0x10
    call   deblock_v_luma_intra_8

    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    lea    r5, [r6+r8]
    TRANSPOSE8x8_MEM  PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
    shl    r7,  3
    sub    r6,  r7
    sub    r5,  r7
    shr    r7,  3
    TRANSPOSE8x8_MEM  PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
    add    rsp, 0x88
    RET
%else
cglobal deblock_h_luma_intra_8, 2,4,8,0x80
    lea    r3,  [r1*3]
    sub    r0,  4
    lea    r2,  [r0+r3]
    %define pix_tmp rsp

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM  PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    lea    r0,  [r0+r1*8]
    lea    r2,  [r2+r1*8]
    TRANSPOSE8x8_MEM  PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea    r0,  [pix_tmp+0x40]
    PUSH   dword r3m
    PUSH   dword r2m
    PUSH   dword 16
    PUSH   r0
    call   deblock_%1_luma_intra_8
%ifidn %1, v8
    add    dword [rsp], 8 ; pix_tmp+8
    call   deblock_%1_luma_intra_8
%endif
    ADD    esp, 16

    mov    r1,  r1m
    mov    r0,  r0mp
    lea    r3,  [r1*3]
    sub    r0,  4
    lea    r2,  [r0+r3]
    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    TRANSPOSE8x8_MEM  PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
    lea    r0,  [r0+r1*8]
    lea    r2,  [r2+r1*8]
    TRANSPOSE8x8_MEM  PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
    RET
%endif ; ARCH_X86_64
%endmacro ; DEBLOCK_LUMA_INTRA

INIT_XMM sse2
DEBLOCK_LUMA_INTRA v
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA_INTRA v
%endif
%if ARCH_X86_64 == 0
INIT_MMX mmxext
DEBLOCK_LUMA_INTRA v8
%endif

INIT_MMX mmxext

%macro CHROMA_V_START 0
    dec    r2d      ; alpha-1
    dec    r3d      ; beta-1
    mov    t5, r0
    sub    t5, r1
    sub    t5, r1
%endmacro

%macro CHROMA_H_START 0
    dec    r2d
    dec    r3d
    sub    r0, 2
    lea    t6, [r1*3]
    mov    t5, r0
    add    r0, t6
%endmacro
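; in both cases t5 ends up pointing at p1: pix-2*stride for the vertical filter,
; pix-2 (before transposition) for the horizontal one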

%define t5 r5
%define t6 r6

;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_8, 5,6
    CHROMA_V_START
    movq  m0, [t5]
    movq  m1, [t5+r1]
    movq  m2, [r0]
    movq  m3, [r0+r1]
    call ff_chroma_inter_body_mmxext
    movq  [t5+r1], m1
    movq  [r0], m2
    RET

;-----------------------------------------------------------------------------
; void ff_deblock_h_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_h_chroma_8, 5,7
%if UNIX64
    %define buf0 [rsp-24]
    %define buf1 [rsp-16]
%elif WIN64
    sub   rsp, 16
    %define buf0 [rsp]
    %define buf1 [rsp+8]
%else
    %define buf0 r0m
    %define buf1 r2m
%endif
    CHROMA_H_START
    TRANSPOSE4x8_LOAD  bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    movq  buf0, m0
    movq  buf1, m3
    LOAD_MASK  r2d, r3d
    movd       m6, [r4] ; tc0
    punpcklbw  m6, m6
    pand       m7, m6
    DEBLOCK_P0_Q0
    movq  m0, buf0
    movq  m3, buf1
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
%if WIN64
    add   rsp, 16
%endif
    RET

ALIGN 16
ff_chroma_inter_body_mmxext:
    LOAD_MASK  r2d, r3d
    movd       m6, [r4] ; tc0
    punpcklbw  m6, m6
    pand       m7, m6
    DEBLOCK_P0_Q0
    ret



; in: %1=p0 %2=p1 %3=q1
; out: p0 = (p0 + q1 + 2*p1 + 2) >> 2
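; (pavgb rounds up, so avg(p0,q1) is first corrected down by (p0^q1)&1, the carry
;  bit of p0+q1, which makes the second average land exactly on (p0+q1+2*p1+2)>>2)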
%macro CHROMA_INTRA_P0 3
    movq    m4, %1
    pxor    m4, %3
    pand    m4, [pb_1] ; m4 = (p0^q1)&1
    pavgb   %1, %3
    psubusb %1, m4
    pavgb   %1, %2             ; dst = avg(p1, avg(p0,q1) - ((p0^q1)&1))
%endmacro

%define t5 r4
%define t6 r5

;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_8, 4,5
    CHROMA_V_START
    movq  m0, [t5]
    movq  m1, [t5+r1]
    movq  m2, [r0]
    movq  m3, [r0+r1]
    call ff_chroma_intra_body_mmxext
    movq  [t5+r1], m1
    movq  [r0], m2
    RET

;-----------------------------------------------------------------------------
; void ff_deblock_h_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_chroma_intra_8, 4,6
    CHROMA_H_START
    TRANSPOSE4x8_LOAD  bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    call ff_chroma_intra_body_mmxext
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
    RET

ALIGN 16
ff_chroma_intra_body_mmxext:
    LOAD_MASK r2d, r3d
    movq   m5, m1
    movq   m6, m2
    CHROMA_INTRA_P0  m1, m0, m3
    CHROMA_INTRA_P0  m2, m3, m0
    psubb  m1, m5
    psubb  m2, m6
    pand   m1, m7
    pand   m2, m7
    paddb  m1, m5
    paddb  m2, m6
    ret

;-----------------------------------------------------------------------------
; void h264_loop_filter_strength(int16_t bs[2][4][4], uint8_t nnz[40],
;                                int8_t ref[2][40], int16_t mv[2][40][2],
;                                int bidir,    int edges,    int step,
;                                int mask_mv0, int mask_mv1, int field);
;
; bidir    is 0 or 1
; edges    is 1 or 4
; step     is 1 or 2
; mask_mv0 is 0 or 3
; mask_mv1 is 0 or 1
; field    is 0 or 1
;-----------------------------------------------------------------------------
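; For each 4-sample edge segment the strength stored in bs is
;   2  if either side has nonzero coefficients (nnz),
;   1  if the refs differ or any mv component differs by 4 or more
;      (2 or more for the vertical component in field mode),
;   0  otherwise;
; only strengths 0..2 are produced here, the intra strengths (3/4) never are.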
%macro loop_filter_strength_iteration 7 ; edges, step, mask_mv,
                                        ; dir, d_idx, mask_dir, bidir
%define edgesd    %1
%define stepd     %2
%define mask_mvd  %3
%define dir       %4
%define d_idx     %5
%define mask_dir  %6
%define bidir     %7
    xor          b_idxd, b_idxd ; for (b_idx = 0; b_idx < edges; b_idx += step)
%%.b_idx_loop:
%if mask_dir == 0
    pxor             m0, m0
%endif
    test         b_idxd, dword mask_mvd
    jnz %%.skip_loop_iter                       ; if (!(b_idx & mask_mv))
%if bidir == 1
    movd             m2, [refq+b_idxq+d_idx+12] ; { ref0[bn] }
    punpckldq        m2, [refq+b_idxq+d_idx+52] ; { ref0[bn], ref1[bn] }
    pshufw           m0, [refq+b_idxq+12], 0x44 ; { ref0[b],  ref0[b]  }
    pshufw           m1, [refq+b_idxq+52], 0x44 ; { ref1[b],  ref1[b]  }
    pshufw           m3, m2, 0x4E               ; { ref1[bn], ref0[bn] }
    psubb            m0, m2                     ; { ref0[b] != ref0[bn],
                                                ;   ref0[b] != ref1[bn] }
    psubb            m1, m3                     ; { ref1[b] != ref1[bn],
                                                ;   ref1[b] != ref0[bn] }

    por              m0, m1
    mova             m1, [mvq+b_idxq*4+(d_idx+12)*4]
    mova             m2, [mvq+b_idxq*4+(d_idx+12)*4+mmsize]
    mova             m3, m1
    mova             m4, m2
    psubw            m1, [mvq+b_idxq*4+12*4]
    psubw            m2, [mvq+b_idxq*4+12*4+mmsize]
    psubw            m3, [mvq+b_idxq*4+52*4]
    psubw            m4, [mvq+b_idxq*4+52*4+mmsize]
    packsswb         m1, m2
    packsswb         m3, m4
    paddb            m1, m6
    paddb            m3, m6
    psubusb          m1, m5 ; abs(mv[b] - mv[bn]) >= limit
    psubusb          m3, m5
    packsswb         m1, m3

    por              m0, m1
    mova             m1, [mvq+b_idxq*4+(d_idx+52)*4]
    mova             m2, [mvq+b_idxq*4+(d_idx+52)*4+mmsize]
    mova             m3, m1
    mova             m4, m2
    psubw            m1, [mvq+b_idxq*4+12*4]
    psubw            m2, [mvq+b_idxq*4+12*4+mmsize]
    psubw            m3, [mvq+b_idxq*4+52*4]
    psubw            m4, [mvq+b_idxq*4+52*4+mmsize]
    packsswb         m1, m2
    packsswb         m3, m4
    paddb            m1, m6
    paddb            m3, m6
    psubusb          m1, m5 ; abs(mv[b] - mv[bn]) >= limit
    psubusb          m3, m5
    packsswb         m1, m3

    pshufw           m1, m1, 0x4E
    por              m0, m1
    pshufw           m1, m0, 0x4E
    pminub           m0, m1
%else ; bidir == 0
    movd             m0, [refq+b_idxq+12]
    psubb            m0, [refq+b_idxq+d_idx+12] ; ref[b] != ref[bn]

    mova             m1, [mvq+b_idxq*4+12*4]
    mova             m2, [mvq+b_idxq*4+12*4+mmsize]
    psubw            m1, [mvq+b_idxq*4+(d_idx+12)*4]
    psubw            m2, [mvq+b_idxq*4+(d_idx+12)*4+mmsize]
    packsswb         m1, m2
    paddb            m1, m6
    psubusb          m1, m5 ; abs(mv[b] - mv[bn]) >= limit
    packsswb         m1, m1
    por              m0, m1
%endif ; bidir == 1/0

%%.skip_loop_iter:
    movd             m1, [nnzq+b_idxq+12]
    por              m1, [nnzq+b_idxq+d_idx+12] ; nnz[b] || nnz[bn]

    pminub           m1, m7
    pminub           m0, m7
    psllw            m1, 1
    pxor             m2, m2
    pmaxub           m1, m0
    punpcklbw        m1, m2
    movq [bsq+b_idxq+32*dir], m1

    add          b_idxd, dword stepd
    cmp          b_idxd, dword edgesd
    jl %%.b_idx_loop
%endmacro

INIT_MMX mmxext
cglobal h264_loop_filter_strength, 9, 9, 0, bs, nnz, ref, mv, bidir, edges, \
                                            step, mask_mv0, mask_mv1, field
%define b_idxq bidirq
%define b_idxd bidird
    cmp    dword fieldm, 0
    mova             m7, [pb_1]
    mova             m5, [pb_3]
    je .nofield
    mova             m5, [pb_3_1]
.nofield:
    mova             m6, m5
    paddb            m5, m5

    shl     dword stepd, 3
    shl    dword edgesd, 3
%if ARCH_X86_32
%define mask_mv0d mask_mv0m
%define mask_mv1d mask_mv1m
%endif
    shl dword mask_mv1d, 3
    shl dword mask_mv0d, 3

    cmp    dword bidird, 0
    jne .bidir
    loop_filter_strength_iteration edgesd, stepd, mask_mv1d, 1, -8,  0, 0
    loop_filter_strength_iteration     32,     8, mask_mv0d, 0, -1, -1, 0

    mova             m0, [bsq+mmsize*0]
    mova             m1, [bsq+mmsize*1]
    mova             m2, [bsq+mmsize*2]
    mova             m3, [bsq+mmsize*3]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    mova  [bsq+mmsize*0], m0
    mova  [bsq+mmsize*1], m1
    mova  [bsq+mmsize*2], m2
    mova  [bsq+mmsize*3], m3
    RET

.bidir:
    loop_filter_strength_iteration edgesd, stepd, mask_mv1d, 1, -8,  0, 1
    loop_filter_strength_iteration     32,     8, mask_mv0d, 0, -1, -1, 1

    mova             m0, [bsq+mmsize*0]
    mova             m1, [bsq+mmsize*1]
    mova             m2, [bsq+mmsize*2]
    mova             m3, [bsq+mmsize*3]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    mova  [bsq+mmsize*0], m0
    mova  [bsq+mmsize*1], m1
    mova  [bsq+mmsize*2], m2
    mova  [bsq+mmsize*3], m3
    RET