;******************************************************************************
;* VP9 loop filter SIMD optimizations
;*
;* Copyright (C) 2013-2014 Clément Bœsch <u pkh me>
;* Copyright (C) 2014 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

cextern pb_3
cextern pb_80

pb_4:   times 16 db 0x04
pb_10:  times 16 db 0x10
pb_40:  times 16 db 0x40
pb_81:  times 16 db 0x81
pb_f8:  times 16 db 0xf8
pb_fe:  times 16 db 0xfe
pb_ff:  times 16 db 0xff

cextern pw_4
cextern pw_8

; with the mix functions, two 8-bit thresholds are packed into one 16-bit
; value; the following mask is used to splat both into the same register
mask_mix: times 8 db 0
          times 8 db 1

mask_mix84: times 8 db 0xff
            times 8 db 0x00
mask_mix48: times 8 db 0x00
            times 8 db 0xff
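; (mask_mix84/mask_mix48 select the 8-pixel half of a mixed-size block; they
; are AND'ed into the flat8in mask in the %2 == 84/48 cases below)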

SECTION .text

%macro SCRATCH 3
%if ARCH_X86_64
    SWAP                %1, %2
%else
    mova              [%3], m%1
%endif
%endmacro

%macro UNSCRATCH 3
%if ARCH_X86_64
    SWAP                %1, %2
%else
    mova               m%1, [%3]
%endif
%endmacro

; %1 = abs(%2-%3)
%macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp
%if ARCH_X86_64
    psubusb             %1, %3, %2
    psubusb             %4, %2, %3
%else
    mova                %1, %3
    mova                %4, %2
    psubusb             %1, %2
    psubusb             %4, %3
%endif
    por                 %1, %4
%endmacro
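; (psubusb saturates to zero, so OR-ing the two one-sided differences gives
; the absolute difference without needing a compare)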

; %1 = %1>%2
%macro CMP_GT 2-3 ; src/dst, cmp, pb_80
%if %0 == 3
    pxor                %1, %3
%endif
    pcmpgtb             %1, %2
%endmacro

; %1 = abs(%2-%3) > %4
%macro ABSSUB_GT 5-6 [pb_80]; dst, src1, src2, cmp, tmp, [pb_80]
    ABSSUB              %1, %2, %3, %5      ; dst = abs(src1-src2)
    CMP_GT              %1, %4, %6          ; dst > cmp
%endmacro

%macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp
    pand                %1, %3              ; new &= mask
    pandn               %4, %3, %2          ; tmp = ~mask & old
    por                 %1, %4              ; new&mask | old&~mask
%endmacro

%macro UNPACK 4
%if ARCH_X86_64
    punpck%1bw          %2, %3, %4
%else
    mova                %2, %3
    punpck%1bw          %2, %4
%endif
%endmacro

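; The 6- and 14-tap smooth filters below are evaluated as sliding sums: each
; FILTER_SUBx2_ADDx2 call drops two old taps from the running 16-bit sum in
; %3, adds two new ones, then right-shifts by %9 to produce one output row.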
%macro FILTER_SUBx2_ADDx2 11 ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1
                             ; %8=add2 %9=rshift, [unpack], [unpack_is_mem_on_x86_32]
    psubw               %3, [rsp+%4+%5*32]
    psubw               %3, [rsp+%4+%6*32]
    paddw               %3, [rsp+%4+%7*32]
%ifnidn %10, ""
%if %11 == 0
    punpck%2bw          %1, %10, m0
%else
    UNPACK          %2, %1, %10, m0
%endif
    mova    [rsp+%4+%8*32], %1
    paddw               %3, %1
%else
    paddw               %3, [rsp+%4+%8*32]
%endif
    psraw               %1, %3, %9
%endmacro

; FIXME interleave l/h better (for instruction pairing)
%macro FILTER_INIT 9 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, filterid, mask, source
    FILTER%7_INIT       %1, l, %3, %6 +  0
    FILTER%7_INIT       %2, h, %4, %6 + 16
    packuswb            %1, %2
    MASK_APPLY          %1, %9, %8, %2
    mova                %5, %1
%endmacro


%macro FILTER_UPDATE 12-16 "", "", "", 0 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, sub1, sub2, add1, add2, rshift,
                                         ; mask, [source], [unpack + src], [unpack_is_mem_on_x86_32]
; FIXME interleave this properly with the subx2/addx2
%ifnidn %15, ""
%if %16 == 0 || ARCH_X86_64
    mova               %14, %15
%endif
%endif
    FILTER_SUBx2_ADDx2  %1, l, %3, %6 +  0, %7, %8, %9, %10, %11, %14, %16
    FILTER_SUBx2_ADDx2  %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14, %16
    packuswb            %1, %2
%ifnidn %13, ""
    MASK_APPLY          %1, %13, %12, %2
%else
    MASK_APPLY          %1, %5, %12, %2
%endif
    mova                %5, %1
%endmacro

%macro SRSHIFT3B_2X 4 ; reg1, reg2, [pb_10], tmp
    mova                %4, [pb_f8]
    pand                %1, %4
    pand                %2, %4
    psrlq               %1, 3
    psrlq               %2, 3
    pxor                %1, %3
    pxor                %2, %3
    psubb               %1, %3
    psubb               %2, %3
%endmacro
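; Illustrative scalar equivalent of SRSHIFT3B_2X (SSE2 has no per-byte
; arithmetic shift, so it is emulated with a logical shift plus a sign
; fixup; %3 is pb_10, i.e. the sign bit 0x80 shifted right by 3):
;     t  = (x & 0xf8) >> 3;  /* logical shift of the sign-masked byte */
;     t ^= 0x10; t -= 0x10;  /* xor+sub sign-extends bit 4: arithmetic x >> 3 */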

%macro EXTRACT_POS_NEG 3 ; i8, neg, pos
    pxor                %3, %3
    pxor                %2, %2
    pcmpgtb             %3, %1                          ; i8 < 0 mask
    psubb               %2, %1                          ; 0 - i8 (only the originally negative values are kept below)
    pand                %2, %3                          ; negative values of i8, stored as positive
    pandn               %3, %1                          ; positive values of i8
%endmacro

; clip_u8(u8 + i8)
%macro SIGN_ADD 4 ; dst, u8, i8, tmp1
    EXTRACT_POS_NEG     %3, %4, %1
    paddusb             %1, %2                          ; add the positives
    psubusb             %1, %4                          ; sub the negatives
%endmacro

; clip_u8(u8 - i8)
%macro SIGN_SUB 4 ; dst, u8, i8, tmp1
    EXTRACT_POS_NEG     %3, %1, %4
    paddusb             %1, %2                          ; add the negatives
    psubusb             %1, %4                          ; sub the positives
%endmacro
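; Scalar sketch of SIGN_ADD/SIGN_SUB (illustrative): the signed i8 term is
; split into its positive and negative magnitudes so that both parts can be
; applied with unsigned saturating ops, e.g. for SIGN_ADD:
;     pos = i8 > 0 ?  i8 : 0;
;     neg = i8 < 0 ? -i8 : 0;
;     dst = sat_sub_u8(sat_add_u8(u8, pos), neg);  /* == clip_u8(u8 + i8) */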

%macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
    UNPACK          %2, %1, rp3, m0                     ; p3: B->W
    mova     [rsp+%4+0*32], %1
    paddw               %3, %1, %1                      ; p3*2
    paddw               %3, %1                          ; p3*3
    punpck%2bw          %1, m1,  m0                     ; p2: B->W
    mova     [rsp+%4+1*32], %1
    paddw               %3, %1                          ; p3*3 + p2
    paddw               %3, %1                          ; p3*3 + p2*2
    UNPACK          %2, %1, rp1, m0                     ; p1: B->W
    mova     [rsp+%4+2*32], %1
    paddw               %3, %1                          ; p3*3 + p2*2 + p1
    UNPACK          %2, %1, rp0, m0                     ; p0: B->W
    mova     [rsp+%4+3*32], %1
    paddw               %3, %1                          ; p3*3 + p2*2 + p1 + p0
    UNPACK          %2, %1, rq0, m0                     ; q0: B->W
    mova     [rsp+%4+4*32], %1
    paddw               %3, %1                          ; p3*3 + p2*2 + p1 + p0 + q0
    paddw               %3, [pw_4]                      ; p3*3 + p2*2 + p1 + p0 + q0 + 4
    psraw               %1, %3, 3                       ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3
%endmacro

%macro FILTER14_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
    punpck%2bw          %1, m2, m0                      ; p7: B->W
    mova    [rsp+%4+ 8*32], %1
    psllw               %3, %1, 3                       ; p7*8
    psubw               %3, %1                          ; p7*7
    punpck%2bw          %1, m3, m0                      ; p6: B->W
    mova    [rsp+%4+ 9*32], %1
    paddw               %3, %1                          ; p7*7 + p6
    paddw               %3, %1                          ; p7*7 + p6*2
    UNPACK          %2, %1, rp5, m0                     ; p5: B->W
    mova    [rsp+%4+10*32], %1
    paddw               %3, %1                          ; p7*7 + p6*2 + p5
    UNPACK          %2, %1, rp4, m0                     ; p4: B->W
    mova    [rsp+%4+11*32], %1
    paddw               %3, %1                          ; p7*7 + p6*2 + p5 + p4
    paddw               %3, [rsp+%4+ 0*32]              ; p7*7 + p6*2 + p5 + p4 + p3
    paddw               %3, [rsp+%4+ 1*32]              ; p7*7 + p6*2 + p5 + .. + p2
    paddw               %3, [rsp+%4+ 2*32]              ; p7*7 + p6*2 + p5 + .. + p1
    paddw               %3, [rsp+%4+ 3*32]              ; p7*7 + p6*2 + p5 + .. + p0
    paddw               %3, [rsp+%4+ 4*32]              ; p7*7 + p6*2 + p5 + .. + p0 + q0
    paddw               %3, [pw_8]                      ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8
    psraw               %1, %3, 4                       ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4
%endmacro

%macro TRANSPOSE16x16B 17
    mova %17, m%16
    SBUTTERFLY bw,  %1,  %2,  %16
    SBUTTERFLY bw,  %3,  %4,  %16
    SBUTTERFLY bw,  %5,  %6,  %16
    SBUTTERFLY bw,  %7,  %8,  %16
    SBUTTERFLY bw,  %9,  %10, %16
    SBUTTERFLY bw,  %11, %12, %16
    SBUTTERFLY bw,  %13, %14, %16
    mova m%16,  %17
    mova  %17, m%14
    SBUTTERFLY bw,  %15, %16, %14
    SBUTTERFLY wd,  %1,  %3,  %14
    SBUTTERFLY wd,  %2,  %4,  %14
    SBUTTERFLY wd,  %5,  %7,  %14
    SBUTTERFLY wd,  %6,  %8,  %14
    SBUTTERFLY wd,  %9,  %11, %14
    SBUTTERFLY wd,  %10, %12, %14
    SBUTTERFLY wd,  %13, %15, %14
    mova m%14,  %17
    mova  %17, m%12
    SBUTTERFLY wd,  %14, %16, %12
    SBUTTERFLY dq,  %1,  %5,  %12
    SBUTTERFLY dq,  %2,  %6,  %12
    SBUTTERFLY dq,  %3,  %7,  %12
    SBUTTERFLY dq,  %4,  %8,  %12
    SBUTTERFLY dq,  %9,  %13, %12
    SBUTTERFLY dq,  %10, %14, %12
    SBUTTERFLY dq,  %11, %15, %12
    mova m%12, %17
    mova  %17, m%8
    SBUTTERFLY dq,  %12, %16, %8
    SBUTTERFLY qdq, %1,  %9,  %8
    SBUTTERFLY qdq, %2,  %10, %8
    SBUTTERFLY qdq, %3,  %11, %8
    SBUTTERFLY qdq, %4,  %12, %8
    SBUTTERFLY qdq, %5,  %13, %8
    SBUTTERFLY qdq, %6,  %14, %8
    SBUTTERFLY qdq, %7,  %15, %8
    mova m%8, %17
    mova %17, m%1
    SBUTTERFLY qdq, %8,  %16, %1
    mova m%1, %17
    SWAP %2,  %9
    SWAP %3,  %5
    SWAP %4,  %13
    SWAP %6,  %11
    SWAP %8,  %15
    SWAP %12, %14
%endmacro
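; (%17 is a 16-byte memory slot: a full 16x16 byte transpose needs a 17th
; temporary, so one register at a time is spilled there)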

%macro TRANSPOSE8x8B 13
    SBUTTERFLY bw,  %1, %2, %7
    movdq%10 m%7, %9
    movdqa %11, m%2
    SBUTTERFLY bw,  %3, %4, %2
    SBUTTERFLY bw,  %5, %6, %2
    SBUTTERFLY bw,  %7, %8, %2
    SBUTTERFLY wd,  %1, %3, %2
    movdqa m%2, %11
    movdqa %11, m%3
    SBUTTERFLY wd,  %2, %4, %3
    SBUTTERFLY wd,  %5, %7, %3
    SBUTTERFLY wd,  %6, %8, %3
    SBUTTERFLY dq, %1, %5, %3
    SBUTTERFLY dq, %2, %6, %3
    movdqa m%3, %11
    movh   %12, m%2
    movhps %13, m%2
    SBUTTERFLY dq, %3, %7, %2
    SBUTTERFLY dq, %4, %8, %2
    SWAP %2, %5
    SWAP %4, %7
%endmacro

%macro DEFINE_REAL_P7_TO_Q7 0-1 0
%define P7 dstq  + 4*mstrideq  + %1
%define P6 dstq  +   mstride3q + %1
%define P5 dstq  + 2*mstrideq  + %1
%define P4 dstq  +   mstrideq  + %1
%define P3 dstq                + %1
%define P2 dstq  +    strideq  + %1
%define P1 dstq  + 2* strideq  + %1
%define P0 dstq  +    stride3q + %1
%define Q0 dstq  + 4* strideq  + %1
%define Q1 dst2q +   mstride3q + %1
%define Q2 dst2q + 2*mstrideq  + %1
%define Q3 dst2q +   mstrideq  + %1
%define Q4 dst2q               + %1
%define Q5 dst2q +    strideq  + %1
%define Q6 dst2q + 2* strideq  + %1
%define Q7 dst2q +    stride3q + %1
%endmacro
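; (dstq is set up to point at row P3 and dst2q at Q4 = dstq + 8*strideq, so
; every one of the 16 rows is reachable within +/-4 strides of one of them)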

%macro DEFINE_TRANSPOSED_P7_TO_Q7 0-1 0
%define P3 rsp +   0 + %1
%define P2 rsp +  16 + %1
%define P1 rsp +  32 + %1
%define P0 rsp +  48 + %1
%define Q0 rsp +  64 + %1
%define Q1 rsp +  80 + %1
%define Q2 rsp +  96 + %1
%define Q3 rsp + 112 + %1
%define P7 rsp + 128 + %1
%define P6 rsp + 144 + %1
%define P5 rsp + 160 + %1
%define P4 rsp + 176 + %1
%define Q4 rsp + 192 + %1
%define Q5 rsp + 208 + %1
%define Q6 rsp + 224 + %1
%define Q7 rsp + 240 + %1
%endmacro
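; (P3..Q3 sit in the first 128 bytes of the scratch area: the sub-16 filter
; sizes only access those eight rows, so their h variants can use a
; 128-byte transpose buffer)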

; ..............AB -> AAAAAAAABBBBBBBB
%macro SPLATB_MIX 1-2 [mask_mix]
%if cpuflag(ssse3)
    pshufb     %1, %2
%else
    punpcklbw  %1, %1
    punpcklwd  %1, %1
    punpckldq  %1, %1
%endif
%endmacro
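; Lane trace of the pre-SSSE3 path above (illustrative):
;     ..............AB -> ............AABB -> ........AAAABBBB -> AAAAAAAABBBBBBBB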

%macro LOOPFILTER 5 ; %1=v/h %2=size1 %3+%4=stack, %5=32bit stack only
%if UNIX64
cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3 + %4, dst, stride, E, I, H, mstride, dst2, stride3, mstride3
%else
%if WIN64
cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3 + %4, dst, stride, E, I, mstride, dst2, stride3, mstride3
%else
cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, dst2, stride3, mstride3
%define Ed dword r2m
%define Id dword r3m
%endif
%define Hd dword r4m
%endif

    mov               mstrideq, strideq
    neg               mstrideq

    lea               stride3q, [strideq*3]
    lea              mstride3q, [mstrideq*3]

%ifidn %1, h
%if %2 > 16
%define movx movh
    lea                   dstq, [dstq + 4*strideq - 4]
%else
%define movx movu
    lea                   dstq, [dstq + 4*strideq - 8] ; go from top center (h pos) to center left (v pos)
%endif
    lea                  dst2q, [dstq + 8*strideq]
%else
    lea                   dstq, [dstq + 4*mstrideq]
    lea                  dst2q, [dstq + 8*strideq]
%endif

    DEFINE_REAL_P7_TO_Q7

%ifidn %1, h
    movx                    m0, [P7]
    movx                    m1, [P6]
    movx                    m2, [P5]
    movx                    m3, [P4]
    movx                    m4, [P3]
    movx                    m5, [P2]
%if ARCH_X86_64 || %2 != 16
    movx                    m6, [P1]
%endif
    movx                    m7, [P0]
%if ARCH_X86_64
    movx                    m8, [Q0]
    movx                    m9, [Q1]
    movx                   m10, [Q2]
    movx                   m11, [Q3]
    movx                   m12, [Q4]
    movx                   m13, [Q5]
    movx                   m14, [Q6]
    movx                   m15, [Q7]
    DEFINE_TRANSPOSED_P7_TO_Q7
%if %2 == 16
    TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
    mova           [P7],  m0
    mova           [P6],  m1
    mova           [P5],  m2
    mova           [P4],  m3
%else ; %2 == 44/48/84/88
    ; 8x16 transpose
    punpcklbw        m0,  m1
    punpcklbw        m2,  m3
    punpcklbw        m4,  m5
    punpcklbw        m6,  m7
    punpcklbw        m8,  m9
    punpcklbw       m10, m11
    punpcklbw       m12, m13
    punpcklbw       m14, m15
    TRANSPOSE8x8W     0, 2, 4, 6, 8, 10, 12, 14, 15
    SWAP              0,  4
    SWAP              2,  5
    SWAP              0,  6
    SWAP              0,  7
    SWAP             10,  9
    SWAP             12, 10
    SWAP             14, 11
%endif ; %2
    mova           [P3],  m4
    mova           [P2],  m5
    mova           [P1],  m6
    mova           [P0],  m7
    mova           [Q0],  m8
    mova           [Q1],  m9
    mova           [Q2], m10
    mova           [Q3], m11
%if %2 == 16
    mova           [Q4], m12
    mova           [Q5], m13
    mova           [Q6], m14
    mova           [Q7], m15
%endif ; %2
%else ; x86-32
%if %2 == 16
    TRANSPOSE8x8B    0, 1, 2, 3, 4, 5, 6, 7, [P1], u, [rsp+%3+%4], [rsp+64], [rsp+80]
    DEFINE_TRANSPOSED_P7_TO_Q7
    movh          [P7], m0
    movh          [P5], m1
    movh          [P3], m2
    movh          [P1], m3
    movh          [Q2], m5
    movh          [Q4], m6
    movh          [Q6], m7
    movhps        [P6], m0
    movhps        [P4], m1
    movhps        [P2], m2
    movhps        [P0], m3
    movhps        [Q3], m5
    movhps        [Q5], m6
    movhps        [Q7], m7
    DEFINE_REAL_P7_TO_Q7
    movx                    m0, [Q0]
    movx                    m1, [Q1]
    movx                    m2, [Q2]
    movx                    m3, [Q3]
    movx                    m4, [Q4]
    movx                    m5, [Q5]
    movx                    m7, [Q7]
    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [Q6], u, [rsp+%3+%4], [rsp+72], [rsp+88]
    DEFINE_TRANSPOSED_P7_TO_Q7 8
    movh          [P7], m0
    movh          [P5], m1
    movh          [P3], m2
    movh          [P1], m3
    movh          [Q2], m5
    movh          [Q4], m6
    movh          [Q6], m7
    movhps        [P6], m0
    movhps        [P4], m1
    movhps        [P2], m2
    movhps        [P0], m3
    movhps        [Q3], m5
    movhps        [Q5], m6
    movhps        [Q7], m7
    DEFINE_TRANSPOSED_P7_TO_Q7
%else ; %2 == 44/48/84/88
    punpcklbw        m0, m1
    punpcklbw        m2, m3
    punpcklbw        m4, m5
    punpcklbw        m6, m7
    movx             m1, [Q0]
    movx             m3, [Q1]
    movx             m5, [Q2]
    movx             m7, [Q3]
    punpcklbw        m1, m3
    punpcklbw        m5, m7
    movx             m3, [Q4]
    movx             m7, [Q5]
    punpcklbw        m3, m7
    mova          [rsp], m3
    movx             m3, [Q6]
    movx             m7, [Q7]
    punpcklbw        m3, m7
    DEFINE_TRANSPOSED_P7_TO_Q7
    TRANSPOSE8x8W     0, 2, 4, 6, 1, 5, 7, 3, [rsp], [Q0], 1
    mova           [P3],  m0
    mova           [P2],  m2
    mova           [P1],  m4
    mova           [P0],  m6
    mova           [Q1],  m5
    mova           [Q2],  m7
    mova           [Q3],  m3
%endif ; %2
%endif ; x86-32/64
%endif ; %1 == h

    ; calc fm mask
%if %2 == 16
%if cpuflag(ssse3)
    pxor                m0, m0
%endif
    SPLATB_REG          m2, I, m0                       ; I I I I ...
    SPLATB_REG          m3, E, m0                       ; E E E E ...
%else
%if cpuflag(ssse3)
    mova                m0, [mask_mix]
%endif
    movd                m2, Id
    movd                m3, Ed
    SPLATB_MIX          m2, m0
    SPLATB_MIX          m3, m0
%endif
    mova                m0, [pb_80]
    pxor                m2, m0
    pxor                m3, m0
%if ARCH_X86_64
%ifidn %1, v
    mova                m8, [P3]
    mova                m9, [P2]
    mova               m10, [P1]
    mova               m11, [P0]
    mova               m12, [Q0]
    mova               m13, [Q1]
    mova               m14, [Q2]
    mova               m15, [Q3]
%else
    ; In the horizontal case, P3..Q3 are already present in some registers
    ; due to the previous transpose, so we just swap registers.
    SWAP                 8,  4, 12
    SWAP                 9,  5, 13
    SWAP                10,  6, 14
    SWAP                11,  7, 15
%endif
%define rp3 m8
%define rp2 m9
%define rp1 m10
%define rp0 m11
%define rq0 m12
%define rq1 m13
%define rq2 m14
%define rq3 m15
%else
%define rp3 [P3]
%define rp2 [P2]
%define rp1 [P1]
%define rp0 [P0]
%define rq0 [Q0]
%define rq1 [Q1]
%define rq2 [Q2]
%define rq3 [Q3]
%endif
    ABSSUB_GT           m5, rp3, rp2, m2, m7, m0        ; m5 = abs(p3-p2) <= I
    ABSSUB_GT           m1, rp2, rp1, m2, m7, m0        ; m1 = abs(p2-p1) <= I
    por                 m5, m1
    ABSSUB_GT           m1, rp1, rp0, m2, m7, m0        ; m1 = abs(p1-p0) <= I
    por                 m5, m1
    ABSSUB_GT           m1, rq0, rq1, m2, m7, m0        ; m1 = abs(q1-q0) <= I
    por                 m5, m1
    ABSSUB_GT           m1, rq1, rq2, m2, m7, m0        ; m1 = abs(q2-q1) <= I
    por                 m5, m1
    ABSSUB_GT           m1, rq2, rq3, m2, m7, m0        ; m1 = abs(q3-q2) <= I
    por                 m5, m1
    ABSSUB              m1, rp0, rq0, m7                ; abs(p0-q0)
    paddusb             m1, m1                          ; abs(p0-q0) * 2
    ABSSUB              m2, rp1, rq1, m7                ; abs(p1-q1)
    pand                m2, [pb_fe]                     ; drop lsb so shift can work
    psrlq               m2, 1                           ; abs(p1-q1)/2
    paddusb             m1, m2                          ; abs(p0-q0)*2 + abs(p1-q1)/2
    pxor                m1, m0
    pcmpgtb             m1, m3
    por                 m1, m5                          ; fm final value
    SWAP                 1, 3
    pxor                m3, [pb_ff]

    ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3)
    ; calc flat8in (if not 44_16) and hev masks
%if %2 != 44
    mova                m6, [pb_81]                     ; [1 1 1 1 ...] ^ 0x80
    ABSSUB_GT           m2, rp3, rp0, m6, m5            ; abs(p3 - p0) <= 1
%if ARCH_X86_64
    mova                m8, [pb_80]
%define rb80 m8
%else
%define rb80 [pb_80]
%endif
    ABSSUB_GT           m1, rp2, rp0, m6, m5, rb80      ; abs(p2 - p0) <= 1
    por                 m2, m1
    ABSSUB              m4, rp1, rp0, m5                ; abs(p1 - p0)
%if %2 == 16
%if cpuflag(ssse3)
    pxor                m0, m0
%endif
    SPLATB_REG          m7, H, m0                       ; H H H H ...
%else
    movd                m7, Hd
    SPLATB_MIX          m7
%endif
    pxor                m7, rb80
    pxor                m4, rb80
    pcmpgtb             m0, m4, m7                      ; abs(p1 - p0) > H (1/2 hev condition)
    CMP_GT              m4, m6                          ; abs(p1 - p0) <= 1
    por                 m2, m4                          ; (flat8in)
    ABSSUB              m4, rq1, rq0, m1                ; abs(q1 - q0)
    pxor                m4, rb80
    pcmpgtb             m5, m4, m7                      ; abs(q1 - q0) > H (2/2 hev condition)
    por                 m0, m5                          ; hev final value
    CMP_GT              m4, m6                          ; abs(q1 - q0) <= 1
    por                 m2, m4                          ; (flat8in)
    ABSSUB_GT           m1, rq2, rq0, m6, m5, rb80      ; abs(q2 - q0) <= 1
    por                 m2, m1
    ABSSUB_GT           m1, rq3, rq0, m6, m5, rb80      ; abs(q3 - q0) <= 1
    por                 m2, m1                          ; flat8in final value
    pxor                m2, [pb_ff]
%if %2 == 84 || %2 == 48
    pand                m2, [mask_mix%2]
%endif
%else
    mova                m6, [pb_80]
    movd                m7, Hd
    SPLATB_MIX          m7
    pxor                m7, m6
    ABSSUB              m4, rp1, rp0, m1                ; abs(p1 - p0)
    pxor                m4, m6
    pcmpgtb             m0, m4, m7                      ; abs(p1 - p0) > H (1/2 hev condition)
    ABSSUB              m4, rq1, rq0, m1                ; abs(q1 - q0)
    pxor                m4, m6
    pcmpgtb             m5, m4, m7                      ; abs(q1 - q0) > H (2/2 hev condition)
    por                 m0, m5                          ; hev final value
%endif

%if %2 == 16
    ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3)
    ; calc flat8out mask
%if ARCH_X86_64
    mova                m8, [P7]
    mova                m9, [P6]
%define rp7 m8
%define rp6 m9
%else
%define rp7 [P7]
%define rp6 [P6]
%endif
    ABSSUB_GT           m1, rp7, rp0, m6, m5            ; abs(p7 - p0) <= 1
    ABSSUB_GT           m7, rp6, rp0, m6, m5            ; abs(p6 - p0) <= 1
    por                 m1, m7
%if ARCH_X86_64
    mova                m8, [P5]
    mova                m9, [P4]
%define rp5 m8
%define rp4 m9
%else
%define rp5 [P5]
%define rp4 [P4]
%endif
    ABSSUB_GT           m7, rp5, rp0, m6, m5            ; abs(p5 - p0) <= 1
    por                 m1, m7
    ABSSUB_GT           m7, rp4, rp0, m6, m5            ; abs(p4 - p0) <= 1
    por                 m1, m7
%if ARCH_X86_64
    mova                m14, [Q4]
    mova                m15, [Q5]
%define rq4 m14
%define rq5 m15
%else
%define rq4 [Q4]
%define rq5 [Q5]
%endif
    ABSSUB_GT           m7, rq4, rq0, m6, m5            ; abs(q4 - q0) <= 1
    por                 m1, m7
    ABSSUB_GT           m7, rq5, rq0, m6, m5            ; abs(q5 - q0) <= 1
    por                 m1, m7
%if ARCH_X86_64
    mova                m14, [Q6]
    mova                m15, [Q7]
%define rq6 m14
%define rq7 m15
%else
%define rq6 [Q6]
%define rq7 [Q7]
%endif
    ABSSUB_GT           m7, rq6, rq0, m6, m5            ; abs(q6 - q0) <= 1
    por                 m1, m7
    ABSSUB_GT           m7, rq7, rq0, m6, m5            ; abs(q7 - q0) <= 1
    por                 m1, m7                          ; flat8out final value
    pxor                m1, [pb_ff]
%endif

    ; if (fm) {
    ;     if (out && in) filter_14()
    ;     else if (in)   filter_6()
    ;     else if (hev)  filter_2()
    ;     else           filter_4()
    ; }
    ;
    ; f14:                                                                            fm &  out &  in
    ; f6:  fm & ~f14 & in        => fm & ~(out & in) & in                          => fm & ~out &  in
    ; f2:  fm & ~f14 & ~f6 & hev => fm & ~(out & in) & ~(~out & in) & hev          => fm &  ~in &  hev
    ; f4:  fm & ~f14 & ~f6 & ~f2 => fm & ~(out & in) & ~(~out & in) & ~(~in & hev) => fm &  ~in & ~hev
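    ; (the f2/f4 reductions use ~(out & in) & ~(~out & in) =
    ;  (~out | ~in) & (out | ~in) = ~in)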

    ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7)
    ; filter2()
%if %2 != 44
    mova                m6, [pb_80]                     ; already in m6 if 44_16
    SCRATCH              2, 15, rsp+%3+%4
%if %2 == 16
    SCRATCH              1,  8, rsp+%3+%4+16
%endif
%endif
    pxor                m2, m6, rq0                     ; q0 ^ 0x80
    pxor                m4, m6, rp0                     ; p0 ^ 0x80
    psubsb              m2, m4                          ; (signed) q0 - p0
    pxor                m4, m6, rp1                     ; p1 ^ 0x80
    pxor                m5, m6, rq1                     ; q1 ^ 0x80
    psubsb              m4, m5                          ; (signed) p1 - q1
    paddsb              m4, m2                          ;   (q0 - p0) + (p1 - q1)
    paddsb              m4, m2                          ; 2*(q0 - p0) + (p1 - q1)
    paddsb              m4, m2                          ; 3*(q0 - p0) + (p1 - q1)
    paddsb              m6, m4, [pb_4]                  ; m6: f1 = clip(f + 4, 127)
    paddsb              m4, [pb_3]                      ; m4: f2 = clip(f + 3, 127)
%if ARCH_X86_64
    mova                m14, [pb_10]                    ; will be reused in filter4()
%define rb10 m14
%else
%define rb10 [pb_10]
%endif
    SRSHIFT3B_2X        m6, m4, rb10, m7                ; f1 and f2 sign byte shift by 3
    SIGN_SUB            m7, rq0, m6, m5                 ; m7 = q0 - f1
    SIGN_ADD            m1, rp0, m4, m5                 ; m1 = p0 + f2
%if %2 != 44
%if ARCH_X86_64
    pandn               m6, m15, m3                     ;  ~mask(in) & mask(fm)
%else
    mova                m6, [rsp+%3+%4]
    pandn               m6, m3
%endif
    pand                m6, m0                          ; (~mask(in) & mask(fm)) & mask(hev)
%else
    pand                m6, m3, m0
%endif
    MASK_APPLY          m7, rq0, m6, m5                 ; m7 = filter2(q0) & mask / we write it in filter4()
    MASK_APPLY          m1, rp0, m6, m5                 ; m1 = filter2(p0) & mask / we write it in filter4()

    ; (m0: hev, m1: p0', m2: q0-p0, m3: fm, m7: q0', [m8: flat8out], m10..13: p1 p0 q0 q1, m14: pb_10, [m15: flat8in])
    ; filter4()
    mova                m4, m2
    paddsb              m2, m4                          ; 2 * (q0 - p0)
    paddsb              m2, m4                          ; 3 * (q0 - p0)
    paddsb              m6, m2, [pb_4]                  ; m6:  f1 = clip(f + 4, 127)
    paddsb              m2, [pb_3]                      ; m2: f2 = clip(f + 3, 127)
    SRSHIFT3B_2X        m6, m2, rb10, m4                ; f1 and f2 sign byte shift by 3
%if %2 != 44
%if ARCH_X86_64
    pandn               m5, m15, m3                     ;               ~mask(in) & mask(fm)
%else
    mova                m5, [rsp+%3+%4]
    pandn               m5, m3
%endif
    pandn               m0, m5                          ; ~mask(hev) & (~mask(in) & mask(fm))
%else
    pandn               m0, m3
%endif
    SIGN_SUB            m5, rq0, m6, m4                 ; q0 - f1
    MASK_APPLY          m5, m7, m0, m4                  ; filter4(q0) & mask
    mova                [Q0], m5
    SIGN_ADD            m7, rp0, m2, m4                 ; p0 + f2
    MASK_APPLY          m7, m1, m0, m4                  ; filter4(p0) & mask
    mova                [P0], m7
    paddb               m6, [pb_80]                     ; f1 + 0x80 (to unsigned domain)
    pxor                m1, m1                          ; f = (f1 + 1) >> 1:
    pavgb               m6, m1                          ; pavgb(x, 0) = (x + 1) >> 1
    psubb               m6, [pb_40]                     ; back to signed domain
    SIGN_ADD            m1, rp1, m6, m2                 ; p1 + f
    SIGN_SUB            m4, rq1, m6, m2                 ; q1 - f
    MASK_APPLY          m1, rp1, m0, m2                 ; m1 = filter4(p1)
    MASK_APPLY          m4, rq1, m0, m2                 ; m4 = filter4(q1)
    mova                [P1], m1
    mova                [Q1], m4

%if %2 != 44
    UNSCRATCH            2, 15, rsp+%3+%4
%endif

    ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1)
    ; filter6()
%if %2 != 44
    pxor                m0, m0
%if %2 > 16
    pand                m3, m2
%else
    pand                m2, m3                          ;               mask(fm) & mask(in)
%if ARCH_X86_64
    pandn               m3, m8, m2                      ; ~mask(out) & (mask(fm) & mask(in))
%else
    mova                m3, [rsp+%3+%4+16]
    pandn               m3, m2
%endif
%endif
%if ARCH_X86_64
    mova               m14, [P3]
    mova                m9, [Q3]
%define rp3 m14
%define rq3 m9
%else
%define rp3 [P3]
%define rq3 [Q3]
%endif
    mova                m1, [P2]
    FILTER_INIT         m4, m5, m6, m7, [P2], %4, 6,             m3,  m1             ; [p2]
    mova                m1, [Q2]
    FILTER_UPDATE       m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3,  "", rq1, "", 1 ; [p1] -p3 -p2 +p1 +q1
    FILTER_UPDATE       m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3,  "", m1         ; [p0] -p3 -p1 +p0 +q2
    FILTER_UPDATE       m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3,  "", rq3, "", 1 ; [q0] -p3 -p0 +q0 +q3
    FILTER_UPDATE       m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3,  ""             ; [q1] -p2 -q0 +q1 +q3
    FILTER_UPDATE       m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3,  m1             ; [q2] -p1 -q1 +q2 +q3
%endif

%if %2 == 16
    UNSCRATCH            1,  8, rsp+%3+%4+16
%endif

    ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2)
    ; filter14()
    ;
    ;                            m2  m3  m8  m9 m14 m15 m10 m11 m12 m13
    ;
    ;                                    q2  q3  p3  p2  p1  p0  q0  q1
    ; p6  -7                     p7  p6  p5  p4   .   .   .   .   .
    ; p5  -6  -p7 -p6 +p5 +q1     .   .   .                           .
    ; p4  -5  -p7 -p5 +p4 +q2     .       .   .                      q2
    ; p3  -4  -p7 -p4 +p3 +q3     .           .   .                  q3
    ; p2  -3  -p7 -p3 +p2 +q4     .               .   .              q4
    ; p1  -2  -p7 -p2 +p1 +q5     .                   .   .          q5
    ; p0  -1  -p7 -p1 +p0 +q6     .                       .   .      q6
    ; q0  +0  -p7 -p0 +q0 +q7     .                           .   .  q7
    ; q1  +1  -p6 -q0 +q1 +q7    q1   .                           .   .
    ; q2  +2  -p5 -q1 +q2 +q7     .  q2   .                           .
    ; q3  +3  -p4 -q2 +q3 +q7         .  q3   .                       .
    ; q4  +4  -p3 -q3 +q4 +q7             .  q4   .                   .
    ; q5  +5  -p2 -q4 +q5 +q7                 .  q5   .               .
    ; q6  +6  -p1 -q5 +q6 +q7                     .  q6   .           .

%if %2 == 16
    pand            m1, m2                                                              ; mask(out) & (mask(fm) & mask(in))
    mova            m2, [P7]
    mova            m3, [P6]
%if ARCH_X86_64
    mova            m8, [P5]
    mova            m9, [P4]
%define rp5 m8
%define rp4 m9
%define rp5s m8
%define rp4s m9
%define rp3s m14
%define rq4 m8
%define rq5 m9
%define rq6 m14
%define rq7 m15
%define rq4s m8
%define rq5s m9
%define rq6s m14
%else
%define rp5 [P5]
%define rp4 [P4]
%define rp5s ""
%define rp4s ""
%define rp3s ""
%define rq4 [Q4]
%define rq5 [Q5]
%define rq6 [Q6]
%define rq7 [Q7]
%define rq4s ""
%define rq5s ""
%define rq6s ""
%endif
    FILTER_INIT     m4, m5, m6, m7, [P6], %4, 14,                m1,  m3            ; [p6]
    FILTER_UPDATE   m4, m5, m6, m7, [P5], %4,  8,  9, 10,  5, 4, m1, rp5s           ; [p5] -p7 -p6 +p5 +q1
    FILTER_UPDATE   m4, m5, m6, m7, [P4], %4,  8, 10, 11,  6, 4, m1, rp4s           ; [p4] -p7 -p5 +p4 +q2
    FILTER_UPDATE   m4, m5, m6, m7, [P3], %4,  8, 11,  0,  7, 4, m1, rp3s           ; [p3] -p7 -p4 +p3 +q3
    FILTER_UPDATE   m4, m5, m6, m7, [P2], %4,  8,  0,  1, 12, 4, m1,  "", rq4, [Q4], 1 ; [p2] -p7 -p3 +p2 +q4
    FILTER_UPDATE   m4, m5, m6, m7, [P1], %4,  8,  1,  2, 13, 4, m1,  "", rq5, [Q5], 1 ; [p1] -p7 -p2 +p1 +q5
    FILTER_UPDATE   m4, m5, m6, m7, [P0], %4,  8,  2,  3, 14, 4, m1,  "", rq6, [Q6], 1 ; [p0] -p7 -p1 +p0 +q6
    FILTER_UPDATE   m4, m5, m6, m7, [Q0], %4,  8,  3,  4, 15, 4, m1,  "", rq7, [Q7], 1 ; [q0] -p7 -p0 +q0 +q7
    FILTER_UPDATE   m4, m5, m6, m7, [Q1], %4,  9,  4,  5, 15, 4, m1,  ""            ; [q1] -p6 -q0 +q1 +q7
    FILTER_UPDATE   m4, m5, m6, m7, [Q2], %4, 10,  5,  6, 15, 4, m1,  ""            ; [q2] -p5 -q1 +q2 +q7
    FILTER_UPDATE   m4, m5, m6, m7, [Q3], %4, 11,  6,  7, 15, 4, m1,  ""            ; [q3] -p4 -q2 +q3 +q7
    FILTER_UPDATE   m4, m5, m6, m7, [Q4], %4,  0,  7, 12, 15, 4, m1, rq4s           ; [q4] -p3 -q3 +q4 +q7
    FILTER_UPDATE   m4, m5, m6, m7, [Q5], %4,  1, 12, 13, 15, 4, m1, rq5s           ; [q5] -p2 -q4 +q5 +q7
    FILTER_UPDATE   m4, m5, m6, m7, [Q6], %4,  2, 13, 14, 15, 4, m1, rq6s           ; [q6] -p1 -q5 +q6 +q7
%endif

%ifidn %1, h
%if %2 == 16
    mova                    m0, [P7]
    mova                    m1, [P6]
    mova                    m2, [P5]
    mova                    m3, [P4]
    mova                    m4, [P3]
    mova                    m5, [P2]
%if ARCH_X86_64
    mova                    m6, [P1]
%endif
    mova                    m7, [P0]
%if ARCH_X86_64
    mova                    m8, [Q0]
    mova                    m9, [Q1]
    mova                   m10, [Q2]
    mova                   m11, [Q3]
    mova                   m12, [Q4]
    mova                   m13, [Q5]
    mova                   m14, [Q6]
    mova                   m15, [Q7]
    TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
    DEFINE_REAL_P7_TO_Q7
    movu  [P7],  m0
    movu  [P6],  m1
    movu  [P5],  m2
    movu  [P4],  m3
    movu  [P3],  m4
    movu  [P2],  m5
    movu  [P1],  m6
    movu  [P0],  m7
    movu  [Q0],  m8
    movu  [Q1],  m9
    movu  [Q2], m10
    movu  [Q3], m11
    movu  [Q4], m12
    movu  [Q5], m13
    movu  [Q6], m14
    movu  [Q7], m15
%else
    DEFINE_REAL_P7_TO_Q7
    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+32], a, [rsp+%3+%4], [Q0], [Q1]
    movh   [P7],  m0
    movh   [P5],  m1
    movh   [P3],  m2
    movh   [P1],  m3
    movh   [Q2],  m5
    movh   [Q4],  m6
    movh   [Q6],  m7
    movhps [P6],  m0
    movhps [P4],  m1
    movhps [P2],  m2
    movhps [P0],  m3
    movhps [Q3],  m5
    movhps [Q5],  m6
    movhps [Q7],  m7
    DEFINE_TRANSPOSED_P7_TO_Q7
    mova                    m0, [Q0]
    mova                    m1, [Q1]
    mova                    m2, [Q2]
    mova                    m3, [Q3]
    mova                    m4, [Q4]
    mova                    m5, [Q5]
    mova                    m7, [Q7]
    DEFINE_REAL_P7_TO_Q7 8
    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+224], a, [rsp+%3+%4], [Q0], [Q1]
    movh   [P7],  m0
    movh   [P5],  m1
    movh   [P3],  m2
    movh   [P1],  m3
    movh   [Q2],  m5
    movh   [Q4],  m6
    movh   [Q6],  m7
    movhps [P6],  m0
    movhps [P4],  m1
    movhps [P2],  m2
    movhps [P0],  m3
    movhps [Q3],  m5
    movhps [Q5],  m6
    movhps [Q7],  m7
%endif
%elif %2 == 44
    SWAP 0, 1   ; m0 = p1
    SWAP 1, 7   ; m1 = p0
    SWAP 2, 5   ; m2 = q0
    SWAP 3, 4   ; m3 = q1
    DEFINE_REAL_P7_TO_Q7 2
    SBUTTERFLY  bw, 0, 1, 4
    SBUTTERFLY  bw, 2, 3, 4
    SBUTTERFLY  wd, 0, 2, 4
    SBUTTERFLY  wd, 1, 3, 4
    movd  [P7], m0
    movd  [P3], m2
    movd  [Q0], m1
    movd  [Q4], m3
    psrldq  m0, 4
    psrldq  m1, 4
    psrldq  m2, 4
    psrldq  m3, 4
    movd  [P6], m0
    movd  [P2], m2
    movd  [Q1], m1
    movd  [Q5], m3
    psrldq  m0, 4
    psrldq  m1, 4
    psrldq  m2, 4
    psrldq  m3, 4
    movd  [P5], m0
    movd  [P1], m2
    movd  [Q2], m1
    movd  [Q6], m3
    psrldq  m0, 4
    psrldq  m1, 4
    psrldq  m2, 4
    psrldq  m3, 4
    movd  [P4], m0
    movd  [P0], m2
    movd  [Q3], m1
    movd  [Q7], m3
%else
    ; the following code does a transpose of 8 full lines to 16 half
    ; lines (high part). It is inlined to avoid the need for a staging area
    mova                    m0, [P3]
    mova                    m1, [P2]
    mova                    m2, [P1]
    mova                    m3, [P0]
    mova                    m4, [Q0]
    mova                    m5, [Q1]
%if ARCH_X86_64
    mova                    m6, [Q2]
%endif
    mova                    m7, [Q3]
    DEFINE_REAL_P7_TO_Q7
%if ARCH_X86_64
    SBUTTERFLY  bw,  0,  1, 8
    SBUTTERFLY  bw,  2,  3, 8
    SBUTTERFLY  bw,  4,  5, 8
    SBUTTERFLY  bw,  6,  7, 8
    SBUTTERFLY  wd,  0,  2, 8
    SBUTTERFLY  wd,  1,  3, 8
    SBUTTERFLY  wd,  4,  6, 8
    SBUTTERFLY  wd,  5,  7, 8
    SBUTTERFLY  dq,  0,  4, 8
    SBUTTERFLY  dq,  1,  5, 8
    SBUTTERFLY  dq,  2,  6, 8
    SBUTTERFLY  dq,  3,  7, 8
%else
    SBUTTERFLY  bw,  0,  1, 6
    mova  [rsp+64], m1
    mova        m6, [rsp+96]
    SBUTTERFLY  bw,  2,  3, 1
    SBUTTERFLY  bw,  4,  5, 1
    SBUTTERFLY  bw,  6,  7, 1
    SBUTTERFLY  wd,  0,  2, 1
    mova  [rsp+96], m2
    mova        m1, [rsp+64]
    SBUTTERFLY  wd,  1,  3, 2
    SBUTTERFLY  wd,  4,  6, 2
    SBUTTERFLY  wd,  5,  7, 2
    SBUTTERFLY  dq,  0,  4, 2
    SBUTTERFLY  dq,  1,  5, 2
    movh      [Q0], m1
    movhps    [Q1], m1
    mova        m2, [rsp+96]
    SBUTTERFLY  dq,  2,  6, 1
    SBUTTERFLY  dq,  3,  7, 1
%endif
    SWAP         3, 6
    SWAP         1, 4
    movh      [P7], m0
    movhps    [P6], m0
    movh      [P5], m1
    movhps    [P4], m1
    movh      [P3], m2
    movhps    [P2], m2
    movh      [P1], m3
    movhps    [P0], m3
%if ARCH_X86_64
    movh      [Q0], m4
    movhps    [Q1], m4
%endif
    movh      [Q2], m5
    movhps    [Q3], m5
    movh      [Q4], m6
    movhps    [Q5], m6
    movh      [Q6], m7
    movhps    [Q7], m7
%endif
%endif

    RET
%endmacro

%macro LPF_16_VH 5
INIT_XMM %5
LOOPFILTER v, %1, %2,  0, %4
LOOPFILTER h, %1, %2, %3, %4
%endmacro

%macro LPF_16_VH_ALL_OPTS 4
LPF_16_VH %1, %2, %3, %4, sse2
LPF_16_VH %1, %2, %3, %4, ssse3
LPF_16_VH %1, %2, %3, %4, avx
%endmacro

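; Arguments: size, filter6/14 word-cache bytes, transposed-row buffer bytes
; for the h variant, 32-bit-only spill bytes for the flat masks.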
LPF_16_VH_ALL_OPTS 16, 512, 256, 32
LPF_16_VH_ALL_OPTS 44,   0, 128,  0
LPF_16_VH_ALL_OPTS 48, 256, 128, 16
LPF_16_VH_ALL_OPTS 84, 256, 128, 16
LPF_16_VH_ALL_OPTS 88, 256, 128, 16