;******************************************************************************
;* Copyright (c) 2012 Michael Niedermayer
;* Copyright (c) 2014 James Almer <jamrial <at> gmail.com>
;* Copyright (c) 2014 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

; 'pointer' reserves one native-sized pointer slot inside a struc:
; 8 bytes (resq) on x86-64, 4 bytes (resd) on x86-32.
%if ARCH_X86_64
%define pointer resq
%else
%define pointer resd
%endif

; Assembly-side mirror of the leading fields of the C ResampleContext.
; Field order and sizes must match the C struct declaration exactly,
; since the code below addresses members by these offsets.
struc ResampleContext
    .av_class:              pointer 1
    .filter_bank:           pointer 1
    .filter_length:         resd 1
    .filter_alloc:          resd 1
    .ideal_dst_incr:        resd 1
    .dst_incr:              resd 1
    .dst_incr_div:          resd 1
    .dst_incr_mod:          resd 1
    .index:                 resd 1
    .frac:                  resd 1
    .src_incr:              resd 1
    .compensation_distance: resd 1
    .phase_count:           resd 1

    ; there's a few more here but we only care about the first few
endstruc

SECTION_RODATA

pf_1:      dd 1.0      ; single-precision 1.0 (used to compute 1.0/src_incr)
pdbl_1:    dq 1.0      ; double-precision 1.0 (used to compute 1.0/src_incr)
pd_0x4000: dd 0x4000   ; rounding bias for the int16 path (added before >>15)

SECTION .text

; FIXME remove unneeded variables (index_incr, phase_mask)
; Emits resample_common_%1 and resample_linear_%1 for the current SIMD width.
; %1 = sample format (float, double or int16)
; %2 = bytes per sample, %3 = log2 of bytes per sample
; %4 = scalar/packed float op suffix (s or d), %5 = label of the 1.0 constant
%macro RESAMPLE_FNS 3-5 ; format [float or int16], bps, log2_bps, float op suffix [s or d], 1.0 constant

; int resample_common_$format(ResampleContext *ctx, $format *dst,
;                             const $format *src, int size, int update_ctx)
%if ARCH_X86_64 ; unix64 and win64
cglobal resample_common_%1, 0, 15, 2, ctx, dst, src, phase_count, index, frac, \
                                      dst_incr_mod, size, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      phase_mask, dst_end, filter_bank

    ; use red-zone for variable storage
%define ctx_stackq            [rsp-0x8]
%define src_stackq            [rsp-0x10]
%if WIN64
%define update_context_stackd r4m
%else ; unix64
%define update_context_stackd [rsp-0x14]
%endif

    ; load as many variables in registers as possible; for the rest, store
    ; on stack so that we have 'ctx' available as one extra register
    mov                        sized, r3d
%if UNIX64
    mov        update_context_stackd, r4d
%endif
    mov                       indexd, [ctxq+ResampleContext.index]
    mov                        fracd, [ctxq+ResampleContext.frac]
    mov                dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
    mov                 filter_bankq, [ctxq+ResampleContext.filter_bank]
    mov                    src_incrd, [ctxq+ResampleContext.src_incr]
    mov                   ctx_stackq, ctxq
    mov           min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
    mov                dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
    shl           min_filter_len_x4d, %3
    lea                     dst_endq, [dstq+sizeq*%2]

%if UNIX64
    mov                          ecx, [ctxq+ResampleContext.phase_count]
    mov                          edi, [ctxq+ResampleContext.filter_alloc]

    DEFINE_ARGS filter_alloc, dst, src, phase_count, index, frac, dst_incr_mod, \
                filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%elif WIN64
    mov                          R9d, [ctxq+ResampleContext.filter_alloc]
    mov                          ecx, [ctxq+ResampleContext.phase_count]

    DEFINE_ARGS phase_count, dst, src, filter_alloc, index, frac, dst_incr_mod, \
                filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%endif

    ; bias src/filter_bank by -filter_length so the inner loop can count a
    ; negative offset up towards zero and use 'js' as its loop condition
    neg           min_filter_len_x4q
    sub                 filter_bankq, min_filter_len_x4q
    sub                         srcq, min_filter_len_x4q
    mov                   src_stackq, srcq
%else ; x86-32
cglobal resample_common_%1, 1, 7, 2, ctx, phase_count, dst, frac, \
                                     index, min_filter_length_x4, filter_bank

    ; push temp variables to stack
%define ctx_stackq            r0mp
%define src_stackq            r2mp
%define update_context_stackd r4m

    mov                         dstq, r1mp
    mov                           r3, r3mp
    lea                           r3, [dstq+r3*%2]
    PUSH                              dword [ctxq+ResampleContext.dst_incr_div]
    PUSH                              dword [ctxq+ResampleContext.dst_incr_mod]
    PUSH                              dword [ctxq+ResampleContext.filter_alloc]
    PUSH                              r3
    PUSH                              dword [ctxq+ResampleContext.phase_count]  ; unneeded replacement for phase_mask
    PUSH                              dword [ctxq+ResampleContext.src_incr]
    mov        min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
    mov                       indexd, [ctxq+ResampleContext.index]
    shl        min_filter_length_x4d, %3
    mov                        fracd, [ctxq+ResampleContext.frac]
    neg        min_filter_length_x4q
    mov                 filter_bankq, [ctxq+ResampleContext.filter_bank]
    sub                         r2mp, min_filter_length_x4q
    sub                 filter_bankq, min_filter_length_x4q
    PUSH                              min_filter_length_x4q
    PUSH                              filter_bankq
    mov                 phase_countd, [ctxq+ResampleContext.phase_count]

    DEFINE_ARGS src, phase_count, dst, frac, index, min_filter_count_x4, filter

    ; stack-slot aliases for everything pushed above
%define filter_bankq          dword [rsp+0x0]
%define min_filter_length_x4q dword [rsp+0x4]
%define src_incrd             dword [rsp+0x8]
%define phase_maskd           dword [rsp+0xc]
%define dst_endq              dword [rsp+0x10]
%define filter_allocd         dword [rsp+0x14]
%define dst_incr_modd         dword [rsp+0x18]
%define dst_incr_divd         dword [rsp+0x1c]

    mov                         srcq, r2mp
%endif

.loop:
    ; filter = &filter_bank[index * filter_alloc]
    mov                      filterd, filter_allocd
    imul                     filterd, indexd
%if ARCH_X86_64
    mov         min_filter_count_x4q, min_filter_len_x4q
    lea                      filterq, [filter_bankq+filterq*%2]
%else ; x86-32
    mov         min_filter_count_x4q, filter_bankq
    lea                      filterq, [min_filter_count_x4q+filterq*%2]
    mov         min_filter_count_x4q, min_filter_length_x4q
%endif
%ifidn %1, int16
    movd                          m0, [pd_0x4000]
%else ; float/double
    xorps                         m0, m0, m0
%endif

    align 16
.inner_loop:
    ; accumulate src[i] * filter[i] across the filter length
    movu                          m1, [srcq+min_filter_count_x4q*1]
%ifidn %1, int16
%if cpuflag(xop)
    vpmadcswd                     m0, m1, [filterq+min_filter_count_x4q*1], m0
%else
    pmaddwd                       m1, [filterq+min_filter_count_x4q*1]
    paddd                         m0, m1
%endif
%else ; float/double
%if cpuflag(fma4) || cpuflag(fma3)
    fmaddp%4                      m0, m1, [filterq+min_filter_count_x4q*1], m0
%else
    mulp%4                        m1, m1, [filterq+min_filter_count_x4q*1]
    addp%4                        m0, m0, m1
%endif ; cpuflag
%endif
    add         min_filter_count_x4q, mmsize
    js .inner_loop

%ifidn %1, int16
    HADDD                         m0, m1
    psrad                         m0, 15
    add                        fracd, dst_incr_modd
    packssdw                      m0, m0
    add                       indexd, dst_incr_divd
    movd                      [dstq], m0
%else ; float/double
    ; horizontal sum & store
%if mmsize == 32
    vextractf128                 xm1, m0, 0x1
    addps                        xm0, xm1
%endif
    movhlps                      xm1, xm0
%ifidn %1, float
    addps                        xm0, xm1
    shufps                       xm1, xm0, xm0, q0001
%endif
    add                        fracd, dst_incr_modd
    addp%4                       xm0, xm1
    add                       indexd, dst_incr_divd
    movs%4                    [dstq], xm0
%endif
    ; frac accumulates the fractional step; carry into index when it wraps
    cmp                        fracd, src_incrd
    jl .skip
    sub                        fracd, src_incrd
    inc                       indexd

%if UNIX64
    DEFINE_ARGS filter_alloc, dst, src, phase_count, index, frac, dst_incr_mod, \
                index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS phase_count, dst, src, filter_alloc, index, frac, dst_incr_mod, \
                index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS src, phase_count, dst, frac, index, index_incr
%endif

.skip:
    add                         dstq, %2
    ; reduce index modulo phase_count, advancing src one sample per wrap
    cmp                       indexd, phase_countd
    jb .index_skip
.index_while:
    sub                       indexd, phase_countd
    lea                         srcq, [srcq+%2]
    cmp                       indexd, phase_countd
    jnb .index_while
.index_skip:
    cmp                         dstq, dst_endq
    jne .loop

%if ARCH_X86_64
    DEFINE_ARGS ctx, dst, src, phase_count, index, frac
%else ; x86-32
    DEFINE_ARGS src, ctx, update_context, frac, index
%endif

    cmp  dword update_context_stackd, 0
    jz .skip_store
    ; strictly speaking, the function should always return the consumed
    ; number of bytes; however, we only use the value if update_context
    ; is true, so let's just leave it uninitialized otherwise
    mov                         ctxq, ctx_stackq
    movifnidn                    rax, srcq
    mov [ctxq+ResampleContext.frac ], fracd
    sub                          rax, src_stackq
    mov [ctxq+ResampleContext.index], indexd
    shr                          rax, %3

.skip_store:
%if ARCH_X86_32
    ADD                          rsp, 0x20
%endif
    RET

; int resample_linear_$format(ResampleContext *ctx, float *dst,
;                             const float *src, int size, int update_ctx)
%if ARCH_X86_64 ; unix64 and win64
%if UNIX64
cglobal resample_linear_%1, 0, 15, 5, ctx, dst, phase_mask, phase_count, index, frac, \
                                      size, dst_incr_mod, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      src, dst_end, filter_bank

    mov                         srcq, r2mp
%else ; win64
cglobal resample_linear_%1, 0, 15, 5, ctx, phase_mask, src, phase_count, index, frac, \
                                      size, dst_incr_mod, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      dst, dst_end, filter_bank

    mov                         dstq, r1mp
%endif

    ; use red-zone for variable storage
%define ctx_stackq            [rsp-0x8]
%define src_stackq            [rsp-0x10]
%define phase_mask_stackd     [rsp-0x14]
%if WIN64
%define update_context_stackd r4m
%else ; unix64
%define update_context_stackd [rsp-0x18]
%endif

    ; load as many variables in registers as possible; for the rest, store
    ; on stack so that we have 'ctx' available as one extra register
    mov                        sized, r3d
%if UNIX64
    mov        update_context_stackd, r4d
%endif
    mov                       indexd, [ctxq+ResampleContext.index]
    mov                        fracd, [ctxq+ResampleContext.frac]
    mov                dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
    mov                 filter_bankq, [ctxq+ResampleContext.filter_bank]
    mov                    src_incrd, [ctxq+ResampleContext.src_incr]
    mov                   ctx_stackq, ctxq
    mov           min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
%ifidn %1, int16
    movd                          m4, [pd_0x4000]
%else ; float/double
    ; m4 = 1.0 / src_incr, broadcast later for the interpolation weight
    cvtsi2s%4                    xm0, src_incrd
    movs%4                       xm4, [%5]
    divs%4                       xm4, xm0
%endif
    mov                dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
    shl           min_filter_len_x4d, %3
    lea                     dst_endq, [dstq+sizeq*%2]

%if UNIX64
    mov                          ecx, [ctxq+ResampleContext.phase_count]
    mov                          edi, [ctxq+ResampleContext.filter_alloc]

    DEFINE_ARGS filter_alloc, dst, filter2, phase_count, index, frac, filter1, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    mov                          R9d, [ctxq+ResampleContext.filter_alloc]
    mov                          ecx, [ctxq+ResampleContext.phase_count]

    DEFINE_ARGS phase_count, filter2, src, filter_alloc, index, frac, filter1, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%endif

    neg           min_filter_len_x4q
    sub                 filter_bankq, min_filter_len_x4q
    sub                         srcq, min_filter_len_x4q
    mov                   src_stackq, srcq
%else ; x86-32
cglobal resample_linear_%1, 1, 7, 5, ctx, min_filter_length_x4, filter2, \
                                     frac, index, dst, filter_bank

    ; push temp variables to stack
%define ctx_stackq            r0mp
%define src_stackq            r2mp
%define update_context_stackd r4m

    mov                         dstq, r1mp
    mov                           r3, r3mp
    lea                           r3, [dstq+r3*%2]
    PUSH                              dword [ctxq+ResampleContext.dst_incr_div]
    PUSH                              r3
    mov                           r3, dword [ctxq+ResampleContext.filter_alloc]
    PUSH                              dword [ctxq+ResampleContext.dst_incr_mod]
    PUSH                              r3
    shl                           r3, %3
    PUSH                              r3
    mov                           r3, dword [ctxq+ResampleContext.src_incr]
    PUSH                              dword [ctxq+ResampleContext.phase_count]  ; unneeded replacement of phase_mask
    PUSH                              r3d
%ifidn %1, int16
    movd                          m4, [pd_0x4000]
%else ; float/double
    ; m4 = 1.0 / src_incr, broadcast later for the interpolation weight
    cvtsi2s%4                    xm0, r3d
    movs%4                       xm4, [%5]
    divs%4                       xm4, xm0
%endif
    mov        min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
    mov                       indexd, [ctxq+ResampleContext.index]
    shl        min_filter_length_x4d, %3
    mov                        fracd, [ctxq+ResampleContext.frac]
    neg        min_filter_length_x4q
    mov                 filter_bankq, [ctxq+ResampleContext.filter_bank]
    sub                         r2mp, min_filter_length_x4q
    sub                 filter_bankq, min_filter_length_x4q
    PUSH                              min_filter_length_x4q
    PUSH                              filter_bankq
    PUSH                              dword [ctxq+ResampleContext.phase_count]

    DEFINE_ARGS filter1, min_filter_count_x4, filter2, frac, index, dst, src

    ; stack-slot aliases for everything pushed above
%define phase_count_stackd    dword [rsp+0x0]
%define filter_bankq          dword [rsp+0x4]
%define min_filter_length_x4q dword [rsp+0x8]
%define src_incrd             dword [rsp+0xc]
%define phase_mask_stackd     dword [rsp+0x10]
%define filter_alloc_x4q      dword [rsp+0x14]
%define filter_allocd         dword [rsp+0x18]
%define dst_incr_modd         dword [rsp+0x1c]
%define dst_endq              dword [rsp+0x20]
%define dst_incr_divd         dword [rsp+0x24]

    mov                         srcq, r2mp
%endif

.loop:
    ; filter1 = current phase's filter, filter2 = next phase's filter
    mov                     filter1d, filter_allocd
    imul                    filter1d, indexd
%if ARCH_X86_64
    mov         min_filter_count_x4q, min_filter_len_x4q
    lea                     filter1q, [filter_bankq+filter1q*%2]
    lea                     filter2q, [filter1q+filter_allocq*%2]
%else ; x86-32
    mov         min_filter_count_x4q, filter_bankq
    lea                     filter1q, [min_filter_count_x4q+filter1q*%2]
    mov         min_filter_count_x4q, min_filter_length_x4q
    mov                     filter2q, filter1q
    add                     filter2q, filter_alloc_x4q
%endif
%ifidn %1, int16
    mova                          m0, m4
    mova                          m2, m4
%else ; float/double
    xorps                         m0, m0, m0
    xorps                         m2, m2, m2
%endif

    align 16
.inner_loop:
    ; accumulate both filter taps against the same source samples
    movu                          m1, [srcq+min_filter_count_x4q*1]
%ifidn %1, int16
%if cpuflag(xop)
    vpmadcswd                     m2, m1, [filter2q+min_filter_count_x4q*1], m2
    vpmadcswd                     m0, m1, [filter1q+min_filter_count_x4q*1], m0
%else
    pmaddwd                       m3, m1, [filter2q+min_filter_count_x4q*1]
    pmaddwd                       m1, [filter1q+min_filter_count_x4q*1]
    paddd                         m2, m3
    paddd                         m0, m1
%endif ; cpuflag
%else ; float/double
%if cpuflag(fma4) || cpuflag(fma3)
    fmaddp%4                      m2, m1, [filter2q+min_filter_count_x4q*1], m2
    fmaddp%4                      m0, m1, [filter1q+min_filter_count_x4q*1], m0
%else
    mulp%4                        m3, m1, [filter2q+min_filter_count_x4q*1]
    mulp%4                        m1, m1, [filter1q+min_filter_count_x4q*1]
    addp%4                        m2, m2, m3
    addp%4                        m0, m0, m1
%endif ; cpuflag
%endif
    add         min_filter_count_x4q, mmsize
    js .inner_loop

%ifidn %1, int16
%if mmsize == 16
%if cpuflag(xop)
    vphadddq                      m2, m2
    vphadddq                      m0, m0
%endif
    pshufd                        m3, m2, q0032
    pshufd                        m1, m0, q0032
    paddd                         m2, m3
    paddd                         m0, m1
%endif
%if notcpuflag(xop)
    PSHUFLW                       m3, m2, q0032
    PSHUFLW                       m1, m0, q0032
    paddd                         m2, m3
    paddd                         m0, m1
%endif
    psubd                         m2, m0
    ; This is probably a really bad idea on atom and other machines with a
    ; long transfer latency between GPRs and XMMs (atom). However, it does
    ; make the clip a lot simpler...
    movd                         eax, m2
    add                       indexd, dst_incr_divd
    imul                              fracd
    idiv                              src_incrd
    movd                          m1, eax
    add                        fracd, dst_incr_modd
    paddd                         m0, m1
    psrad                         m0, 15
    packssdw                      m0, m0
    movd                      [dstq], m0

    ; note that for imul/idiv, I need to move filter to edx/eax for each:
    ; - 32bit: eax=r0[filter1], edx=r2[filter2]
    ; - win64: eax=r6[filter1], edx=r1[todo]
    ; - unix64: eax=r6[filter1], edx=r2[todo]
%else ; float/double
    ; val += (v2 - val) * (FELEML) frac / c->src_incr;
%if mmsize == 32
    vextractf128                 xm1, m0, 0x1
    vextractf128                 xm3, m2, 0x1
    addps                        xm0, xm1
    addps                        xm2, xm3
%endif
    cvtsi2s%4                    xm1, fracd
    subp%4                       xm2, xm0
    mulp%4                       xm1, xm4
    shufp%4                      xm1, xm1, q0000
%if cpuflag(fma4) || cpuflag(fma3)
    fmaddp%4                     xm0, xm2, xm1, xm0
%else
    mulp%4                       xm2, xm1
    addp%4                       xm0, xm2
%endif ; cpuflag

    ; horizontal sum & store
    movhlps                      xm1, xm0
%ifidn %1, float
    addps                        xm0, xm1
    shufps                       xm1, xm0, xm0, q0001
%endif
    add                        fracd, dst_incr_modd
    addp%4                       xm0, xm1
    add                       indexd, dst_incr_divd
    movs%4                    [dstq], xm0
%endif
    ; frac accumulates the fractional step; carry into index when it wraps
    cmp                        fracd, src_incrd
    jl .skip
    sub                        fracd, src_incrd
    inc                       indexd

%if UNIX64
    DEFINE_ARGS filter_alloc, dst, filter2, phase_count, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS phase_count, filter2, src, filter_alloc, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS filter1, phase_count, index_incr, frac, index, dst, src
%endif

.skip:
%if ARCH_X86_32
    mov                 phase_countd, phase_count_stackd
%endif
    add                         dstq, %2
    ; reduce index modulo phase_count, advancing src one sample per wrap
    cmp                       indexd, phase_countd
    jb .index_skip
.index_while:
    sub                       indexd, phase_countd
    lea                         srcq, [srcq+%2]
    cmp                       indexd, phase_countd
    jnb .index_while
.index_skip:
    cmp                         dstq, dst_endq
    jne .loop

%if UNIX64
    DEFINE_ARGS ctx, dst, filter2, phase_count, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS ctx, filter2, src, phase_count, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS filter1, ctx, update_context, frac, index, dst, src
%endif

    cmp  dword update_context_stackd, 0
    jz .skip_store
    ; strictly speaking, the function should always return the consumed
    ; number of bytes; however, we only use the value if update_context
    ; is true, so let's just leave it uninitialized otherwise
    mov                         ctxq, ctx_stackq
    movifnidn                    rax, srcq
    mov [ctxq+ResampleContext.frac ], fracd
    sub                          rax, src_stackq
    mov [ctxq+ResampleContext.index], indexd
    shr                          rax, %3

.skip_store:
%if ARCH_X86_32
    ADD                          rsp, 0x28
%endif
    RET
%endmacro

; float variants
INIT_XMM sse
RESAMPLE_FNS float, 4, 2, s, pf_1

%if HAVE_AVX_EXTERNAL
INIT_YMM avx
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif
%if HAVE_FMA4_EXTERNAL
INIT_XMM fma4
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif

; int16 variants (mmxext fallback only needed on x86-32)
%if ARCH_X86_32
INIT_MMX mmxext
RESAMPLE_FNS int16, 2, 1
%endif

INIT_XMM sse2
RESAMPLE_FNS int16, 2, 1
%if HAVE_XOP_EXTERNAL
INIT_XMM xop
RESAMPLE_FNS int16, 2, 1
%endif

; double variant
INIT_XMM sse2
RESAMPLE_FNS double, 8, 3, d, pdbl_1