;******************************************************************************
;* x86 optimized Format Conversion Utils
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"
%include "util.asm"

SECTION_RODATA 32

pf_s32_inv_scale: times 8 dd 0x30000000 ; 2^-31
pf_s32_scale:     times 8 dd 0x4f000000 ; 2^31
pf_s32_clip:      times 8 dd 0x4effffff ; 2147483520.0, largest float < 2^31
pf_s16_inv_scale: times 4 dd 0x38000000 ; 2^-15 = 1/32768
pf_s16_scale:     times 4 dd 0x47000000 ; 2^15 = 32768.0
pb_shuf_unpack_even:      db -1, -1,  0,  1, -1, -1,  2,  3, -1, -1,  8,  9, -1, -1, 10, 11
pb_shuf_unpack_odd:       db -1, -1,  4,  5, -1, -1,  6,  7, -1, -1, 12, 13, -1, -1, 14, 15
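; pshufb masks: the -1 bytes are zeroed by pshufb, so each selected word
; lands in the high half of its dword, i.e. as (int32_t)sample << 16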
pb_interleave_words: SHUFFLE_MASK_W  0,  4,  1,  5,  2,  6,  3,  7
pb_deinterleave_words: SHUFFLE_MASK_W  0,  2,  4,  6,  1,  3,  5,  7
pw_zero_even:     times 4 dw 0x0000, 0xffff

SECTION_TEXT

;------------------------------------------------------------------------------
; void ff_conv_s16_to_s32(int32_t *dst, const int16_t *src, int len);
;------------------------------------------------------------------------------
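; Each s16 sample is unpacked into the high word of a zeroed dword
; (punpcklwd/punpckhwd against a zero register), which is equivalent to
; dst[i] = (int32_t)src[i] << 16 and maps full-scale s16 to full-scale s32.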

INIT_XMM sse2
cglobal conv_s16_to_s32, 3,3,3, dst, src, len
    lea      lenq, [2*lend]
    lea      dstq, [dstq+2*lenq]
    add      srcq, lenq
    neg      lenq
.loop:
    mova       m2, [srcq+lenq]
    pxor       m0, m0
    pxor       m1, m1
    punpcklwd  m0, m2
    punpckhwd  m1, m2
    mova  [dstq+2*lenq       ], m0
    mova  [dstq+2*lenq+mmsize], m1
    add      lenq, mmsize
    jl .loop
    REP_RET

;------------------------------------------------------------------------------
; void ff_conv_s16_to_flt(float *dst, const int16_t *src, int len);
;------------------------------------------------------------------------------
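; S16_TO_S32_SX (from util.asm) sign-extends each s16 sample to s32; the
; result is converted to float and scaled by pf_s16_inv_scale = 2^-15
; (1/32768), landing in [-1.0,1.0).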

%macro CONV_S16_TO_FLT 0
cglobal conv_s16_to_flt, 3,3,3, dst, src, len
    lea      lenq, [2*lend]
    add      srcq, lenq
    lea      dstq, [dstq + 2*lenq]
    neg      lenq
    mova       m2, [pf_s16_inv_scale]
    ALIGN 16
.loop:
    mova       m0, [srcq+lenq]
    S16_TO_S32_SX 0, 1
    cvtdq2ps   m0, m0
    cvtdq2ps   m1, m1
    mulps      m0, m2
    mulps      m1, m2
    mova  [dstq+2*lenq       ], m0
    mova  [dstq+2*lenq+mmsize], m1
    add      lenq, mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16_TO_FLT
INIT_XMM sse4
CONV_S16_TO_FLT

;------------------------------------------------------------------------------
; void ff_conv_s32_to_s16(int16_t *dst, const int32_t *src, int len);
;------------------------------------------------------------------------------
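; psrad by 16 keeps the top 16 bits of each sample (sign included), so the
; following packssdw never saturates and simply packs register pairs back
; into s16 vectors.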

%macro CONV_S32_TO_S16 0
cglobal conv_s32_to_s16, 3,3,4, dst, src, len
    lea     lenq, [2*lend]
    lea     srcq, [srcq+2*lenq]
    add     dstq, lenq
    neg     lenq
.loop:
    mova      m0, [srcq+2*lenq         ]
    mova      m1, [srcq+2*lenq+  mmsize]
    mova      m2, [srcq+2*lenq+2*mmsize]
    mova      m3, [srcq+2*lenq+3*mmsize]
    psrad     m0, 16
    psrad     m1, 16
    psrad     m2, 16
    psrad     m3, 16
    packssdw  m0, m1
    packssdw  m2, m3
    mova  [dstq+lenq       ], m0
    mova  [dstq+lenq+mmsize], m2
    add     lenq, mmsize*2
    jl .loop
%if mmsize == 8
    emms
    RET
%else
    REP_RET
%endif
%endmacro

INIT_MMX mmx
CONV_S32_TO_S16
INIT_XMM sse2
CONV_S32_TO_S16

;------------------------------------------------------------------------------
; void ff_conv_s32_to_flt(float *dst, const int32_t *src, int len);
;------------------------------------------------------------------------------
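; Plain cvtdq2ps followed by a multiply with pf_s32_inv_scale = 2^-31,
; mapping full-scale s32 to [-1.0,1.0).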

%macro CONV_S32_TO_FLT 0
cglobal conv_s32_to_flt, 3,3,3, dst, src, len
    lea     lenq, [4*lend]
    add     srcq, lenq
    add     dstq, lenq
    neg     lenq
    mova      m0, [pf_s32_inv_scale]
    ALIGN 16
.loop:
    cvtdq2ps  m1, [srcq+lenq       ]
    cvtdq2ps  m2, [srcq+lenq+mmsize]
    mulps     m1, m1, m0
    mulps     m2, m2, m0
    mova  [dstq+lenq       ], m1
    mova  [dstq+lenq+mmsize], m2
    add     lenq, mmsize*2
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S32_TO_FLT
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
CONV_S32_TO_FLT
%endif

;------------------------------------------------------------------------------
; void ff_conv_flt_to_s16(int16_t *dst, const float *src, int len);
;------------------------------------------------------------------------------
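; Floats are scaled by pf_s16_scale = 2^15, rounded to s32 with cvtps2dq
; (round-to-nearest under the default MXCSR), then packed with signed
; saturation, which clamps samples outside [-1.0,1.0) to the s16 limits.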

INIT_XMM sse2
cglobal conv_flt_to_s16, 3,3,5, dst, src, len
    lea     lenq, [2*lend]
    lea     srcq, [srcq+2*lenq]
    add     dstq, lenq
    neg     lenq
    mova      m4, [pf_s16_scale]
.loop:
    mova      m0, [srcq+2*lenq         ]
    mova      m1, [srcq+2*lenq+1*mmsize]
    mova      m2, [srcq+2*lenq+2*mmsize]
    mova      m3, [srcq+2*lenq+3*mmsize]
    mulps     m0, m4
    mulps     m1, m4
    mulps     m2, m4
    mulps     m3, m4
    cvtps2dq  m0, m0
    cvtps2dq  m1, m1
    cvtps2dq  m2, m2
    cvtps2dq  m3, m3
    packssdw  m0, m1
    packssdw  m2, m3
    mova  [dstq+lenq       ], m0
    mova  [dstq+lenq+mmsize], m2
    add     lenq, mmsize*2
    jl .loop
    REP_RET

;------------------------------------------------------------------------------
; void ff_conv_flt_to_s32(int32_t *dst, const float *src, int len);
;------------------------------------------------------------------------------
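; After scaling by 2^31, values are clipped against pf_s32_clip
; (2147483520.0, the largest float below 2^31): without the minps, cvtps2dq
; would return 0x80000000 (INT_MIN) for anything >= 2^31. The negative limit
; -2^31 is exactly representable, so that side needs no clipping.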

%macro CONV_FLT_TO_S32 0
cglobal conv_flt_to_s32, 3,3,6, dst, src, len
    lea     lenq, [lend*4]
    add     srcq, lenq
    add     dstq, lenq
    neg     lenq
    mova      m4, [pf_s32_scale]
    mova      m5, [pf_s32_clip]
.loop:
    mulps     m0, m4, [srcq+lenq         ]
    mulps     m1, m4, [srcq+lenq+1*mmsize]
    mulps     m2, m4, [srcq+lenq+2*mmsize]
    mulps     m3, m4, [srcq+lenq+3*mmsize]
    minps     m0, m0, m5
    minps     m1, m1, m5
    minps     m2, m2, m5
    minps     m3, m3, m5
    cvtps2dq  m0, m0
    cvtps2dq  m1, m1
    cvtps2dq  m2, m2
    cvtps2dq  m3, m3
    mova  [dstq+lenq         ], m0
    mova  [dstq+lenq+1*mmsize], m1
    mova  [dstq+lenq+2*mmsize], m2
    mova  [dstq+lenq+3*mmsize], m3
    add     lenq, mmsize*4
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_FLT_TO_S32
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
CONV_FLT_TO_S32
%endif

;------------------------------------------------------------------------------
; void ff_conv_s16p_to_s16_2ch(int16_t *dst, int16_t *const *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
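; Interleaves the two planar channels by word-unpacking register pairs
; (SBUTTERFLY2 wd), producing L0,R0,L1,R1,... in the packed output.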

%macro CONV_S16P_TO_S16_2CH 0
cglobal conv_s16p_to_s16_2ch, 3,4,5, dst, src0, len, src1
    mov       src1q, [src0q+gprsize]
    mov       src0q, [src0q        ]
    lea        lenq, [2*lend]
    add       src0q, lenq
    add       src1q, lenq
    lea        dstq, [dstq+2*lenq]
    neg        lenq
.loop:
    mova         m0, [src0q+lenq       ]
    mova         m1, [src1q+lenq       ]
    mova         m2, [src0q+lenq+mmsize]
    mova         m3, [src1q+lenq+mmsize]
    SBUTTERFLY2  wd, 0, 1, 4
    SBUTTERFLY2  wd, 2, 3, 4
    mova  [dstq+2*lenq+0*mmsize], m0
    mova  [dstq+2*lenq+1*mmsize], m1
    mova  [dstq+2*lenq+2*mmsize], m2
    mova  [dstq+2*lenq+3*mmsize], m3
    add        lenq, 2*mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16P_TO_S16_2CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16P_TO_S16_2CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_s16p_to_s16_6ch(int16_t *dst, int16_t *const *src, int len,
;                              int channels);
;------------------------------------------------------------------------------

;------------------------------------------------------------------------------
; NOTE: In the 6-channel functions, len could be used as an index on x86-64
;       instead of just a counter, which would avoid incrementing the
;       pointers, but the extra complexity and amount of code is not worth
;       the small gain. On x86-32 there are not enough registers to use len
;       as an index without keeping two of the pointers on the stack and
;       loading them in each iteration.
;------------------------------------------------------------------------------
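; The secondary source pointers are turned into offsets relative to the
; first one (the sub instructions below), so the loop body only has to
; advance src0q and dstq.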

%macro CONV_S16P_TO_S16_6CH 0
%if ARCH_X86_64
cglobal conv_s16p_to_s16_6ch, 3,8,7, dst, src0, len, src1, src2, src3, src4, src5
%else
cglobal conv_s16p_to_s16_6ch, 2,7,7, dst, src0, src1, src2, src3, src4, src5
%define lend dword r2m
%endif
    mov      src1q, [src0q+1*gprsize]
    mov      src2q, [src0q+2*gprsize]
    mov      src3q, [src0q+3*gprsize]
    mov      src4q, [src0q+4*gprsize]
    mov      src5q, [src0q+5*gprsize]
    mov      src0q, [src0q]
    sub      src1q, src0q
    sub      src2q, src0q
    sub      src3q, src0q
    sub      src4q, src0q
    sub      src5q, src0q
.loop:
%if cpuflag(sse2slow)
    movq        m0, [src0q      ]   ; m0 =  0,  6, 12, 18,  x,  x,  x,  x
    movq        m1, [src0q+src1q]   ; m1 =  1,  7, 13, 19,  x,  x,  x,  x
    movq        m2, [src0q+src2q]   ; m2 =  2,  8, 14, 20,  x,  x,  x,  x
    movq        m3, [src0q+src3q]   ; m3 =  3,  9, 15, 21,  x,  x,  x,  x
    movq        m4, [src0q+src4q]   ; m4 =  4, 10, 16, 22,  x,  x,  x,  x
    movq        m5, [src0q+src5q]   ; m5 =  5, 11, 17, 23,  x,  x,  x,  x
                                    ; unpack words:
    punpcklwd   m0, m1              ; m0 =  0,  1,  6,  7, 12, 13, 18, 19
    punpcklwd   m2, m3              ; m2 =  4,  5, 10, 11, 16, 17, 22, 23
    punpcklwd   m4, m5              ; m4 =  2,  3,  8,  9, 14, 15, 20, 21
                                    ; blend dwords
    shufps      m1, m0, m2, q2020   ; m1 =  0,  1, 12, 13,  2,  3, 14, 15
    shufps      m0, m4, q2031       ; m0 =  6,  7, 18, 19,  4,  5, 16, 17
    shufps      m2, m4, q3131       ; m2 =  8,  9, 20, 21, 10, 11, 22, 23
                                    ; shuffle dwords
    pshufd      m0, m0, q1302       ; m0 =  4,  5,  6,  7, 16, 17, 18, 19
    pshufd      m1, m1, q3120       ; m1 =  0,  1,  2,  3, 12, 13, 14, 15
    pshufd      m2, m2, q3120       ; m2 =  8,  9, 10, 11, 20, 21, 22, 23
    movq   [dstq+0*mmsize/2], m1
    movq   [dstq+1*mmsize/2], m0
    movq   [dstq+2*mmsize/2], m2
    movhps [dstq+3*mmsize/2], m1
    movhps [dstq+4*mmsize/2], m0
    movhps [dstq+5*mmsize/2], m2
    add      src0q, mmsize/2
    add       dstq, mmsize*3
    sub       lend, mmsize/4
%else
    mova        m0, [src0q      ]   ; m0 =  0,  6, 12, 18, 24, 30, 36, 42
    mova        m1, [src0q+src1q]   ; m1 =  1,  7, 13, 19, 25, 31, 37, 43
    mova        m2, [src0q+src2q]   ; m2 =  2,  8, 14, 20, 26, 32, 38, 44
    mova        m3, [src0q+src3q]   ; m3 =  3,  9, 15, 21, 27, 33, 39, 45
    mova        m4, [src0q+src4q]   ; m4 =  4, 10, 16, 22, 28, 34, 40, 46
    mova        m5, [src0q+src5q]   ; m5 =  5, 11, 17, 23, 29, 35, 41, 47
                                    ; unpack words:
    SBUTTERFLY2 wd, 0, 1, 6         ; m0 =  0,  1,  6,  7, 12, 13, 18, 19
                                    ; m1 = 24, 25, 30, 31, 36, 37, 42, 43
    SBUTTERFLY2 wd, 2, 3, 6         ; m2 =  2,  3,  8,  9, 14, 15, 20, 21
                                    ; m3 = 26, 27, 32, 33, 38, 39, 44, 45
    SBUTTERFLY2 wd, 4, 5, 6         ; m4 =  4,  5, 10, 11, 16, 17, 22, 23
                                    ; m5 = 28, 29, 34, 35, 40, 41, 46, 47
                                    ; blend dwords
    shufps      m6, m0, m2, q2020   ; m6 =  0,  1, 12, 13,  2,  3, 14, 15
    shufps      m0, m4, q2031       ; m0 =  6,  7, 18, 19,  4,  5, 16, 17
    shufps      m2, m4, q3131       ; m2 =  8,  9, 20, 21, 10, 11, 22, 23
    SWAP 4,6                        ; m4 =  0,  1, 12, 13,  2,  3, 14, 15
    shufps      m6, m1, m3, q2020   ; m6 = 24, 25, 36, 37, 26, 27, 38, 39
    shufps      m1, m5, q2031       ; m1 = 30, 31, 42, 43, 28, 29, 40, 41
    shufps      m3, m5, q3131       ; m3 = 32, 33, 44, 45, 34, 35, 46, 47
    SWAP 5,6                        ; m5 = 24, 25, 36, 37, 26, 27, 38, 39
                                    ; shuffle dwords
    pshufd      m0, m0, q1302       ; m0 =  4,  5,  6,  7, 16, 17, 18, 19
    pshufd      m2, m2, q3120       ; m2 =  8,  9, 10, 11, 20, 21, 22, 23
    pshufd      m4, m4, q3120       ; m4 =  0,  1,  2,  3, 12, 13, 14, 15
    pshufd      m1, m1, q1302       ; m1 = 28, 29, 30, 31, 40, 41, 42, 43
    pshufd      m3, m3, q3120       ; m3 = 32, 33, 34, 35, 44, 45, 46, 47
    pshufd      m5, m5, q3120       ; m5 = 24, 25, 26, 27, 36, 37, 38, 39
                                    ; shuffle qwords
    punpcklqdq  m6, m4, m0          ; m6 =  0,  1,  2,  3,  4,  5,  6,  7
    punpckhqdq  m0, m2              ; m0 = 16, 17, 18, 19, 20, 21, 22, 23
    shufps      m2, m4, q3210       ; m2 =  8,  9, 10, 11, 12, 13, 14, 15
    SWAP 4,6                        ; m4 =  0,  1,  2,  3,  4,  5,  6,  7
    punpcklqdq  m6, m5, m1          ; m6 = 24, 25, 26, 27, 28, 29, 30, 31
    punpckhqdq  m1, m3              ; m1 = 40, 41, 42, 43, 44, 45, 46, 47
    shufps      m3, m5, q3210       ; m3 = 32, 33, 34, 35, 36, 37, 38, 39
    SWAP 5,6                        ; m5 = 24, 25, 26, 27, 28, 29, 30, 31
    mova   [dstq+0*mmsize], m4
    mova   [dstq+1*mmsize], m2
    mova   [dstq+2*mmsize], m0
    mova   [dstq+3*mmsize], m5
    mova   [dstq+4*mmsize], m3
    mova   [dstq+5*mmsize], m1
    add      src0q, mmsize
    add       dstq, mmsize*6
    sub       lend, mmsize/2
%endif
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16P_TO_S16_6CH
INIT_XMM sse2slow
CONV_S16P_TO_S16_6CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16P_TO_S16_6CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_s16p_to_flt_2ch(float *dst, int16_t *const *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
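; The channels are interleaved with SBUTTERFLY2, then each s16 sample is
; unpacked into the high word of a dword (value << 16), converted to float
; and scaled by pf_s32_inv_scale = 2^-31.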

%macro CONV_S16P_TO_FLT_2CH 0
cglobal conv_s16p_to_flt_2ch, 3,4,6, dst, src0, len, src1
    lea       lenq, [2*lend]
    mov      src1q, [src0q+gprsize]
    mov      src0q, [src0q        ]
    lea       dstq, [dstq+4*lenq]
    add      src0q, lenq
    add      src1q, lenq
    neg       lenq
    mova        m5, [pf_s32_inv_scale]
.loop:
    mova        m2, [src0q+lenq]    ; m2 =  0,  2,  4,  6,  8, 10, 12, 14
    mova        m4, [src1q+lenq]    ; m4 =  1,  3,  5,  7,  9, 11, 13, 15
    SBUTTERFLY2 wd, 2, 4, 3         ; m2 =  0,  1,  2,  3,  4,  5,  6,  7
                                    ; m4 =  8,  9, 10, 11, 12, 13, 14, 15
    pxor        m3, m3
    punpcklwd   m0, m3, m2          ; m0 =      0,      1,      2,      3
    punpckhwd   m1, m3, m2          ; m1 =      4,      5,      6,      7
    punpcklwd   m2, m3, m4          ; m2 =      8,      9,     10,     11
    punpckhwd   m3, m4              ; m3 =     12,     13,     14,     15
    cvtdq2ps    m0, m0
    cvtdq2ps    m1, m1
    cvtdq2ps    m2, m2
    cvtdq2ps    m3, m3
    mulps       m0, m5
    mulps       m1, m5
    mulps       m2, m5
    mulps       m3, m5
    mova  [dstq+4*lenq         ], m0
    mova  [dstq+4*lenq+  mmsize], m1
    mova  [dstq+4*lenq+2*mmsize], m2
    mova  [dstq+4*lenq+3*mmsize], m3
    add       lenq, mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16P_TO_FLT_2CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16P_TO_FLT_2CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_s16p_to_flt_6ch(float *dst, int16_t *const *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
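; Same channel interleave as the s16 6ch case; on SSSE3 and later the
; widening of s16 into the high word of each dword is a single pshufb per
; register (pb_shuf_unpack_even/odd), while the SSE2 path needs pshufd plus
; punpck against a zero register. The result is scaled by 2^-31.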

%macro CONV_S16P_TO_FLT_6CH 0
%if ARCH_X86_64
cglobal conv_s16p_to_flt_6ch, 3,8,8, dst, src, len, src1, src2, src3, src4, src5
%else
cglobal conv_s16p_to_flt_6ch, 2,7,8, dst, src, src1, src2, src3, src4, src5
%define lend dword r2m
%endif
    mov     src1q, [srcq+1*gprsize]
    mov     src2q, [srcq+2*gprsize]
    mov     src3q, [srcq+3*gprsize]
    mov     src4q, [srcq+4*gprsize]
    mov     src5q, [srcq+5*gprsize]
    mov      srcq, [srcq]
    sub     src1q, srcq
    sub     src2q, srcq
    sub     src3q, srcq
    sub     src4q, srcq
    sub     src5q, srcq
    mova       m7, [pf_s32_inv_scale]
%if cpuflag(ssse3)
    %define unpack_even m6
    mova       m6, [pb_shuf_unpack_even]
%if ARCH_X86_64
    %define unpack_odd m8
    mova       m8, [pb_shuf_unpack_odd]
%else
    %define unpack_odd [pb_shuf_unpack_odd]
%endif
%endif
.loop:
    movq       m0, [srcq      ]  ; m0 =  0,  6, 12, 18,  x,  x,  x,  x
    movq       m1, [srcq+src1q]  ; m1 =  1,  7, 13, 19,  x,  x,  x,  x
    movq       m2, [srcq+src2q]  ; m2 =  2,  8, 14, 20,  x,  x,  x,  x
    movq       m3, [srcq+src3q]  ; m3 =  3,  9, 15, 21,  x,  x,  x,  x
    movq       m4, [srcq+src4q]  ; m4 =  4, 10, 16, 22,  x,  x,  x,  x
    movq       m5, [srcq+src5q]  ; m5 =  5, 11, 17, 23,  x,  x,  x,  x
                                 ; unpack words:
    punpcklwd  m0, m1            ; m0 =  0,  1,  6,  7, 12, 13, 18, 19
    punpcklwd  m2, m3            ; m2 =  2,  3,  8,  9, 14, 15, 20, 21
    punpcklwd  m4, m5            ; m4 =  4,  5, 10, 11, 16, 17, 22, 23
                                 ; blend dwords
    shufps     m1, m4, m0, q3120 ; m1 =  4,  5, 16, 17,  6,  7, 18, 19
    shufps         m0, m2, q2020 ; m0 =  0,  1, 12, 13,  2,  3, 14, 15
    shufps         m2, m4, q3131 ; m2 =  8,  9, 20, 21, 10, 11, 22, 23
%if cpuflag(ssse3)
    pshufb     m3, m0, unpack_odd   ; m3 =  12,     13,     14,     15
    pshufb         m0, unpack_even  ; m0 =   0,      1,      2,      3
    pshufb     m4, m1, unpack_odd   ; m4 =  16,     17,     18,     19
    pshufb         m1, unpack_even  ; m1 =   4,      5,      6,      7
    pshufb     m5, m2, unpack_odd   ; m5 =  20,     21,     22,     23
    pshufb         m2, unpack_even  ; m2 =   8,      9,     10,     11
%else
                                 ; shuffle dwords
    pshufd     m0, m0, q3120     ; m0 =  0,  1,  2,  3, 12, 13, 14, 15
    pshufd     m1, m1, q3120     ; m1 =  4,  5,  6,  7, 16, 17, 18, 19
    pshufd     m2, m2, q3120     ; m2 =  8,  9, 10, 11, 20, 21, 22, 23
    pxor       m6, m6            ; convert s16 in m0-m2 to s32 in m0-m5
    punpcklwd  m3, m6, m0        ; m3 =      0,      1,      2,      3
    punpckhwd  m4, m6, m0        ; m4 =     12,     13,     14,     15
    punpcklwd  m0, m6, m1        ; m0 =      4,      5,      6,      7
    punpckhwd  m5, m6, m1        ; m5 =     16,     17,     18,     19
    punpcklwd  m1, m6, m2        ; m1 =      8,      9,     10,     11
    punpckhwd      m6, m2        ; m6 =     20,     21,     22,     23
    SWAP 6,2,1,0,3,4,5           ; swap registers 3,0,1,4,5,6 to 0,1,2,3,4,5
%endif
    cvtdq2ps   m0, m0            ; convert s32 to float
    cvtdq2ps   m1, m1
    cvtdq2ps   m2, m2
    cvtdq2ps   m3, m3
    cvtdq2ps   m4, m4
    cvtdq2ps   m5, m5
    mulps      m0, m7            ; scale float from s32 range to [-1.0,1.0]
    mulps      m1, m7
    mulps      m2, m7
    mulps      m3, m7
    mulps      m4, m7
    mulps      m5, m7
    mova  [dstq         ], m0
    mova  [dstq+  mmsize], m1
    mova  [dstq+2*mmsize], m2
    mova  [dstq+3*mmsize], m3
    mova  [dstq+4*mmsize], m4
    mova  [dstq+5*mmsize], m5
    add      srcq, mmsize/2
    add      dstq, mmsize*6
    sub      lend, mmsize/4
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16P_TO_FLT_6CH
INIT_XMM ssse3
CONV_S16P_TO_FLT_6CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16P_TO_FLT_6CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_fltp_to_s16_2ch(int16_t *dst, float *const *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
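; Both channels are scaled and converted to s32, then interleaved: the SSSE3
; path packs the two registers together and reorders with pshufb
; (pb_interleave_words); the SSE2 path packs each register against itself
; and interleaves with punpcklwd.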

%macro CONV_FLTP_TO_S16_2CH 0
cglobal conv_fltp_to_s16_2ch, 3,4,3, dst, src0, len, src1
    lea      lenq, [4*lend]
    mov     src1q, [src0q+gprsize]
    mov     src0q, [src0q        ]
    add      dstq, lenq
    add     src0q, lenq
    add     src1q, lenq
    neg      lenq
    mova       m2, [pf_s16_scale]
%if cpuflag(ssse3)
    mova       m3, [pb_interleave_words]
%endif
.loop:
    mulps      m0, m2, [src0q+lenq] ; m0 =    0,    2,    4,    6
    mulps      m1, m2, [src1q+lenq] ; m1 =    1,    3,    5,    7
    cvtps2dq   m0, m0
    cvtps2dq   m1, m1
%if cpuflag(ssse3)
    packssdw   m0, m1               ; m0 = 0, 2, 4, 6, 1, 3, 5, 7
    pshufb     m0, m3               ; m0 = 0, 1, 2, 3, 4, 5, 6, 7
%else
    packssdw   m0, m0               ; m0 = 0, 2, 4, 6, x, x, x, x
    packssdw   m1, m1               ; m1 = 1, 3, 5, 7, x, x, x, x
    punpcklwd  m0, m1               ; m0 = 0, 1, 2, 3, 4, 5, 6, 7
%endif
    mova  [dstq+lenq], m0
    add      lenq, mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_FLTP_TO_S16_2CH
INIT_XMM ssse3
CONV_FLTP_TO_S16_2CH

;------------------------------------------------------------------------------
; void ff_conv_fltp_to_s16_6ch(int16_t *dst, float *const *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
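; The SSE variant (INIT_MMX sse) converts with cvtps2pi into MMX registers,
; which is why it runs with 8-byte mmsize and finishes with emms; the SSE2
; variant does the whole transpose in XMM registers.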

%macro CONV_FLTP_TO_S16_6CH 0
%if ARCH_X86_64
cglobal conv_fltp_to_s16_6ch, 3,8,7, dst, src, len, src1, src2, src3, src4, src5
%else
cglobal conv_fltp_to_s16_6ch, 2,7,7, dst, src, src1, src2, src3, src4, src5
%define lend dword r2m
%endif
    mov        src1q, [srcq+1*gprsize]
    mov        src2q, [srcq+2*gprsize]
    mov        src3q, [srcq+3*gprsize]
    mov        src4q, [srcq+4*gprsize]
    mov        src5q, [srcq+5*gprsize]
    mov         srcq, [srcq]
    sub        src1q, srcq
    sub        src2q, srcq
    sub        src3q, srcq
    sub        src4q, srcq
    sub        src5q, srcq
    movaps      xmm6, [pf_s16_scale]
.loop:
%if cpuflag(sse2)
    mulps         m0, m6, [srcq      ]
    mulps         m1, m6, [srcq+src1q]
    mulps         m2, m6, [srcq+src2q]
    mulps         m3, m6, [srcq+src3q]
    mulps         m4, m6, [srcq+src4q]
    mulps         m5, m6, [srcq+src5q]
    cvtps2dq      m0, m0
    cvtps2dq      m1, m1
    cvtps2dq      m2, m2
    cvtps2dq      m3, m3
    cvtps2dq      m4, m4
    cvtps2dq      m5, m5
    packssdw      m0, m3            ; m0 =  0,  6, 12, 18,  3,  9, 15, 21
    packssdw      m1, m4            ; m1 =  1,  7, 13, 19,  4, 10, 16, 22
    packssdw      m2, m5            ; m2 =  2,  8, 14, 20,  5, 11, 17, 23
                                    ; unpack words:
    movhlps       m3, m0            ; m3 =  3,  9, 15, 21,  x,  x,  x,  x
    punpcklwd     m0, m1            ; m0 =  0,  1,  6,  7, 12, 13, 18, 19
    punpckhwd     m1, m2            ; m1 =  4,  5, 10, 11, 16, 17, 22, 23
    punpcklwd     m2, m3            ; m2 =  2,  3,  8,  9, 14, 15, 20, 21
                                    ; blend dwords:
    shufps        m3, m0, m2, q2020 ; m3 =  0,  1, 12, 13,  2,  3, 14, 15
    shufps        m0, m1, q2031     ; m0 =  6,  7, 18, 19,  4,  5, 16, 17
    shufps        m2, m1, q3131     ; m2 =  8,  9, 20, 21, 10, 11, 22, 23
                                    ; shuffle dwords:
    shufps        m1, m2, m3, q3120 ; m1 =  8,  9, 10, 11, 12, 13, 14, 15
    shufps        m3, m0,     q0220 ; m3 =  0,  1,  2,  3,  4,  5,  6,  7
    shufps        m0, m2,     q3113 ; m0 = 16, 17, 18, 19, 20, 21, 22, 23
    mova  [dstq+0*mmsize], m3
    mova  [dstq+1*mmsize], m1
    mova  [dstq+2*mmsize], m0
%else ; sse
    movlps      xmm0, [srcq      ]
    movlps      xmm1, [srcq+src1q]
    movlps      xmm2, [srcq+src2q]
    movlps      xmm3, [srcq+src3q]
    movlps      xmm4, [srcq+src4q]
    movlps      xmm5, [srcq+src5q]
    mulps       xmm0, xmm6
    mulps       xmm1, xmm6
    mulps       xmm2, xmm6
    mulps       xmm3, xmm6
    mulps       xmm4, xmm6
    mulps       xmm5, xmm6
    cvtps2pi     mm0, xmm0
    cvtps2pi     mm1, xmm1
    cvtps2pi     mm2, xmm2
    cvtps2pi     mm3, xmm3
    cvtps2pi     mm4, xmm4
    cvtps2pi     mm5, xmm5
    packssdw     mm0, mm3           ; m0 =  0,  6,  3,  9
    packssdw     mm1, mm4           ; m1 =  1,  7,  4, 10
    packssdw     mm2, mm5           ; m2 =  2,  8,  5, 11
                                    ; unpack words
    pshufw       mm3, mm0, q1032    ; m3 =  3,  9,  0,  6
    punpcklwd    mm0, mm1           ; m0 =  0,  1,  6,  7
    punpckhwd    mm1, mm2           ; m1 =  4,  5, 10, 11
    punpcklwd    mm2, mm3           ; m2 =  2,  3,  8,  9
                                    ; unpack dwords
    pshufw       mm3, mm0, q1032    ; m3 =  6,  7,  0,  1
    punpckldq    mm0, mm2           ; m0 =  0,  1,  2,  3 (final)
    punpckhdq    mm2, mm1           ; m2 =  8,  9, 10, 11 (final)
    punpckldq    mm1, mm3           ; m1 =  4,  5,  6,  7 (final)
    mova  [dstq+0*mmsize], mm0
    mova  [dstq+1*mmsize], mm1
    mova  [dstq+2*mmsize], mm2
%endif
    add       srcq, mmsize
    add       dstq, mmsize*3
    sub       lend, mmsize/4
    jg .loop
%if mmsize == 8
    emms
    RET
%else
    REP_RET
%endif
%endmacro

INIT_MMX sse
CONV_FLTP_TO_S16_6CH
INIT_XMM sse2
CONV_FLTP_TO_S16_6CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLTP_TO_S16_6CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_fltp_to_flt_2ch(float *dst, float *const *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
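; Pure interleave: SBUTTERFLYPS turns two registers of planar floats into
; L0,R0,L1,R1,... pairs.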

%macro CONV_FLTP_TO_FLT_2CH 0
cglobal conv_fltp_to_flt_2ch, 3,4,5, dst, src0, len, src1
    mov  src1q, [src0q+gprsize]
    mov  src0q, [src0q]
    lea   lenq, [4*lend]
    add  src0q, lenq
    add  src1q, lenq
    lea   dstq, [dstq+2*lenq]
    neg   lenq
.loop:
    mova    m0, [src0q+lenq       ]
    mova    m1, [src1q+lenq       ]
    mova    m2, [src0q+lenq+mmsize]
    mova    m3, [src1q+lenq+mmsize]
    SBUTTERFLYPS 0, 1, 4
    SBUTTERFLYPS 2, 3, 4
    mova  [dstq+2*lenq+0*mmsize], m0
    mova  [dstq+2*lenq+1*mmsize], m1
    mova  [dstq+2*lenq+2*mmsize], m2
    mova  [dstq+2*lenq+3*mmsize], m3
    add   lenq, 2*mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse
CONV_FLTP_TO_FLT_2CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLTP_TO_FLT_2CH
%endif

;-----------------------------------------------------------------------------
; void ff_conv_fltp_to_flt_6ch(float *dst, float *const *src, int len,
;                              int channels);
;-----------------------------------------------------------------------------
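; Transposes 4 samples x 6 channels per iteration: SBUTTERFLYPS interleaves
; channel pairs, then blendps/movlhps/movhlps (SSE4) or SBUTTERFLY dq (MMX)
; assemble the interleaved rows.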

%macro CONV_FLTP_TO_FLT_6CH 0
cglobal conv_fltp_to_flt_6ch, 2,8,7, dst, src, src1, src2, src3, src4, src5, len
%if ARCH_X86_64
    mov     lend, r2d
%else
    %define lend dword r2m
%endif
    mov    src1q, [srcq+1*gprsize]
    mov    src2q, [srcq+2*gprsize]
    mov    src3q, [srcq+3*gprsize]
    mov    src4q, [srcq+4*gprsize]
    mov    src5q, [srcq+5*gprsize]
    mov     srcq, [srcq]
    sub    src1q, srcq
    sub    src2q, srcq
    sub    src3q, srcq
    sub    src4q, srcq
    sub    src5q, srcq
.loop:
    mova      m0, [srcq      ]
    mova      m1, [srcq+src1q]
    mova      m2, [srcq+src2q]
    mova      m3, [srcq+src3q]
    mova      m4, [srcq+src4q]
    mova      m5, [srcq+src5q]
%if cpuflag(sse4)
    SBUTTERFLYPS 0, 1, 6
    SBUTTERFLYPS 2, 3, 6
    SBUTTERFLYPS 4, 5, 6

    blendps   m6, m4, m0, 1100b
    movlhps   m0, m2
    movhlps   m4, m2
    blendps   m2, m5, m1, 1100b
    movlhps   m1, m3
    movhlps   m5, m3

    movaps [dstq   ], m0
    movaps [dstq+16], m6
    movaps [dstq+32], m4
    movaps [dstq+48], m1
    movaps [dstq+64], m2
    movaps [dstq+80], m5
%else ; mmx
    SBUTTERFLY dq, 0, 1, 6
    SBUTTERFLY dq, 2, 3, 6
    SBUTTERFLY dq, 4, 5, 6

    movq   [dstq   ], m0
    movq   [dstq+ 8], m2
    movq   [dstq+16], m4
    movq   [dstq+24], m1
    movq   [dstq+32], m3
    movq   [dstq+40], m5
%endif
    add      srcq, mmsize
    add      dstq, mmsize*6
    sub      lend, mmsize/4
    jg .loop
%if mmsize == 8
    emms
    RET
%else
    REP_RET
%endif
%endmacro

INIT_MMX mmx
CONV_FLTP_TO_FLT_6CH
INIT_XMM sse4
CONV_FLTP_TO_FLT_6CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLTP_TO_FLT_6CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_s16_to_s16p_2ch(int16_t *const *dst, int16_t *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
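; Deinterleave: SSSE3 groups even/odd words with one pshufb per register
; (pb_deinterleave_words); SSE2 reaches the same order with pshuflw/pshufhw
; before splitting the halves with DEINT2_PS.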

%macro CONV_S16_TO_S16P_2CH 0
cglobal conv_s16_to_s16p_2ch, 3,4,4, dst0, src, len, dst1
    lea       lenq, [2*lend]
    mov      dst1q, [dst0q+gprsize]
    mov      dst0q, [dst0q        ]
    lea       srcq, [srcq+2*lenq]
    add      dst0q, lenq
    add      dst1q, lenq
    neg       lenq
%if cpuflag(ssse3)
    mova        m3, [pb_deinterleave_words]
%endif
.loop:
    mova        m0, [srcq+2*lenq       ]  ; m0 =  0,  1,  2,  3,  4,  5,  6,  7
    mova        m1, [srcq+2*lenq+mmsize]  ; m1 =  8,  9, 10, 11, 12, 13, 14, 15
%if cpuflag(ssse3)
    pshufb      m0, m3                    ; m0 =  0,  2,  4,  6,  1,  3,  5,  7
    pshufb      m1, m3                    ; m1 =  8, 10, 12, 14,  9, 11, 13, 15
    SBUTTERFLY2 qdq, 0, 1, 2              ; m0 =  0,  2,  4,  6,  8, 10, 12, 14
                                          ; m1 =  1,  3,  5,  7,  9, 11, 13, 15
%else ; sse2
    pshuflw     m0, m0, q3120             ; m0 =  0,  2,  1,  3,  4,  5,  6,  7
    pshufhw     m0, m0, q3120             ; m0 =  0,  2,  1,  3,  4,  6,  5,  7
    pshuflw     m1, m1, q3120             ; m1 =  8, 10,  9, 11, 12, 13, 14, 15
    pshufhw     m1, m1, q3120             ; m1 =  8, 10,  9, 11, 12, 14, 13, 15
    DEINT2_PS    0, 1, 2                  ; m0 =  0,  2,  4,  6,  8, 10, 12, 14
                                          ; m1 =  1,  3,  5,  7,  9, 11, 13, 15
%endif
    mova  [dst0q+lenq], m0
    mova  [dst1q+lenq], m1
    add       lenq, mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16_TO_S16P_2CH
INIT_XMM ssse3
CONV_S16_TO_S16P_2CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16_TO_S16P_2CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_s16_to_s16p_6ch(int16_t *const *dst, int16_t *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
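; Reads 24 packed samples (4 frames of 6 channels) per iteration and
; transposes them back to planar order with word/dword unpacks, writing one
; movq/movhps qword per channel.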

%macro CONV_S16_TO_S16P_6CH 0
%if ARCH_X86_64
cglobal conv_s16_to_s16p_6ch, 3,8,5, dst, src, len, dst1, dst2, dst3, dst4, dst5
%else
cglobal conv_s16_to_s16p_6ch, 2,7,5, dst, src, dst1, dst2, dst3, dst4, dst5
%define lend dword r2m
%endif
    mov     dst1q, [dstq+  gprsize]
    mov     dst2q, [dstq+2*gprsize]
    mov     dst3q, [dstq+3*gprsize]
    mov     dst4q, [dstq+4*gprsize]
    mov     dst5q, [dstq+5*gprsize]
    mov      dstq, [dstq          ]
    sub     dst1q, dstq
    sub     dst2q, dstq
    sub     dst3q, dstq
    sub     dst4q, dstq
    sub     dst5q, dstq
.loop:
    mova       m0, [srcq+0*mmsize]      ; m0 =  0,  1,  2,  3,  4,  5,  6,  7
    mova       m3, [srcq+1*mmsize]      ; m3 =  8,  9, 10, 11, 12, 13, 14, 15
    mova       m2, [srcq+2*mmsize]      ; m2 = 16, 17, 18, 19, 20, 21, 22, 23
    PALIGNR    m1, m3, m0, 12, m4       ; m1 =  6,  7,  8,  9, 10, 11,  x,  x
    shufps     m3, m2, q1032            ; m3 = 12, 13, 14, 15, 16, 17, 18, 19
    psrldq     m2, 4                    ; m2 = 18, 19, 20, 21, 22, 23,  x,  x
    SBUTTERFLY2 wd, 0, 1, 4             ; m0 =  0,  6,  1,  7,  2,  8,  3,  9
                                        ; m1 =  4, 10,  5, 11,  x,  x,  x,  x
    SBUTTERFLY2 wd, 3, 2, 4             ; m3 = 12, 18, 13, 19, 14, 20, 15, 21
                                        ; m2 = 16, 22, 17, 23,  x,  x,  x,  x
    SBUTTERFLY2 dq, 0, 3, 4             ; m0 =  0,  6, 12, 18,  1,  7, 13, 19
                                        ; m3 =  2,  8, 14, 20,  3,  9, 15, 21
    punpckldq  m1, m2                   ; m1 =  4, 10, 16, 22,  5, 11, 17, 23
    movq    [dstq      ], m0
    movhps  [dstq+dst1q], m0
    movq    [dstq+dst2q], m3
    movhps  [dstq+dst3q], m3
    movq    [dstq+dst4q], m1
    movhps  [dstq+dst5q], m1
    add      srcq, mmsize*3
    add      dstq, mmsize/2
    sub      lend, mmsize/4
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16_TO_S16P_6CH
INIT_XMM ssse3
CONV_S16_TO_S16P_6CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16_TO_S16P_6CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_s16_to_fltp_2ch(float *const *dst, int16_t *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
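; pslld by 16 isolates the left channel and the pw_zero_even mask the right
; one, each left as "sample << 16"; both are then converted to float and
; scaled by pf_s32_inv_scale = 2^-31.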

%macro CONV_S16_TO_FLTP_2CH 0
cglobal conv_s16_to_fltp_2ch, 3,4,5, dst0, src, len, dst1
    lea       lenq, [4*lend]
    mov      dst1q, [dst0q+gprsize]
    mov      dst0q, [dst0q        ]
    add       srcq, lenq
    add      dst0q, lenq
    add      dst1q, lenq
    neg       lenq
    mova        m3, [pf_s32_inv_scale]
    mova        m4, [pw_zero_even]
.loop:
    mova        m1, [srcq+lenq]
    pslld       m0, m1, 16
    pand        m1, m4
    cvtdq2ps    m0, m0
    cvtdq2ps    m1, m1
    mulps       m0, m0, m3
    mulps       m1, m1, m3
    mova  [dst0q+lenq], m0
    mova  [dst1q+lenq], m1
    add       lenq, mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16_TO_FLTP_2CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16_TO_FLTP_2CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_s16_to_fltp_6ch(float *const *dst, int16_t *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
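; Same packed-to-planar word transpose as conv_s16_to_s16p_6ch, followed by
; sign extension (S16_TO_S32_SX), float conversion and a 2^-15 scale.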

%macro CONV_S16_TO_FLTP_6CH 0
%if ARCH_X86_64
cglobal conv_s16_to_fltp_6ch, 3,8,7, dst, src, len, dst1, dst2, dst3, dst4, dst5
%else
cglobal conv_s16_to_fltp_6ch, 2,7,7, dst, src, dst1, dst2, dst3, dst4, dst5
%define lend dword r2m
%endif
    mov     dst1q, [dstq+  gprsize]
    mov     dst2q, [dstq+2*gprsize]
    mov     dst3q, [dstq+3*gprsize]
    mov     dst4q, [dstq+4*gprsize]
    mov     dst5q, [dstq+5*gprsize]
    mov      dstq, [dstq          ]
    sub     dst1q, dstq
    sub     dst2q, dstq
    sub     dst3q, dstq
    sub     dst4q, dstq
    sub     dst5q, dstq
    mova       m6, [pf_s16_inv_scale]
.loop:
    mova       m0, [srcq+0*mmsize]  ; m0 =  0,  1,  2,  3,  4,  5,  6,  7
    mova       m3, [srcq+1*mmsize]  ; m3 =  8,  9, 10, 11, 12, 13, 14, 15
    mova       m2, [srcq+2*mmsize]  ; m2 = 16, 17, 18, 19, 20, 21, 22, 23
    PALIGNR    m1, m3, m0, 12, m4   ; m1 =  6,  7,  8,  9, 10, 11,  x,  x
    shufps     m3, m2, q1032        ; m3 = 12, 13, 14, 15, 16, 17, 18, 19
    psrldq     m2, 4                ; m2 = 18, 19, 20, 21, 22, 23,  x,  x
    SBUTTERFLY2 wd, 0, 1, 4         ; m0 =  0,  6,  1,  7,  2,  8,  3,  9
                                    ; m1 =  4, 10,  5, 11,  x,  x,  x,  x
    SBUTTERFLY2 wd, 3, 2, 4         ; m3 = 12, 18, 13, 19, 14, 20, 15, 21
                                    ; m2 = 16, 22, 17, 23,  x,  x,  x,  x
    SBUTTERFLY2 dq, 0, 3, 4         ; m0 =  0,  6, 12, 18,  1,  7, 13, 19
                                    ; m3 =  2,  8, 14, 20,  3,  9, 15, 21
    punpckldq  m1, m2               ; m1 =  4, 10, 16, 22,  5, 11, 17, 23
    S16_TO_S32_SX 0, 2              ; m0 =      0,      6,     12,     18
                                    ; m2 =      1,      7,     13,     19
    S16_TO_S32_SX 3, 4              ; m3 =      2,      8,     14,     20
                                    ; m4 =      3,      9,     15,     21
    S16_TO_S32_SX 1, 5              ; m1 =      4,     10,     16,     22
                                    ; m5 =      5,     11,     17,     23
    SWAP 1,2,3,4
    cvtdq2ps   m0, m0
    cvtdq2ps   m1, m1
    cvtdq2ps   m2, m2
    cvtdq2ps   m3, m3
    cvtdq2ps   m4, m4
    cvtdq2ps   m5, m5
    mulps      m0, m6
    mulps      m1, m6
    mulps      m2, m6
    mulps      m3, m6
    mulps      m4, m6
    mulps      m5, m6
    mova  [dstq      ], m0
    mova  [dstq+dst1q], m1
    mova  [dstq+dst2q], m2
    mova  [dstq+dst3q], m3
    mova  [dstq+dst4q], m4
    mova  [dstq+dst5q], m5
    add      srcq, mmsize*3
    add      dstq, mmsize
    sub      lend, mmsize/4
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16_TO_FLTP_6CH
INIT_XMM ssse3
CONV_S16_TO_FLTP_6CH
INIT_XMM sse4
CONV_S16_TO_FLTP_6CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16_TO_FLTP_6CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_flt_to_s16p_2ch(int16_t *const *dst, float *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
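; DEINT2_PS splits the interleaved float pairs into per-channel registers,
; which are then scaled by 2^15, rounded with cvtps2dq and packed with
; signed saturation.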

%macro CONV_FLT_TO_S16P_2CH 0
cglobal conv_flt_to_s16p_2ch, 3,4,6, dst0, src, len, dst1
    lea       lenq, [2*lend]
    mov      dst1q, [dst0q+gprsize]
    mov      dst0q, [dst0q        ]
    lea       srcq, [srcq+4*lenq]
    add      dst0q, lenq
    add      dst1q, lenq
    neg       lenq
    mova        m5, [pf_s16_scale]
.loop:
    mova       m0, [srcq+4*lenq         ]
    mova       m1, [srcq+4*lenq+  mmsize]
    mova       m2, [srcq+4*lenq+2*mmsize]
    mova       m3, [srcq+4*lenq+3*mmsize]
    DEINT2_PS   0, 1, 4
    DEINT2_PS   2, 3, 4
    mulps      m0, m0, m5
    mulps      m1, m1, m5
    mulps      m2, m2, m5
    mulps      m3, m3, m5
    cvtps2dq   m0, m0
    cvtps2dq   m1, m1
    cvtps2dq   m2, m2
    cvtps2dq   m3, m3
    packssdw   m0, m2
    packssdw   m1, m3
    mova  [dst0q+lenq], m0
    mova  [dst1q+lenq], m1
    add      lenq, mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_FLT_TO_S16P_2CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLT_TO_S16P_2CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_flt_to_s16p_6ch(int16_t *const *dst, float *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
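; Scales and converts six registers of packed floats to s16, then reuses the
; word transpose from conv_s16_to_s16p_6ch to split them into channels.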

%macro CONV_FLT_TO_S16P_6CH 0
%if ARCH_X86_64
cglobal conv_flt_to_s16p_6ch, 3,8,7, dst, src, len, dst1, dst2, dst3, dst4, dst5
%else
cglobal conv_flt_to_s16p_6ch, 2,7,7, dst, src, dst1, dst2, dst3, dst4, dst5
%define lend dword r2m
%endif
    mov     dst1q, [dstq+  gprsize]
    mov     dst2q, [dstq+2*gprsize]
    mov     dst3q, [dstq+3*gprsize]
    mov     dst4q, [dstq+4*gprsize]
    mov     dst5q, [dstq+5*gprsize]
    mov      dstq, [dstq          ]
    sub     dst1q, dstq
    sub     dst2q, dstq
    sub     dst3q, dstq
    sub     dst4q, dstq
    sub     dst5q, dstq
    mova       m6, [pf_s16_scale]
.loop:
    mulps      m0, m6, [srcq+0*mmsize]
    mulps      m3, m6, [srcq+1*mmsize]
    mulps      m1, m6, [srcq+2*mmsize]
    mulps      m4, m6, [srcq+3*mmsize]
    mulps      m2, m6, [srcq+4*mmsize]
    mulps      m5, m6, [srcq+5*mmsize]
    cvtps2dq   m0, m0
    cvtps2dq   m1, m1
    cvtps2dq   m2, m2
    cvtps2dq   m3, m3
    cvtps2dq   m4, m4
    cvtps2dq   m5, m5
    packssdw   m0, m3               ; m0 =  0,  1,  2,  3,  4,  5,  6,  7
    packssdw   m1, m4               ; m1 =  8,  9, 10, 11, 12, 13, 14, 15
    packssdw   m2, m5               ; m2 = 16, 17, 18, 19, 20, 21, 22, 23
    PALIGNR    m3, m1, m0, 12, m4   ; m3 =  6,  7,  8,  9, 10, 11,  x,  x
    shufps     m1, m2, q1032        ; m1 = 12, 13, 14, 15, 16, 17, 18, 19
    psrldq     m2, 4                ; m2 = 18, 19, 20, 21, 22, 23,  x,  x
    SBUTTERFLY2 wd, 0, 3, 4         ; m0 =  0,  6,  1,  7,  2,  8,  3,  9
                                    ; m3 =  4, 10,  5, 11,  x,  x,  x,  x
    SBUTTERFLY2 wd, 1, 2, 4         ; m1 = 12, 18, 13, 19, 14, 20, 15, 21
                                    ; m2 = 16, 22, 17, 23,  x,  x,  x,  x
    SBUTTERFLY2 dq, 0, 1, 4         ; m0 =  0,  6, 12, 18,  1,  7, 13, 19
                                    ; m1 =  2,  8, 14, 20,  3,  9, 15, 21
    punpckldq  m3, m2               ; m3 =  4, 10, 16, 22,  5, 11, 17, 23
    movq    [dstq      ], m0
    movhps  [dstq+dst1q], m0
    movq    [dstq+dst2q], m1
    movhps  [dstq+dst3q], m1
    movq    [dstq+dst4q], m3
    movhps  [dstq+dst5q], m3
    add      srcq, mmsize*6
    add      dstq, mmsize/2
    sub      lend, mmsize/4
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_FLT_TO_S16P_6CH
INIT_XMM ssse3
CONV_FLT_TO_S16P_6CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLT_TO_S16P_6CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_flt_to_fltp_2ch(float *const *dst, float *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
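; Pure deinterleave: DEINT2_PS separates the L/R float pairs into two planar
; registers.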

%macro CONV_FLT_TO_FLTP_2CH 0
cglobal conv_flt_to_fltp_2ch, 3,4,3, dst0, src, len, dst1
    lea    lenq, [4*lend]
    mov   dst1q, [dst0q+gprsize]
    mov   dst0q, [dst0q        ]
    lea    srcq, [srcq+2*lenq]
    add   dst0q, lenq
    add   dst1q, lenq
    neg    lenq
.loop:
    mova     m0, [srcq+2*lenq       ]
    mova     m1, [srcq+2*lenq+mmsize]
    DEINT2_PS 0, 1, 2
    mova  [dst0q+lenq], m0
    mova  [dst1q+lenq], m1
    add    lenq, mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse
CONV_FLT_TO_FLTP_2CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLT_TO_FLTP_2CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_flt_to_fltp_6ch(float *const *dst, float *src, int len,
;                              int channels);
;------------------------------------------------------------------------------
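; A 4x6 float transpose built from dword SBUTTERFLY2 steps; each iteration
; moves 4 samples of all 6 channels from packed to planar order.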

%macro CONV_FLT_TO_FLTP_6CH 0
%if ARCH_X86_64
cglobal conv_flt_to_fltp_6ch, 3,8,7, dst, src, len, dst1, dst2, dst3, dst4, dst5
%else
cglobal conv_flt_to_fltp_6ch, 2,7,7, dst, src, dst1, dst2, dst3, dst4, dst5
%define lend dword r2m
%endif
    mov     dst1q, [dstq+  gprsize]
    mov     dst2q, [dstq+2*gprsize]
    mov     dst3q, [dstq+3*gprsize]
    mov     dst4q, [dstq+4*gprsize]
    mov     dst5q, [dstq+5*gprsize]
    mov      dstq, [dstq          ]
    sub     dst1q, dstq
    sub     dst2q, dstq
    sub     dst3q, dstq
    sub     dst4q, dstq
    sub     dst5q, dstq
.loop:
    mova       m0, [srcq+0*mmsize]  ; m0 =  0,  1,  2,  3
    mova       m1, [srcq+1*mmsize]  ; m1 =  4,  5,  6,  7
    mova       m2, [srcq+2*mmsize]  ; m2 =  8,  9, 10, 11
    mova       m3, [srcq+3*mmsize]  ; m3 = 12, 13, 14, 15
    mova       m4, [srcq+4*mmsize]  ; m4 = 16, 17, 18, 19
    mova       m5, [srcq+5*mmsize]  ; m5 = 20, 21, 22, 23

    SBUTTERFLY2 dq, 0, 3, 6         ; m0 =  0, 12,  1, 13
                                    ; m3 =  2, 14,  3, 15
    SBUTTERFLY2 dq, 1, 4, 6         ; m1 =  4, 16,  5, 17
                                    ; m4 =  6, 18,  7, 19
    SBUTTERFLY2 dq, 2, 5, 6         ; m2 =  8, 20,  9, 21
                                    ; m5 = 10, 22, 11, 23
    SBUTTERFLY2 dq, 0, 4, 6         ; m0 =  0,  6, 12, 18
                                    ; m4 =  1,  7, 13, 19
    SBUTTERFLY2 dq, 3, 2, 6         ; m3 =  2,  8, 14, 20
                                    ; m2 =  3,  9, 15, 21
    SBUTTERFLY2 dq, 1, 5, 6         ; m1 =  4, 10, 16, 22
                                    ; m5 =  5, 11, 17, 23
    mova [dstq      ], m0
    mova [dstq+dst1q], m4
    mova [dstq+dst2q], m3
    mova [dstq+dst3q], m2
    mova [dstq+dst4q], m1
    mova [dstq+dst5q], m5
    add      srcq, mmsize*6
    add      dstq, mmsize
    sub      lend, mmsize/4
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_FLT_TO_FLTP_6CH
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLT_TO_FLTP_6CH
%endif