Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Contribute to GitLab
Sign in / Register
Toggle navigation
F
ffmpeg.wasm-core
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Linshizhi
ffmpeg.wasm-core
Commits
3758eb0e
Commit
3758eb0e
authored
May 17, 2011
by
Vitor Sessak
Committed by
Reinhard Tartler
May 21, 2011
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
dct32: port SSE 32-point DCT to YASM
parent
153382e1
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
291 additions
and
297 deletions
+291
-297
Makefile
libavcodec/x86/Makefile
+2
-1
dct32_sse.asm
libavcodec/x86/dct32_sse.asm
+289
-0
dct32_sse.c
libavcodec/x86/dct32_sse.c
+0
-296
No files found.
libavcodec/x86/Makefile
View file @
3758eb0e
# NOTE(review): this region is a GitLab *diff rendering* of
# libavcodec/x86/Makefile, not the complete file.  The "..." and "@@" lines
# below are diff context markers left over from the page scrape — restore the
# real file from the original commit before using this as a Makefile.
OBJS-$(CONFIG_MLP_DECODER)             += x86/mlpdsp.o
OBJS-$(CONFIG_TRUEHD_DECODER)          += x86/mlpdsp.o
# Added by this commit: build the new YASM port of the 32-point DCT.
YASM-OBJS-$(CONFIG_DCT)                += x86/dct32_sse.o
YASM-OBJS-FFT-$(HAVE_AMD3DNOW)         += x86/fft_3dn.o
YASM-OBJS-FFT-$(HAVE_AMD3DNOWEXT)      += x86/fft_3dn2.o
YASM-OBJS-FFT-$(HAVE_SSE)              += x86/fft_sse.o
...
...
@@ -54,4 +56,3 @@ OBJS-$(HAVE_MMX) += x86/dnxhd_mmx.o \
x86/mpegvideo_mmx.o \
x86/simple_idct_mmx.o \
# NOTE(review): the line below is shown as *removed* by this commit (the
# object moved from MMX-OBJS to YASM-OBJS above).
MMX-OBJS-$(CONFIG_DCT) += x86/dct32_sse.o
libavcodec/x86/dct32_sse.asm
0 → 100644
View file @
3758eb0e
;******************************************************************************
;* 32 point SSE-optimized DCT transform
;* Copyright (c) 2010 Vitor Sessak
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include
"x86inc.asm"
SECTION_RODATA 32

; Cosine coefficient table for the 32-point DCT butterflies.  The code loads
; it 16 bytes (4 floats) at a time at ps_cos_vec + {0,16,32,48,64,80,96,112,
; 128}, one group per butterfly pass; values mirror the b1[] table of the C
; implementation this file replaces.
align 32
ps_cos_vec: dd   0.500603,  0.505471,  0.515447,  0.531043
            dd   0.553104,  0.582935,  0.622504,  0.674808
            dd  -1.169440, -0.972568, -0.839350, -0.744536
            dd -10.190008, -3.407609, -2.057781, -1.484165
            dd   0.502419,  0.522499,  0.566944,  0.646822
            dd   0.788155,  1.060678,  1.722447,  5.101149
            dd   0.509796,  0.601345,  0.899976,  2.562916
            dd   1.000000,  1.000000,  1.306563,  0.541196
            dd   1.000000,  0.707107,  1.000000, -0.707107

; Sign mask: xorps with this flips the sign bit of the two high floats of a
; vector while leaving the two low floats unchanged (used by BUTTERFLY0).
ps_p1p1m1m1: dd 0, 0, 0x80000000, 0x80000000
; BUTTERFLY dst_diff, dst_sum, cos, tmp
;   tmp = %1;  %1 -= %2;  %2 += tmp;  %1 *= %3
; i.e. %2 becomes the element-wise sum and %1 the difference scaled by the
; cosine operand %3 (a register or memory operand).  %4 is clobbered.
%macro BUTTERFLY 4
    movaps %4, %1
    subps  %1, %2
    addps  %2, %4
    mulps  %1, %3
%endmacro
; BUTTERFLY0 val, mask, cos, tmp, shuf
; Butterfly within a single register: tmp = val; val is shuffled by imm %5;
; tmp has its signs flipped by xorps with %2 (the ps_p1p1m1m1-style mask);
; val = (val + tmp) * cos.  %4 is clobbered.
%macro BUTTERFLY0 5
    movaps %4, %1
    shufps %1, %1, %5
    xorps  %4, %2                      ; flip signs per the mask
    addps  %1, %4
    mulps  %1, %3
%endmacro
; BUTTERFLY0 with shuffle 0x1b (reverse the four floats: 3,2,1,0).
%macro BUTTERFLY2 4
    BUTTERFLY0 %1, %2, %3, %4, 0x1b
%endmacro
; BUTTERFLY0 with shuffle 0xb1 (swap within each pair: 2,3,0,1).
%macro BUTTERFLY3 4
    BUTTERFLY0 %1, %2, %3, %4, 0xb1
%endmacro
INIT_XMM
section .text align=16

;-----------------------------------------------------------------------------
; void ff_dct32_float_sse(FFTSample *out, const FFTSample *in)
;
; 32-point DCT.  Reads 32 floats from in (offsets 0..112, movaps => in must
; be 16-byte aligned) and writes 32 floats to out.  out is also used as
; scratch storage between passes, so it must be 16-byte aligned and writable
; for the whole call.
; cglobal args: 2 GPR parameters (out, in), 1 extra GPR (tmp — pass-6 word
; shuttle), 8 xmm registers (m0-m7) clobbered.
;-----------------------------------------------------------------------------
cglobal dct32_float_sse, 2,3,8, out, in, tmp
    ; pass 1: butterflies between mirrored input vectors (shufps 0x1b
    ; reverses the four floats), scaled by the ps_cos_vec table
    movaps      m0, [inq+0]
    movaps      m1, [inq+112]
    shufps      m1, m1, 0x1b
    BUTTERFLY   m0, m1, [ps_cos_vec], m3
    movaps      m7, [inq+64]
    movaps      m4, [inq+48]
    shufps      m4, m4, 0x1b
    BUTTERFLY   m7, m4, [ps_cos_vec+48], m3
    ; pass 2
    movaps      m2, [ps_cos_vec+64]
    BUTTERFLY   m1, m4, m2, m3
    movaps      [outq+48], m1          ; spill intermediates into out[]
    movaps      [outq+0], m4
    ; pass 1 (second half of the inputs)
    movaps      m1, [inq+16]
    movaps      m6, [inq+96]
    shufps      m6, m6, 0x1b
    BUTTERFLY   m1, m6, [ps_cos_vec+16], m3
    movaps      m4, [inq+80]
    movaps      m5, [inq+32]
    shufps      m5, m5, 0x1b
    BUTTERFLY   m4, m5, [ps_cos_vec+32], m3
    ; pass 2
    BUTTERFLY   m0, m7, m2, m3
    movaps      m2, [ps_cos_vec+80]
    BUTTERFLY   m6, m5, m2, m3
    BUTTERFLY   m1, m4, m2, m3
    ; pass 3
    movaps      m2, [ps_cos_vec+96]
    shufps      m1, m1, 0x1b
    BUTTERFLY   m0, m1, m2, m3
    movaps      [outq+112], m0
    movaps      [outq+96], m1
    movaps      m0, [outq+0]
    shufps      m5, m5, 0x1b
    BUTTERFLY   m0, m5, m2, m3
    movaps      m1, [outq+48]
    shufps      m6, m6, 0x1b
    BUTTERFLY   m1, m6, m2, m3
    movaps      [outq+48], m1
    shufps      m4, m4, 0x1b
    BUTTERFLY   m7, m4, m2, m3
    ; pass 4: in-register butterflies using the sign mask (BUTTERFLY2 =
    ; shuffle 0x1b)
    movaps      m3, [ps_p1p1m1m1+0]
    movaps      m2, [ps_cos_vec+112]
    BUTTERFLY2  m5, m3, m2, m1
    BUTTERFLY2  m0, m3, m2, m1
    movaps      [outq+16], m0
    BUTTERFLY2  m6, m3, m2, m1
    movaps      [outq+32], m6
    movaps      m0, [outq+48]
    BUTTERFLY2  m0, m3, m2, m1
    movaps      [outq+48], m0
    BUTTERFLY2  m4, m3, m2, m1
    BUTTERFLY2  m7, m3, m2, m1
    movaps      m6, [outq+96]
    BUTTERFLY2  m6, m3, m2, m1
    movaps      m0, [outq+112]
    BUTTERFLY2  m0, m3, m2, m1
    ; pass 5 (BUTTERFLY3 = shuffle 0xb1, pairwise swap); the mask itself is
    ; rearranged first with shufps 0xcc
    movaps      m2, [ps_cos_vec+128]
    shufps      m3, m3, 0xcc
    BUTTERFLY3  m5, m3, m2, m1
    movaps      [outq+0], m5
    movaps      m1, [outq+16]
    BUTTERFLY3  m1, m3, m2, m5
    movaps      [outq+16], m1
    BUTTERFLY3  m4, m3, m2, m5
    movaps      [outq+64], m4
    BUTTERFLY3  m7, m3, m2, m5
    movaps      [outq+80], m7
    movaps      m5, [outq+32]
    BUTTERFLY3  m5, m3, m2, m7
    movaps      [outq+32], m5
    movaps      m4, [outq+48]
    BUTTERFLY3  m4, m3, m2, m7
    movaps      [outq+48], m4
    BUTTERFLY3  m6, m3, m2, m7
    movaps      [outq+96], m6
    BUTTERFLY3  m0, m3, m2, m7
    movaps      [outq+112], m0
    ; pass 6, no SIMD...
    ; Final scalar accumulation with scalar addss/movss; tmpd shuttles
    ; 32-bit words that are copied unchanged.  The ordering below is
    ; hand-scheduled — do not reorder.
    movss       m3, [outq+56]
    mov         tmpd, [outq+4]
    addss       m3, [outq+60]
    movss       m7, [outq+72]
    addss       m4, m3
    movss       m2, [outq+52]
    addss       m2, m3
    movss       m3, [outq+24]
    addss       m3, [outq+28]
    addss       m7, [outq+76]
    addss       m1, m3
    addss       m5, m4
    movss       [outq+16], m1
    movss       m1, [outq+20]
    addss       m1, m3
    movss       m3, [outq+40]
    movss       [outq+48], m1
    addss       m3, [outq+44]
    movss       m1, [outq+20]
    addss       m4, m3
    addss       m3, m2
    addss       m1, [outq+28]
    movss       [outq+40], m3
    addss       m2, [outq+36]
    movss       m3, [outq+8]
    movss       [outq+56], m2
    addss       m3, [outq+12]
    movss       [outq+8], m5
    movss       [outq+32], m3
    movss       m2, [outq+52]
    movss       m3, [outq+80]
    movss       m5, [outq+120]
    movss       [outq+80], m1
    movss       [outq+24], m4
    addss       m5, [outq+124]
    movss       m1, [outq+64]
    addss       m2, [outq+60]
    addss       m0, m5
    addss       m5, [outq+116]
    mov         [outq+64], tmpd
    addss       m6, m0
    addss       m1, m6
    mov         tmpd, [outq+12]
    movss       [outq+4], m1
    movss       m1, [outq+88]
    mov         [outq+96], tmpd
    addss       m1, [outq+92]
    movss       m4, [outq+104]
    mov         tmpd, [outq+28]
    addss       m4, [outq+108]
    addss       m0, m4
    addss       m3, m1
    addss       m1, [outq+84]
    addss       m4, m5
    addss       m6, m3
    addss       m3, m0
    addss       m0, m7
    addss       m5, [outq+100]
    addss       m7, m4
    mov         [outq+112], tmpd
    movss       [outq+28], m0
    movss       m0, [outq+36]
    movss       [outq+36], m7
    addss       m4, m1
    movss       m7, [outq+116]
    addss       m0, m2
    addss       m7, [outq+124]
    movss       [outq+72], m0
    movss       m0, [outq+44]
    movss       [outq+12], m6
    movss       [outq+20], m3
    addss       m2, m0
    movss       [outq+44], m4
    movss       [outq+88], m2
    addss       m0, [outq+60]
    mov         tmpd, [outq+60]
    mov         [outq+120], tmpd
    movss       [outq+104], m0
    addss       m1, m5
    addss       m5, [outq+68]
    movss       [outq+52], m1
    movss       [outq+60], m5
    movss       m1, [outq+68]
    movss       m5, [outq+100]
    addss       m5, m7
    addss       m7, [outq+108]
    addss       m1, m5
    movss       m2, [outq+84]
    addss       m2, [outq+92]
    addss       m5, m2
    movss       [outq+68], m1
    addss       m2, m7
    movss       m1, [outq+76]
    movss       [outq+84], m2
    movss       [outq+76], m5
    movss       m2, [outq+108]
    addss       m7, m1
    addss       m2, [outq+124]
    addss       m1, m2
    addss       m2, [outq+92]
    movss       [outq+100], m1
    movss       [outq+108], m2
    movss       m2, [outq+92]
    movss       [outq+92], m7
    addss       m2, [outq+124]
    movss       [outq+116], m2
    RET
libavcodec/x86/dct32_sse.c
deleted
100644 → 0
View file @
153382e1
/*
* 32 point SSE-optimized DCT transform
* Copyright (c) 2010 Vitor Sessak
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/x86_cpu.h"
#include "libavutil/mem.h"
#include "libavcodec/dsputil.h"
#include "fft.h"
/* 32-point DCT butterfly coefficient table (identical values to ps_cos_vec
 * in the YASM port added by this commit).  Each row of four floats matches
 * one 16-byte movaps load at b1 + {0, 16, 32, 48, 64, 80, 96, 112, 128}. */
DECLARE_ALIGNED(16, static const float, b1)[] = {
      0.500603,   0.505471,   0.515447,   0.531043,
      0.553104,   0.582935,   0.622504,   0.674808,
     -1.169440,  -0.972568,  -0.839350,  -0.744536,
    -10.190008,  -3.407609,  -2.057781,  -1.484165,
      0.502419,   0.522499,   0.566944,   0.646822,
      0.788155,   1.060678,   1.722447,   5.101149,
      0.509796,   0.601345,   0.899976,   2.562916,
      1.000000,   1.000000,   1.306563,   0.541196,
      1.000000,   0.707107,   1.000000,  -0.707107
};
/* Sign mask: xorps with this flips the sign bit of the two high floats of a
 * 16-byte vector and leaves the two low floats unchanged (see BUTTERFLY0). */
DECLARE_ALIGNED(16, static const int32_t, smask)[4] = {
    0, 0, 0x80000000, 0x80000000
};
/* butterfly operator:
 * tmp = a; a -= b; b += tmp; a *= c
 * a, b, tmp are xmm register names (the macro adds the "%%" prefix); c is
 * pasted verbatim, so it may be a memory operand such as 48(%2). */
#define BUTTERFLY(a,b,c,tmp) \
    "movaps %%" #a ", %%" #tmp " \n\t" \
    "subps %%" #b ", %%" #a " \n\t" \
    "addps %%" #tmp ", %%" #b " \n\t" \
    "mulps " #c ", %%" #a " \n\t"
/* Same as BUTTERFLY when vectors a and b overlap: the butterfly is done
 * within one register — val is shuffled by the immediate `shuf`, a sign-
 * flipped copy (xorps with `mask`, i.e. smask) is added, and the result is
 * scaled by `cos`.  tmp is clobbered. */
#define BUTTERFLY0(val, mask, cos, tmp, shuf) \
    "movaps %%" #val ", %%" #tmp " \n\t" \
    "shufps " #shuf ", %%" #val ",%%" #val " \n\t" \
    "xorps %%" #mask ", %%" #tmp " \n\t" /* flip signs */ \
    "addps %%" #tmp ", %%" #val " \n\t" \
    "mulps %%" #cos ", %%" #val " \n\t"
/* BUTTERFLY0 specializations: $0x1b reverses the four floats, $0xb1 swaps
 * within each pair. */
#define BUTTERFLY2(val, mask, cos, tmp) BUTTERFLY0(val, mask, cos, tmp, $0x1b)
#define BUTTERFLY3(val, mask, cos, tmp) BUTTERFLY0(val, mask, cos, tmp, $0xb1)
/**
 * 32-point DCT, SSE inline-asm implementation (replaced by the YASM port in
 * this commit).
 *
 * @param out 32 floats; receives the result and is also used as scratch
 *            between passes.  Accessed with movaps, so it must be 16-byte
 *            aligned and writable for the whole call.
 * @param in  32 floats, read at offsets 0..112 with movaps, so it must be
 *            16-byte aligned.
 *
 * Inline-asm operands: %0 = tmp1 (GPR scratch used in pass 6 to shuttle
 * 32-bit words), %1 = out, %2 = b1 coefficient table, %3 = smask sign mask,
 * %4 = in.  Clobbers xmm0-xmm7 (listed via XMM_CLOBBERS where the ABI
 * allows naming them) and memory.
 */
void ff_dct32_float_sse(FFTSample *out, const FFTSample *in)
{
    int32_t tmp1 = 0;
    __asm__ volatile(
        /* pass 1 */
        "movaps (%4), %%xmm0 \n\t"
        "movaps 112(%4), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        BUTTERFLY(xmm0, xmm1, (%2), xmm3)
        "movaps 64(%4), %%xmm7 \n\t"
        "movaps 48(%4), %%xmm4 \n\t"
        "shufps $0x1b, %%xmm4, %%xmm4 \n\t"
        BUTTERFLY(xmm7, xmm4, 48(%2), xmm3)

        /* pass 2 */
        "movaps 64(%2), %%xmm2 \n\t"
        BUTTERFLY(xmm1, xmm4, %%xmm2, xmm3)
        "movaps %%xmm1, 48(%1) \n\t"
        "movaps %%xmm4, (%1) \n\t"

        /* pass 1 */
        "movaps 16(%4), %%xmm1 \n\t"
        "movaps 96(%4), %%xmm6 \n\t"
        "shufps $0x1b, %%xmm6, %%xmm6 \n\t"
        BUTTERFLY(xmm1, xmm6, 16(%2), xmm3)
        "movaps 80(%4), %%xmm4 \n\t"
        "movaps 32(%4), %%xmm5 \n\t"
        "shufps $0x1b, %%xmm5, %%xmm5 \n\t"
        BUTTERFLY(xmm4, xmm5, 32(%2), xmm3)

        /* pass 2 */
        BUTTERFLY(xmm0, xmm7, %%xmm2, xmm3)
        "movaps 80(%2), %%xmm2 \n\t"
        BUTTERFLY(xmm6, xmm5, %%xmm2, xmm3)
        BUTTERFLY(xmm1, xmm4, %%xmm2, xmm3)

        /* pass 3 */
        "movaps 96(%2), %%xmm2 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        BUTTERFLY(xmm0, xmm1, %%xmm2, xmm3)
        "movaps %%xmm0, 112(%1) \n\t"
        "movaps %%xmm1, 96(%1) \n\t"
        "movaps 0(%1), %%xmm0 \n\t"
        "shufps $0x1b, %%xmm5, %%xmm5 \n\t"
        BUTTERFLY(xmm0, xmm5, %%xmm2, xmm3)
        "movaps 48(%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm6, %%xmm6 \n\t"
        BUTTERFLY(xmm1, xmm6, %%xmm2, xmm3)
        "movaps %%xmm1, 48(%1) \n\t"
        "shufps $0x1b, %%xmm4, %%xmm4 \n\t"
        BUTTERFLY(xmm7, xmm4, %%xmm2, xmm3)

        /* pass 4: in-register butterflies against the smask sign mask */
        "movaps (%3), %%xmm3 \n\t"
        "movaps 112(%2), %%xmm2 \n\t"
        BUTTERFLY2(xmm5, xmm3, xmm2, xmm1)
        BUTTERFLY2(xmm0, xmm3, xmm2, xmm1)
        "movaps %%xmm0, 16(%1) \n\t"
        BUTTERFLY2(xmm6, xmm3, xmm2, xmm1)
        "movaps %%xmm6, 32(%1) \n\t"
        "movaps 48(%1), %%xmm0 \n\t"
        BUTTERFLY2(xmm0, xmm3, xmm2, xmm1)
        "movaps %%xmm0, 48(%1) \n\t"
        BUTTERFLY2(xmm4, xmm3, xmm2, xmm1)
        BUTTERFLY2(xmm7, xmm3, xmm2, xmm1)
        "movaps 96(%1), %%xmm6 \n\t"
        BUTTERFLY2(xmm6, xmm3, xmm2, xmm1)
        "movaps 112(%1), %%xmm0 \n\t"
        BUTTERFLY2(xmm0, xmm3, xmm2, xmm1)

        /* pass 5: mask rearranged with $0xCC, then pairwise butterflies */
        "movaps 128(%2), %%xmm2 \n\t"
        "shufps $0xCC, %%xmm3,%%xmm3 \n\t"
        BUTTERFLY3(xmm5, xmm3, xmm2, xmm1)
        "movaps %%xmm5, (%1) \n\t"
        "movaps 16(%1), %%xmm1 \n\t"
        BUTTERFLY3(xmm1, xmm3, xmm2, xmm5)
        "movaps %%xmm1, 16(%1) \n\t"
        BUTTERFLY3(xmm4, xmm3, xmm2, xmm5)
        "movaps %%xmm4, 64(%1) \n\t"
        BUTTERFLY3(xmm7, xmm3, xmm2, xmm5)
        "movaps %%xmm7, 80(%1) \n\t"
        "movaps 32(%1), %%xmm5 \n\t"
        BUTTERFLY3(xmm5, xmm3, xmm2, xmm7)
        "movaps %%xmm5, 32(%1) \n\t"
        "movaps 48(%1), %%xmm4 \n\t"
        BUTTERFLY3(xmm4, xmm3, xmm2, xmm7)
        "movaps %%xmm4, 48(%1) \n\t"
        BUTTERFLY3(xmm6, xmm3, xmm2, xmm7)
        "movaps %%xmm6, 96(%1) \n\t"
        BUTTERFLY3(xmm0, xmm3, xmm2, xmm7)
        "movaps %%xmm0, 112(%1) \n\t"

        /* pass 6, no SIMD... */
        /* Scalar accumulation; %0 shuttles 32-bit words copied unchanged.
         * The ordering is hand-scheduled — do not reorder. */
        "movss 56(%1), %%xmm3 \n\t"
        "movl 4(%1), %0 \n\t"
        "addss 60(%1), %%xmm3 \n\t"
        "movss 72(%1), %%xmm7 \n\t"
        "addss %%xmm3, %%xmm4 \n\t"
        "movss 52(%1), %%xmm2 \n\t"
        "addss %%xmm3, %%xmm2 \n\t"
        "movss 24(%1), %%xmm3 \n\t"
        "addss 28(%1), %%xmm3 \n\t"
        "addss 76(%1), %%xmm7 \n\t"
        "addss %%xmm3, %%xmm1 \n\t"
        "addss %%xmm4, %%xmm5 \n\t"
        "movss %%xmm1, 16(%1) \n\t"
        "movss 20(%1), %%xmm1 \n\t"
        "addss %%xmm3, %%xmm1 \n\t"
        "movss 40(%1), %%xmm3 \n\t"
        "movss %%xmm1, 48(%1) \n\t"
        "addss 44(%1), %%xmm3 \n\t"
        "movss 20(%1), %%xmm1 \n\t"
        "addss %%xmm3, %%xmm4 \n\t"
        "addss %%xmm2, %%xmm3 \n\t"
        "addss 28(%1), %%xmm1 \n\t"
        "movss %%xmm3, 40(%1) \n\t"
        "addss 36(%1), %%xmm2 \n\t"
        "movss 8(%1), %%xmm3 \n\t"
        "movss %%xmm2, 56(%1) \n\t"
        "addss 12(%1), %%xmm3 \n\t"
        "movss %%xmm5, 8(%1) \n\t"
        "movss %%xmm3, 32(%1) \n\t"
        "movss 52(%1), %%xmm2 \n\t"
        "movss 80(%1), %%xmm3 \n\t"
        "movss 120(%1), %%xmm5 \n\t"
        "movss %%xmm1, 80(%1) \n\t"
        "movss %%xmm4, 24(%1) \n\t"
        "addss 124(%1), %%xmm5 \n\t"
        "movss 64(%1), %%xmm1 \n\t"
        "addss 60(%1), %%xmm2 \n\t"
        "addss %%xmm5, %%xmm0 \n\t"
        "addss 116(%1), %%xmm5 \n\t"
        "movl %0, 64(%1) \n\t"
        "addss %%xmm0, %%xmm6 \n\t"
        "addss %%xmm6, %%xmm1 \n\t"
        "movl 12(%1), %0 \n\t"
        "movss %%xmm1, 4(%1) \n\t"
        "movss 88(%1), %%xmm1 \n\t"
        "movl %0, 96(%1) \n\t"
        "addss 92(%1), %%xmm1 \n\t"
        "movss 104(%1), %%xmm4 \n\t"
        "movl 28(%1), %0 \n\t"
        "addss 108(%1), %%xmm4 \n\t"
        "addss %%xmm4, %%xmm0 \n\t"
        "addss %%xmm1, %%xmm3 \n\t"
        "addss 84(%1), %%xmm1 \n\t"
        "addss %%xmm5, %%xmm4 \n\t"
        "addss %%xmm3, %%xmm6 \n\t"
        "addss %%xmm0, %%xmm3 \n\t"
        "addss %%xmm7, %%xmm0 \n\t"
        "addss 100(%1), %%xmm5 \n\t"
        "addss %%xmm4, %%xmm7 \n\t"
        "movl %0, 112(%1) \n\t"
        "movss %%xmm0, 28(%1) \n\t"
        "movss 36(%1), %%xmm0 \n\t"
        "movss %%xmm7, 36(%1) \n\t"
        "addss %%xmm1, %%xmm4 \n\t"
        "movss 116(%1), %%xmm7 \n\t"
        "addss %%xmm2, %%xmm0 \n\t"
        "addss 124(%1), %%xmm7 \n\t"
        "movss %%xmm0, 72(%1) \n\t"
        "movss 44(%1), %%xmm0 \n\t"
        "movss %%xmm6, 12(%1) \n\t"
        "movss %%xmm3, 20(%1) \n\t"
        "addss %%xmm0, %%xmm2 \n\t"
        "movss %%xmm4, 44(%1) \n\t"
        "movss %%xmm2, 88(%1) \n\t"
        "addss 60(%1), %%xmm0 \n\t"
        "movl 60(%1), %0 \n\t"
        "movl %0, 120(%1) \n\t"
        "movss %%xmm0, 104(%1) \n\t"
        "addss %%xmm5, %%xmm1 \n\t"
        "addss 68(%1), %%xmm5 \n\t"
        "movss %%xmm1, 52(%1) \n\t"
        "movss %%xmm5, 60(%1) \n\t"
        "movss 68(%1), %%xmm1 \n\t"
        "movss 100(%1), %%xmm5 \n\t"
        "addss %%xmm7, %%xmm5 \n\t"
        "addss 108(%1), %%xmm7 \n\t"
        "addss %%xmm5, %%xmm1 \n\t"
        "movss 84(%1), %%xmm2 \n\t"
        "addss 92(%1), %%xmm2 \n\t"
        "addss %%xmm2, %%xmm5 \n\t"
        "movss %%xmm1, 68(%1) \n\t"
        "addss %%xmm7, %%xmm2 \n\t"
        "movss 76(%1), %%xmm1 \n\t"
        "movss %%xmm2, 84(%1) \n\t"
        "movss %%xmm5, 76(%1) \n\t"
        "movss 108(%1), %%xmm2 \n\t"
        "addss %%xmm1, %%xmm7 \n\t"
        "addss 124(%1), %%xmm2 \n\t"
        "addss %%xmm2, %%xmm1 \n\t"
        "addss 92(%1), %%xmm2 \n\t"
        "movss %%xmm1, 100(%1) \n\t"
        "movss %%xmm2, 108(%1) \n\t"
        "movss 92(%1), %%xmm2 \n\t"
        "movss %%xmm7, 92(%1) \n\t"
        "addss 124(%1), %%xmm2 \n\t"
        "movss %%xmm2, 116(%1) \n\t"
        : "+&r"(tmp1)
        : "r"(out), "r"(b1), "r"(smask), "r"(in)
        : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",)
          "memory");
}
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment