Linshizhi / ffmpeg.wasm-core

Commit 84e4804a authored May 27, 2011 by Mans Rullgard
ARM: remove unnecessary volatile from inline asm
Signed-off-by: Mans Rullgard <mans@mansr.com>
parent 5726ec17
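Background on the change: in GCC extended asm, a statement that has output operands and no volatile qualifier is treated as a pure computation of those outputs, so the compiler may combine duplicate copies, hoist it, or delete it entirely when the results are unused. volatile is only needed when the asm has side effects beyond its listed outputs. The blocks touched by this commit only compute values from their operands, so the qualifier was unnecessary and removing it gives the optimizer more freedom. A minimal sketch of the distinction, for 32-bit ARM with GCC or Clang; the helpers add_one and write_reg are illustrative only and not taken from FFmpeg or this commit:

    #include <stdio.h>

    /* Pure computation: no "volatile" needed.  With output operands and no
     * side effects, the compiler may CSE this asm or drop it when the result
     * is unused -- exactly the freedom this commit restores. */
    static inline int add_one(int x)
    {
        int r;
        __asm__ ("add %0, %1, #1" : "=r"(r) : "r"(x));
        return r;
    }

    /* Side effect beyond the outputs (a store the compiler cannot see as a
     * normal C access): "volatile" plus a "memory" clobber keeps the asm
     * from being reordered or removed even though it has no output operand. */
    static inline void write_reg(unsigned *addr, unsigned val)
    {
        __asm__ volatile ("str %1, [%0]" : : "r"(addr), "r"(val) : "memory");
    }

    int main(void)
    {
        unsigned slot = 0;
        write_reg(&slot, 7u);
        printf("%d %u\n", add_one(41), slot);   /* prints "42 7" */
        return 0;
    }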
Showing 2 changed files with 34 additions and 34 deletions

libavcodec/arm/mathops.h     +1  -1
libavcodec/arm/vp56_arith.h  +33 -33
libavcodec/arm/mathops.h
...
@@ -97,7 +97,7 @@ static inline av_const int MUL16(int ra, int rb)
 static inline av_const int mid_pred(int a, int b, int c)
 {
     int m;
-    __asm__ volatile (
+    __asm__ (
         "mov   %0, %2  \n\t"
         "cmp   %1, %2  \n\t"
         "movgt %0, %1  \n\t"
...
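For reference, mid_pred returns the median of its three arguments (FFmpeg uses it for motion-vector prediction). A plain-C equivalent, shown here only to make the intent of the assembly concrete and not taken from this commit:

    /* Median of three integers: max(min(a, b), min(max(a, b), c)).
     * Example: mid_pred_c(3, 9, 5) == 5. */
    static inline int mid_pred_c(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b     */
        if (b > c) b = c;                        /* b = min(b, c)     */
        return a > b ? a : b;                    /* max(a, min(b, c)) */
    }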
libavcodec/arm/vp56_arith.h
...
@@ -31,25 +31,25 @@ static inline int vp56_rac_get_prob_armv6(VP56RangeCoder *c, int pr)
     unsigned high      = c->high << shift;
     unsigned bit;
 
-    __asm__ volatile ("adds    %3,  %3,  %0           \n"
-                      "cmpcs   %7,  %4                \n"
-                      "ldrcsh  %2,  [%4], #2          \n"
-                      "rsb     %0,  %6,  #256         \n"
-                      "smlabb  %0,  %5,  %6,  %0      \n"
-                      "rev16cs %2,  %2                \n"
-                      "orrcs   %1,  %1,  %2,  lsl %3  \n"
-                      "subcs   %3,  %3,  #16          \n"
-                      "lsr     %0,  %0,  #8           \n"
-                      "cmp     %1,  %0,  lsl #16      \n"
-                      "subge   %1,  %1,  %0,  lsl #16 \n"
-                      "subge   %0,  %5,  %0           \n"
-                      "movge   %2,  #1                \n"
-                      "movlt   %2,  #0                \n"
-                      : "=&r"(c->high), "=&r"(c->code_word), "=&r"(bit),
-                        "+&r"(c->bits), "+&r"(c->buffer)
-                      : "r"(high), "r"(pr), "r"(c->end - 1),
-                        "0"(shift), "1"(code_word)
-                      : "cc");
+    __asm__ ("adds    %3,  %3,  %0           \n"
+             "cmpcs   %7,  %4                \n"
+             "ldrcsh  %2,  [%4], #2          \n"
+             "rsb     %0,  %6,  #256         \n"
+             "smlabb  %0,  %5,  %6,  %0      \n"
+             "rev16cs %2,  %2                \n"
+             "orrcs   %1,  %1,  %2,  lsl %3  \n"
+             "subcs   %3,  %3,  #16          \n"
+             "lsr     %0,  %0,  #8           \n"
+             "cmp     %1,  %0,  lsl #16      \n"
+             "subge   %1,  %1,  %0,  lsl #16 \n"
+             "subge   %0,  %5,  %0           \n"
+             "movge   %2,  #1                \n"
+             "movlt   %2,  #0                \n"
+             : "=&r"(c->high), "=&r"(c->code_word), "=&r"(bit),
+               "+&r"(c->bits), "+&r"(c->buffer)
+             : "r"(high), "r"(pr), "r"(c->end - 1),
+               "0"(shift), "1"(code_word)
+             : "cc");
 
     return bit;
 }
...
@@ -63,20 +63,20 @@ static inline int vp56_rac_get_prob_branchy_armv6(VP56RangeCoder *c, int pr)
     unsigned low;
     unsigned tmp;
 
-    __asm__ volatile ("adds    %3,  %3,  %0           \n"
-                      "cmpcs   %7,  %4                \n"
-                      "ldrcsh  %2,  [%4], #2          \n"
-                      "rsb     %0,  %6,  #256         \n"
-                      "smlabb  %0,  %5,  %6,  %0      \n"
-                      "rev16cs %2,  %2                \n"
-                      "orrcs   %1,  %1,  %2,  lsl %3  \n"
-                      "subcs   %3,  %3,  #16          \n"
-                      "lsr     %0,  %0,  #8           \n"
-                      "lsl     %2,  %0,  #16          \n"
-                      : "=&r"(low), "+&r"(code_word), "=&r"(tmp),
-                        "+&r"(c->bits), "+&r"(c->buffer)
-                      : "r"(high), "r"(pr), "r"(c->end - 1), "0"(shift)
-                      : "cc");
+    __asm__ ("adds    %3,  %3,  %0           \n"
+             "cmpcs   %7,  %4                \n"
+             "ldrcsh  %2,  [%4], #2          \n"
+             "rsb     %0,  %6,  #256         \n"
+             "smlabb  %0,  %5,  %6,  %0      \n"
+             "rev16cs %2,  %2                \n"
+             "orrcs   %1,  %1,  %2,  lsl %3  \n"
+             "subcs   %3,  %3,  #16          \n"
+             "lsr     %0,  %0,  #8           \n"
+             "lsl     %2,  %0,  #16          \n"
+             : "=&r"(low), "+&r"(code_word), "=&r"(tmp),
+               "+&r"(c->bits), "+&r"(c->buffer)
+             : "r"(high), "r"(pr), "r"(c->end - 1), "0"(shift)
+             : "cc");
 
     if (code_word >= tmp) {
         c->high = high - low;
...
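Both asm blocks above are hand-scheduled ARMv6 versions of one step of the VP5/VP6 boolean range decoder: refill code_word from the byte buffer when enough bits have been consumed, split the current range high in proportion to the 8-bit probability pr, then choose a branch and update the state. A rough C sketch of the split-and-select part (the rsb/smlabb/lsr/cmp sequence), reconstructed here from the instruction sequence rather than copied from FFmpeg, with the buffer-refill step omitted:

    /* Sketch of the probability split done by the asm above (refill omitted).
     * high: current range; code_word: value being decoded, with the active
     * bits in the top 16; pr: probability of the 0 branch, in 1..255. */
    static inline int rac_get_prob_sketch(unsigned *high, unsigned *code_word, int pr)
    {
        unsigned low = (*high * pr + (256 - pr)) >> 8;  /* rsb + smlabb + lsr  */
        int bit = *code_word >= (low << 16);            /* cmp %1, %0, lsl #16 */

        if (bit) {               /* subge / subge / movge */
            *code_word -= low << 16;
            *high      -= low;
        } else {                 /* movlt: range collapses to the low part */
            *high       = low;
        }
        return bit;
    }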