Commit 77cd6efc authored by Mans Rullgard

ARM: remove volatile from asm statements in libavutil/intmath

The volatile qualifiers are not needed on these statements as
their effects are fully specified by constraints.
Signed-off-by: Mans Rullgard <mans@mansr.com>
parent 74cc8c52
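For context, a minimal sketch (not part of this commit) of the rule the message cites: when an asm statement's only effects are its outputs, the constraints already describe it completely, so volatile would only block optimizations such as common-subexpression elimination or dead-code removal. volatile remains the right tool when an asm has effects the constraints cannot express, such as memory barriers or coprocessor register accesses.

/* Sketch only: a pure computation whose effect is fully captured
 * by the "=r" output constraint. GCC may merge duplicate copies of
 * this asm or drop it entirely if r is unused, which is correct. */
static inline int add_asm(int a, int b)
{
    int r;
    __asm__ ("add %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}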
@@ -34,11 +34,11 @@
 static av_always_inline av_const int FASTDIV(int a, int b)
 {
     int r, t;
-    __asm__ volatile("cmp %3, #2 \n\t"
-                     "ldr %1, [%4, %3, lsl #2] \n\t"
-                     "lsrle %0, %2, #1 \n\t"
-                     "smmulgt %0, %1, %2 \n\t"
-                     : "=&r"(r), "=&r"(t) : "r"(a), "r"(b), "r"(ff_inverse));
+    __asm__ ("cmp %3, #2 \n\t"
+             "ldr %1, [%4, %3, lsl #2] \n\t"
+             "lsrle %0, %2, #1 \n\t"
+             "smmulgt %0, %1, %2 \n\t"
+             : "=&r"(r), "=&r"(t) : "r"(a), "r"(b), "r"(ff_inverse));
     return r;
 }
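The hunk above is the ARMv6 FASTDIV: it loads ff_inverse[b], takes a shift for small divisors, and otherwise keeps the high 32 bits of a signed 32x32-bit multiply (smmul). A rough C model of the same computation follows; fastdiv_armv6_model is an illustrative name, not project code.

#include <stdint.h>

extern const uint32_t ff_inverse[257];  /* libavutil's reciprocal table */

static inline int fastdiv_armv6_model(int a, int b)
{
    int32_t t = (int32_t)ff_inverse[b];    /* ldr %1, [%4, %3, lsl #2] */
    if (b <= 2)
        return (int)((uint32_t)a >> 1);    /* lsrle %0, %2, #1 */
    return (int)(((int64_t)t * a) >> 32);  /* smmulgt %0, %1, %2 */
}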
@@ -46,7 +46,7 @@ static av_always_inline av_const int FASTDIV(int a, int b)
 static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
 {
     unsigned x;
-    __asm__ volatile ("usat %0, #8, %1" : "=r"(x) : "r"(a));
+    __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));
     return x;
 }
@@ -54,7 +54,7 @@ static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
 static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
 {
     unsigned x;
-    __asm__ volatile ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
+    __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
     return x;
 }
@@ -62,7 +62,7 @@ static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
 static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
 {
     unsigned x;
-    __asm__ volatile ("usat %0, #16, %1" : "=r"(x) : "r"(a));
+    __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
     return x;
 }
@@ -70,7 +70,7 @@ static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
 static av_always_inline av_const int16_t av_clip_int16_arm(int a)
 {
     int x;
-    __asm__ volatile ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
+    __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
     return x;
 }
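The four clip hunks above all follow one pattern: a single USAT or SSAT instruction that saturates the input into an unsigned or signed n-bit range, with input and output fully named by the constraints. Plain-C equivalents of what the two instructions compute, with illustrative helper names (not project code):

/* usat %0, #8, %1: clamp to [0, 255]. */
static inline unsigned clip_uint8_ref(int a)
{
    return a < 0 ? 0 : a > 255 ? 255 : (unsigned)a;
}

/* ssat %0, #16, %1: clamp to [-32768, 32767]. */
static inline int clip_int16_ref(int a)
{
    return a < -32768 ? -32768 : a > 32767 ? 32767 : a;
}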
@@ -80,8 +80,8 @@ static av_always_inline av_const int16_t av_clip_int16_arm(int a)
 static av_always_inline av_const int FASTDIV(int a, int b)
 {
     int r, t;
-    __asm__ volatile("umull %1, %0, %2, %3"
-                     : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
+    __asm__ ("umull %1, %0, %2, %3"
+             : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
     return r;
 }
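This pre-ARMv6 FASTDIV uses UMULL, which writes the low word of the unsigned 64-bit product a * ff_inverse[b] to t and the high word (the quotient) to r. The same computation in C, matching libavutil's portable FASTDIV fallback; fastdiv_ref is an illustrative name:

#include <stdint.h>

extern const uint32_t ff_inverse[257];

static inline int fastdiv_ref(int a, int b)
{
    /* High 32 bits of the unsigned product, as umull computes it. */
    return (int)(((uint64_t)(uint32_t)a * ff_inverse[b]) >> 32);
}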
@@ -91,11 +91,11 @@ static av_always_inline av_const int FASTDIV(int a, int b)
 static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
 {
     int x, y;
-    __asm__ volatile ("adds %1, %R2, %Q2, lsr #31 \n\t"
-                      "mvnne %1, #1<<31 \n\t"
-                      "moveq %0, %Q2 \n\t"
-                      "eorne %0, %1, %R2, asr #31 \n\t"
-                      : "=r"(x), "=&r"(y) : "r"(a));
+    __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t"
+             "mvnne %1, #1<<31 \n\t"
+             "moveq %0, %Q2 \n\t"
+             "eorne %0, %1, %R2, asr #31 \n\t"
+             : "=r"(x), "=&r"(y) : "r"(a));
     return x;
 }
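The final hunk clamps a 64-bit value to int32 range in four conditional instructions: the adds result is zero exactly when the high word (%R2) is the sign extension of the low word (%Q2), i.e. when a already fits in 32 bits, and otherwise the mvn/eor pair builds INT32_MAX for positive a and INT32_MIN for negative a. A C model of the sequence; clipl_int32_ref is an illustrative name:

#include <stdint.h>

static inline int32_t clipl_int32_ref(int64_t a)
{
    if (a != (int64_t)(int32_t)a)  /* out of int32 range: saturate */
        return (int32_t)(0x7FFFFFFFU ^ (uint32_t)(a >> 63));
    return (int32_t)a;             /* moveq path: value fits */
}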