Commit 0ae14a49 authored by Junliang Yan, committed by Commit Bot

[wasm] Add BE support for atomic operations for wasm interpreter

This fixes RunWasmInterpreter_* tests on big endian mips/ppc/s390.
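
Wasm memory is always little-endian, while std::atomic operations on a
big-endian host read and write values in host byte order, so the
interpreter must byte-reverse values around each atomic access. For
read-modify-write operations this is done with a compare-exchange loop.
A simplified sketch of the idea (illustrative only; names and types
differ from the actual patch):

    // Host-order fetch_add on a little-endian memory cell, BE host.
    uint32_t FetchAddLE(std::atomic<uint32_t>* cell, uint32_t operand) {
      uint32_t old_le = cell->load();
      uint32_t new_le;
      do {
        // LE -> host order, add, then host -> LE order.
        new_le = ByteReverse32(ByteReverse32(old_le) + operand);
      } while (!cell->compare_exchange_strong(old_le, new_le));
      return ByteReverse32(old_le);  // Old value in host order.
    }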

Change-Id: I4b9b767d0de45004ee1195ac225c6d1027c17a05
Reviewed-on: https://chromium-review.googlesource.com/c/1439517
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59174}
parent 14054ad9
@@ -1065,34 +1065,30 @@ bool StringToArrayIndex(Stream* stream, uint32_t* index);
 // return an address significantly above the actual current stack position.
 V8_NOINLINE uintptr_t GetCurrentStackPosition();
 
-template <typename V>
-static inline V ByteReverse(V value) {
-  size_t size_of_v = sizeof(value);
-  switch (size_of_v) {
-    case 2:
+static inline uint16_t ByteReverse16(uint16_t value) {
 #if V8_HAS_BUILTIN_BSWAP16
-      return static_cast<V>(__builtin_bswap16(static_cast<uint16_t>(value)));
+  return __builtin_bswap16(value);
 #else
   return value << 8 | (value >> 8 & 0x00FF);
 #endif
-    case 4:
+}
+
+static inline uint32_t ByteReverse32(uint32_t value) {
 #if V8_HAS_BUILTIN_BSWAP32
-      return static_cast<V>(__builtin_bswap32(static_cast<uint32_t>(value)));
+  return __builtin_bswap32(value);
 #else
-    {
-      size_t bits_of_v = size_of_v * kBitsPerByte;
-      return value << (bits_of_v - 8) |
-             ((value << (bits_of_v - 24)) & 0x00FF0000) |
-             ((value >> (bits_of_v - 24)) & 0x0000FF00) |
-             ((value >> (bits_of_v - 8)) & 0x00000FF);
-    }
+  return value << 24 |
+         ((value << 8) & 0x00FF0000) |
+         ((value >> 8) & 0x0000FF00) |
+         ((value >> 24) & 0x00000FF);
 #endif
-    case 8:
+}
+
+static inline uint64_t ByteReverse64(uint64_t value) {
 #if V8_HAS_BUILTIN_BSWAP64
-      return static_cast<V>(__builtin_bswap64(static_cast<uint64_t>(value)));
+  return __builtin_bswap64(value);
 #else
-    {
-      size_t bits_of_v = size_of_v * kBitsPerByte;
+  size_t bits_of_v = sizeof(value) * kBitsPerByte;
   return value << (bits_of_v - 8) |
          ((value << (bits_of_v - 24)) & 0x00FF000000000000) |
          ((value << (bits_of_v - 40)) & 0x0000FF0000000000) |
@@ -1101,8 +1097,21 @@ static inline V ByteReverse(V value) {
          ((value >> (bits_of_v - 40)) & 0x0000000000FF0000) |
          ((value >> (bits_of_v - 24)) & 0x000000000000FF00) |
          ((value >> (bits_of_v - 8)) & 0x00000000000000FF);
-    }
 #endif
 }
+
+template <typename V>
+static inline V ByteReverse(V value) {
+  size_t size_of_v = sizeof(value);
+  switch (size_of_v) {
+    case 1:
+      return value;
+    case 2:
+      return static_cast<V>(ByteReverse16(static_cast<uint16_t>(value)));
+    case 4:
+      return static_cast<V>(ByteReverse32(static_cast<uint32_t>(value)));
+    case 8:
+      return static_cast<V>(ByteReverse64(static_cast<uint64_t>(value)));
+    default:
+      UNREACHABLE();
+  }
+}
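Note: the fixed-width helpers above make each byte swap callable without
going through the template dispatch. A quick standalone check of the 32-bit
fallback path (hypothetical harness, not part of this patch):

    #include <cassert>
    #include <cstdint>

    // Mirrors the non-builtin branch of ByteReverse32 above.
    static inline uint32_t ByteReverse32Fallback(uint32_t value) {
      return value << 24 | ((value << 8) & 0x00FF0000) |
             ((value >> 8) & 0x0000FF00) | ((value >> 24) & 0x000000FF);
    }

    int main() {
      assert(ByteReverse32Fallback(0x11223344u) == 0x44332211u);
      // Reversing twice restores the original value.
      assert(ByteReverse32Fallback(0x44332211u) == 0x11223344u);
      return 0;
    }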
@@ -1596,17 +1596,42 @@ class ThreadImpl {
     return false;
   }
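+  // Performs an atomic read-modify-write for a big-endian host on wasm's
+  // little-endian memory: byte-reverse the current value into host order,
+  // apply |op|, reverse the result back to memory order, and retry with a
+  // strong compare-exchange until no concurrent write intervened. Returns
+  // the old value in host byte order.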
+  template <typename type, typename op_type, typename func>
+  op_type ExecuteAtomicBinopBE(type val, Address addr, func op) {
+    type old_val;
+    type new_val;
+    old_val = ReadUnalignedValue<type>(addr);
+    do {
+      new_val =
+          ByteReverse(static_cast<type>(op(ByteReverse<type>(old_val), val)));
+    } while (!(std::atomic_compare_exchange_strong(
+        reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val)));
+    return static_cast<op_type>(ByteReverse<type>(old_val));
+  }
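+
+  // Converts between wasm's little-endian memory representation and host
+  // byte order; a no-op on little-endian targets.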
+  template <typename type>
+  type AdjustByteOrder(type param) {
+#if V8_TARGET_BIG_ENDIAN
+    return ByteReverse(param);
+#else
+    return param;
+#endif
+  }
   bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
                        InterpreterCode* code, pc_t pc, int& len) {
+#if V8_TARGET_BIG_ENDIAN
+    constexpr bool kBigEndian = true;
+#else
+    constexpr bool kBigEndian = false;
+#endif
     WasmValue result;
     switch (opcode) {
+// Disabling on Mips as 32 bit atomics are not correctly laid out for load/store
+// on big endian and 64 bit atomics fail to compile.
+#if !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
-#define ATOMIC_BINOP_CASE(name, type, op_type, operation)                    \
+#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op)                \
       case kExpr##name: {                                                    \
         type val;                                                            \
         Address addr;                                                        \
+        op_type result;                                                      \
         if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
                                                   &val)) {                   \
           return false;                                                      \
@@ -1614,74 +1639,95 @@ class ThreadImpl {
         static_assert(sizeof(std::atomic<type>) == sizeof(type),             \
                       "Size mismatch for types std::atomic<" #type           \
                       ">, and " #type);                                      \
-        result = WasmValue(static_cast<op_type>(                             \
-            std::operation(reinterpret_cast<std::atomic<type>*>(addr), val))); \
-        Push(result);                                                        \
+        if (kBigEndian) {                                                    \
+          auto oplambda = [](type a, type b) { return a op b; };             \
+          result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
+        } else {                                                             \
+          result = static_cast<op_type>(                                     \
+              std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
+        }                                                                    \
+        Push(WasmValue(result));                                             \
         break;                                                               \
       }
-      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange);
-      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t,
-                        atomic_exchange);
+      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add, +);
+      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add, +);
+      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add,
+                        +);
+      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub, -);
+      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub, -);
+      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub,
+                        -);
+      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and, &);
+      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and, &);
+      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t,
+                        atomic_fetch_and, &);
+      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or, |);
+      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or, |);
+      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or, |);
+      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor, ^);
+      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor, ^);
+      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor,
+                        ^);
+      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange,
+                        =);
+      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t, atomic_exchange,
+                        =);
       ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
-                        atomic_exchange);
-      ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add);
-      ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub);
-      ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t, atomic_fetch_and);
-      ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or);
-      ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor);
-      ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange);
-      ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t,
-                        atomic_exchange);
+                        atomic_exchange, =);
+      ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add, +);
+      ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add, +);
+      ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add,
+                        +);
+      ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add,
+                        +);
+      ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub, -);
+      ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub, -);
+      ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub,
+                        -);
+      ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub,
+                        -);
+      ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and, &);
+      ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and, &);
+      ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t,
+                        atomic_fetch_and, &);
+      ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t,
+                        atomic_fetch_and, &);
+      ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or, |);
+      ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or, |);
+      ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or, |);
+      ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or, |);
+      ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor, ^);
+      ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor, ^);
+      ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor,
+                        ^);
+      ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor,
+                        ^);
+      ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange,
+                        =);
+      ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t, atomic_exchange,
+                        =);
       ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
-                        atomic_exchange);
+                        atomic_exchange, =);
       ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
-                        atomic_exchange);
+                        atomic_exchange, =);
 #undef ATOMIC_BINOP_CASE
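+// For compare-exchange, the expected and replacement values are converted
+// to wasm's memory byte order up front, so the compare-exchange below
+// operates on memory-order bytes; the returned old value is converted back
+// to host order before being pushed.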
 #define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type)                    \
       case kExpr##name: {                                                    \
-        type val;                                                            \
-        type val2;                                                           \
+        type old_val;                                                        \
+        type new_val;                                                        \
         Address addr;                                                        \
         if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
-                                                  &val, &val2)) {            \
+                                                  &old_val, &new_val)) {     \
           return false;                                                      \
         }                                                                    \
         static_assert(sizeof(std::atomic<type>) == sizeof(type),             \
                       "Size mismatch for types std::atomic<" #type           \
                       ">, and " #type);                                      \
+        old_val = AdjustByteOrder<type>(old_val);                            \
+        new_val = AdjustByteOrder<type>(new_val);                            \
         std::atomic_compare_exchange_strong(                                 \
-            reinterpret_cast<std::atomic<type>*>(addr), &val, val2);         \
-        Push(WasmValue(static_cast<op_type>(val)));                          \
+            reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val);  \
+        Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
         break;                                                               \
       }
       ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
@@ -1708,8 +1754,8 @@ class ThreadImpl {
         static_assert(sizeof(std::atomic<type>) == sizeof(type),             \
                       "Size mismatch for types std::atomic<" #type           \
                       ">, and " #type);                                      \
-        result = WasmValue(static_cast<op_type>(                             \
-            std::operation(reinterpret_cast<std::atomic<type>*>(addr))));    \
+        result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>(       \
+            std::operation(reinterpret_cast<std::atomic<type>*>(addr)))));   \
         Push(result);                                                        \
         break;                                                               \
       }
@@ -1732,7 +1778,8 @@ class ThreadImpl {
         static_assert(sizeof(std::atomic<type>) == sizeof(type),             \
                       "Size mismatch for types std::atomic<" #type           \
                       ">, and " #type);                                      \
-        std::operation(reinterpret_cast<std::atomic<type>*>(addr), val);     \
+        std::operation(reinterpret_cast<std::atomic<type>*>(addr),           \
+                       AdjustByteOrder<type>(val));                          \
         break;                                                               \
       }
       ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
@@ -1743,7 +1790,6 @@ class ThreadImpl {
       ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
       ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
 #undef ATOMIC_STORE_CASE
+#endif  // !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
       default:
         UNREACHABLE();
         return false;
@@ -160,6 +160,28 @@ WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
   }
 }
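+
+// Checks that a compare-exchange whose expected value does not match leaves
+// memory untouched and returns the original value; on big-endian hosts this
+// exercises the byte-order conversion of all three values involved.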
+WASM_EXEC_TEST(I32AtomicCompareExchange_fail) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
+  r.builder().SetHasSharedMemory();
+  uint32_t* memory =
+      r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
+  BUILD(r, WASM_ATOMICS_TERNARY_OP(
+               kExprI32AtomicCompareExchange, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+               WASM_GET_LOCAL(1), MachineRepresentation::kWord32));
+
+  // The original value at the memory location.
+  uint32_t old_val = 4;
+  // The value we use as the expected value for the compare-exchange so that it
+  // fails.
+  uint32_t expected = 6;
+  // The new value for the compare-exchange.
+  uint32_t new_val = 5;
+
+  r.builder().WriteMemory(&memory[0], old_val);
+  CHECK_EQ(old_val, r.Call(expected, new_val));
+}
 WASM_EXEC_TEST(I32AtomicLoad) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint32_t> r(execution_tier);