Commit 91bd6a5d authored by Ivica Bogosavljevic, committed by Commit Bot

[wasm] Implement SIMD big-endian support

This CL implements several pieces needed for full SIMD big-endian (BE)
support in WASM:
* Global variables are now kept in little-endian (LE) byte order, as the
  specification requires (see the sketch after this list)
* Added SIMD support for BE targets in the wasm interpreter
* Fixed several tests that failed on BE because their input or output
  data did not use the LE in-memory layout
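A minimal sketch of the storage convention (illustration only, not code from
this CL; WriteLE32/ReadLE32 are made-up names, not the V8 helpers):

  #include <cassert>
  #include <cstdint>

  // Write a 32-bit value LSB-first, regardless of host byte order.
  void WriteLE32(void* p, uint32_t value) {
    uint8_t* dst = static_cast<uint8_t*>(p);
    for (int i = 0; i < 4; ++i) dst[i] = static_cast<uint8_t>(value >> (8 * i));
  }

  // Read back a 32-bit value that was stored LSB-first.
  uint32_t ReadLE32(const void* p) {
    const uint8_t* src = static_cast<const uint8_t*>(p);
    uint32_t v = 0;
    for (int i = 0; i < 4; ++i) v |= static_cast<uint32_t>(src[i]) << (8 * i);
    return v;
  }

  int main() {
    uint32_t global_storage = 0;
    WriteLE32(&global_storage, 0x11223344u);
    // The lowest-addressed byte is 0x44 on both LE and BE hosts.
    assert(*reinterpret_cast<uint8_t*>(&global_storage) == 0x44);
    assert(ReadLE32(&global_storage) == 0x11223344u);
    return 0;
  }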

Change-Id: I4542d13d09fd276e15b0fc39f02e4a58831f65e4
Reviewed-on: https://chromium-review.googlesource.com/1160484
Commit-Queue: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Aseem Garg <aseemgarg@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55075}
parent 51207093
......@@ -484,16 +484,9 @@ void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
}
} else {
for (int i = 0; i < num_lanes / 2; ++i) {
#if defined(V8_TARGET_BIG_ENDIAN)
rep_node[i] =
graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
rep_node[i + num_lanes / 2] =
graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
#else
rep_node[i] = graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
rep_node[i + num_lanes / 2] =
graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
#endif
}
}
ReplaceNode(node, rep_node, num_lanes);
......@@ -554,21 +547,12 @@ void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
}
} else {
for (int i = 0; i < num_lanes / 2; ++i) {
#if defined(V8_TARGET_BIG_ENDIAN)
rep_node[i] = FixUpperBits(
graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
shift_val);
rep_node[i + num_lanes / 2] = FixUpperBits(
graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
shift_val);
#else
rep_node[i] = FixUpperBits(
graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
shift_val);
rep_node[i + num_lanes / 2] = FixUpperBits(
graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
shift_val);
#endif
}
}
ReplaceNode(node, rep_node, num_lanes);
......@@ -804,17 +788,10 @@ void SimdScalarLowering::LowerPack(Node* node, SimdType input_rep_type,
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
Node* input = nullptr;
#if defined(V8_TARGET_BIG_ENDIAN)
if (i < num_lanes / 2)
input = rep_right[i];
else
input = rep_left[i - num_lanes / 2];
#else
if (i < num_lanes / 2)
input = rep_left[i];
else
input = rep_right[i - num_lanes / 2];
#endif
if (is_signed) {
Diamond d_min(graph(), common(), graph()->NewNode(less_op, input, min));
input = d_min.Phi(phi_rep, min, input);
......@@ -1366,12 +1343,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
Node** rep_node = zone()->NewArray<Node*>(16);
for (int i = 0; i < 16; i++) {
int lane = shuffle[i];
#if defined(V8_TARGET_BIG_ENDIAN)
rep_node[15 - i] =
lane < 16 ? rep_left[15 - lane] : rep_right[31 - lane];
#else
rep_node[i] = lane < 16 ? rep_left[lane] : rep_right[lane - 16];
#endif
}
ReplaceNode(node, rep_node, 16);
break;
......@@ -1487,6 +1459,59 @@ void SimdScalarLowering::Float32ToInt32(Node** replacements, Node** result) {
}
}
template <typename T>
void SimdScalarLowering::Int32ToSmallerInt(Node** replacements, Node** result) {
const int num_ints = sizeof(int32_t) / sizeof(T);
const int bit_size = sizeof(T) * 8;
const Operator* sign_extend;
switch (sizeof(T)) {
case 1:
sign_extend = machine()->SignExtendWord8ToInt32();
break;
case 2:
sign_extend = machine()->SignExtendWord16ToInt32();
break;
default:
UNREACHABLE();
}
for (int i = 0; i < kNumLanes32; i++) {
if (replacements[i] != nullptr) {
for (int j = 0; j < num_ints; j++) {
result[num_ints * i + j] = graph()->NewNode(
sign_extend,
graph()->NewNode(machine()->Word32Sar(), replacements[i],
mcgraph_->Int32Constant(j * bit_size)));
}
} else {
for (int j = 0; j < num_ints; j++) {
result[num_ints * i + j] = nullptr;
}
}
}
}
template <typename T>
void SimdScalarLowering::SmallerIntToInt32(Node** replacements, Node** result) {
const int num_ints = sizeof(int32_t) / sizeof(T);
const int bit_size = sizeof(T) * 8;
const int bit_mask = (1 << bit_size) - 1;
for (int i = 0; i < kNumLanes32; ++i) {
result[i] = mcgraph_->Int32Constant(0);
for (int j = 0; j < num_ints; j++) {
if (replacements[num_ints * i + j] != nullptr) {
Node* clean_bits = graph()->NewNode(machine()->Word32And(),
replacements[num_ints * i + j],
mcgraph_->Int32Constant(bit_mask));
Node* shift = graph()->NewNode(machine()->Word32Shl(), clean_bits,
mcgraph_->Int32Constant(j * bit_size));
result[i] = graph()->NewNode(machine()->Word32Or(), result[i], shift);
}
}
}
}
Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
Node** replacements = GetReplacements(node);
if (ReplacementType(node) == type) {
......@@ -1498,7 +1523,9 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
if (ReplacementType(node) == SimdType::kFloat32x4) {
Float32ToInt32(replacements, result);
} else if (ReplacementType(node) == SimdType::kInt16x8) {
UNIMPLEMENTED();
SmallerIntToInt32<int16_t>(replacements, result);
} else if (ReplacementType(node) == SimdType::kInt8x16) {
SmallerIntToInt32<int8_t>(replacements, result);
} else {
UNREACHABLE();
}
......@@ -1511,12 +1538,19 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
UNREACHABLE();
}
} else if (type == SimdType::kInt16x8) {
if (ReplacementType(node) == SimdType::kInt32x4 ||
ReplacementType(node) == SimdType::kFloat32x4) {
if (ReplacementType(node) == SimdType::kInt32x4) {
Int32ToSmallerInt<int16_t>(replacements, result);
} else if (ReplacementType(node) == SimdType::kFloat32x4) {
UNIMPLEMENTED();
} else {
UNREACHABLE();
}
} else if (type == SimdType::kInt8x16) {
if (ReplacementType(node) == SimdType::kInt32x4) {
Int32ToSmallerInt<int8_t>(replacements, result);
} else {
UNIMPLEMENTED();
}
} else {
UNREACHABLE();
}
......
......@@ -68,6 +68,10 @@ class SimdScalarLowering {
int ReplacementCount(Node* node);
void Float32ToInt32(Node** replacements, Node** result);
void Int32ToFloat32(Node** replacements, Node** result);
template <typename T>
void Int32ToSmallerInt(Node** replacements, Node** result);
template <typename T>
void SmallerIntToInt32(Node** replacements, Node** result);
Node** GetReplacementsWithType(Node* node, SimdType type);
SimdType ReplacementType(Node* node);
void PreparePhiReplacement(Node* phi);
......
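For context, the graph built by Int32ToSmallerInt<T> / SmallerIntToInt32<T>
corresponds to roughly the following scalar arithmetic, shown for T = int8_t
as a standalone sketch (not V8 source; function names are illustrative).
Sub-lane 0 is the least significant byte of the 32-bit lane, consistent with
the little-endian lane layout used elsewhere in this CL:

  #include <cassert>
  #include <cstdint>

  // Extract sub-lane j (0 = least significant byte) of a 32-bit lane,
  // sign-extended to int32 -- the scalar analogue of Word32Sar followed by
  // SignExtendWord8ToInt32.
  int32_t Int32ToInt8Lane(int32_t lane, int j) {
    return static_cast<int8_t>(static_cast<uint32_t>(lane) >> (j * 8));
  }

  // Reassemble a 32-bit lane from four 8-bit sub-lanes -- the scalar analogue
  // of Word32And, Word32Shl and Word32Or.
  int32_t Int8LanesToInt32(const int32_t sub[4]) {
    uint32_t result = 0;
    for (int j = 0; j < 4; ++j) {
      result |= (static_cast<uint32_t>(sub[j]) & 0xFFu) << (j * 8);
    }
    return static_cast<int32_t>(result);
  }

  int main() {
    const int32_t lane = static_cast<int32_t>(0x80FF7F01u);
    int32_t sub[4];
    for (int j = 0; j < 4; ++j) sub[j] = Int32ToInt8Lane(lane, j);
    assert(sub[0] == 0x01 && sub[1] == 0x7F && sub[2] == -1 && sub[3] == -128);
    assert(Int8LanesToInt32(sub) == lane);
    return 0;
  }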
......@@ -3008,8 +3008,13 @@ Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, env_->module->globals[index], &base,
&offset);
return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(mem_type), base,
offset, Effect(), Control()));
Node* load = SetEffect(graph()->NewNode(mcgraph()->machine()->Load(mem_type),
base, offset, Effect(), Control()));
#if defined(V8_TARGET_BIG_ENDIAN)
load = BuildChangeEndiannessLoad(load, mem_type,
env_->module->globals[index].type);
#endif
return load;
}
Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
......@@ -3021,6 +3026,10 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
&offset);
const Operator* op = mcgraph()->machine()->Store(
StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
#if defined(V8_TARGET_BIG_ENDIAN)
val = BuildChangeEndiannessStore(val, mem_type.representation(),
env_->module->globals[index].type);
#endif
return SetEffect(
graph()->NewNode(op, base, offset, val, Effect(), Control()));
}
......
......@@ -159,6 +159,16 @@ static inline void WriteLittleEndianValue(Address p, V value) {
#endif // V8_TARGET_LITTLE_ENDIAN
}
template <typename V>
static inline V ReadLittleEndianValue(V* p) {
return ReadLittleEndianValue<V>(reinterpret_cast<Address>(p));
}
template <typename V>
static inline void WriteLittleEndianValue(V* p, V value) {
WriteLittleEndianValue<V>(reinterpret_cast<Address>(p), value);
}
} // namespace internal
} // namespace v8
......
......@@ -1321,17 +1321,20 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
num, ValueTypes::TypeName(global.type));
switch (global.type) {
case kWasmI32:
*GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num);
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
static_cast<int32_t>(num));
break;
case kWasmI64:
// TODO(titzer): initialization of imported i64 globals.
UNREACHABLE();
break;
case kWasmF32:
*GetRawGlobalPtr<float>(global) = static_cast<float>(num);
WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
static_cast<float>(num));
break;
case kWasmF64:
*GetRawGlobalPtr<double>(global) = static_cast<double>(num);
WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
static_cast<double>(num));
break;
default:
UNREACHABLE();
......@@ -1345,25 +1348,25 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
switch (global.type) {
case kWasmI32: {
int32_t num = value->GetI32();
*GetRawGlobalPtr<int32_t>(global) = num;
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num);
TRACE("%d", num);
break;
}
case kWasmI64: {
int64_t num = value->GetI64();
*GetRawGlobalPtr<int64_t>(global) = num;
WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
TRACE("%" PRId64, num);
break;
}
case kWasmF32: {
float num = value->GetF32();
*GetRawGlobalPtr<float>(global) = num;
WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num);
TRACE("%f", num);
break;
}
case kWasmF64: {
double num = value->GetF64();
*GetRawGlobalPtr<double>(global) = num;
WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
TRACE("%lf", num);
break;
}
......@@ -1737,16 +1740,20 @@ void InstanceBuilder::InitGlobals() {
switch (global.init.kind) {
case WasmInitExpr::kI32Const:
*GetRawGlobalPtr<int32_t>(global) = global.init.val.i32_const;
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
global.init.val.i32_const);
break;
case WasmInitExpr::kI64Const:
*GetRawGlobalPtr<int64_t>(global) = global.init.val.i64_const;
WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global),
global.init.val.i64_const);
break;
case WasmInitExpr::kF32Const:
*GetRawGlobalPtr<float>(global) = global.init.val.f32_const;
WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
global.init.val.f32_const);
break;
case WasmInitExpr::kF64Const:
*GetRawGlobalPtr<double>(global) = global.init.val.f64_const;
WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
global.init.val.f64_const);
break;
case WasmInitExpr::kGlobalIndex: {
// Initialize with another global.
......@@ -1956,13 +1963,16 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
double num = 0;
switch (global.type) {
case kWasmI32:
num = *GetRawGlobalPtr<int32_t>(global);
num = ReadLittleEndianValue<int32_t>(
GetRawGlobalPtr<int32_t>(global));
break;
case kWasmF32:
num = *GetRawGlobalPtr<float>(global);
num =
ReadLittleEndianValue<float>(GetRawGlobalPtr<float>(global));
break;
case kWasmF64:
num = *GetRawGlobalPtr<double>(global);
num = ReadLittleEndianValue<double>(
GetRawGlobalPtr<double>(global));
break;
case kWasmI64:
thrower_->LinkError(
......
......@@ -403,57 +403,6 @@
}], # 'arch == ppc or arch == ppc64 or arch == s390 or arch == s390x'
##############################################################################
['byteorder == big', {
# BUG(7827). fix simd globals for big endian
'test-run-wasm-simd/RunWasm_SimdI32x4GetGlobal_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_SimdI32x4SetGlobal_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_SimdF32x4GetGlobal_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_SimdF32x4SetGlobal_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad_simd_turbofan': [SKIP],
'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad_simd_lowered': [SKIP],
'test-run-wasm-simd/RunWasm_I32x4AddHoriz_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_I16x8AddHoriz_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_F32x4AddHoriz_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S32x4Dup_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S32x4ZipLeft_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S32x4ZipRight_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S32x4UnzipLeft_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S32x4UnzipRight_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S32x4TransposeLeft_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S32x4TransposeRight_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S32x2Reverse_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S32x4Irregular_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x8Dup_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x8ZipLeft_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x8ZipRight_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x8UnzipLeft_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x8UnzipRight_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x8TransposeLeft_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x8TransposeRight_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x4Reverse_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x2Reverse_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S16x8Irregular_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16Dup_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16ZipLeft_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16ZipRight_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16UnzipLeft_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16UnzipRight_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16TransposeLeft_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16TransposeRight_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x8Reverse_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x4Reverse_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x2Reverse_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16Irregular_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16Blend_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_S8x16Concat_interpreter': [SKIP],
'test-run-wasm-simd/RunWasm_I16x8ConvertI32x4*': [SKIP],
'test-run-wasm-simd/RunWasm_I8x16ConvertI16x8*': [SKIP],
}], # 'byteorder == big'
##############################################################################
['variant == stress_incremental_marking', {
'test-heap-profiler/SamplingHeapProfiler': [SKIP],
......
......@@ -1359,9 +1359,9 @@ WASM_EXEC_TEST(I64Global) {
r.builder().WriteMemory<int64_t>(global, 0xFFFFFFFFFFFFFFFFLL);
for (int i = 9; i < 444444; i += 111111) {
int64_t expected = *global & i;
int64_t expected = ReadLittleEndianValue<int64_t>(global) & i;
r.Call(i);
CHECK_EQ(expected, *global);
CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(global));
}
}
......
......@@ -1692,8 +1692,8 @@ void RunBinaryLaneOpTest(
T* src1 = r.builder().AddGlobal<T>(kWasmS128);
static const int kElems = kSimd128Size / sizeof(T);
for (int i = 0; i < kElems; i++) {
src0[i] = i;
src1[i] = kElems + i;
WriteLittleEndianValue<T>(&src0[i], i);
WriteLittleEndianValue<T>(&src1[i], kElems + i);
}
if (simd_op == kExprS8x16Shuffle) {
BUILD(r,
......@@ -1710,7 +1710,7 @@ void RunBinaryLaneOpTest(
CHECK_EQ(1, r.Call());
for (size_t i = 0; i < expected.size(); i++) {
CHECK_EQ(src0[i], expected[i]);
CHECK_EQ(ReadLittleEndianValue<T>(&src0[i]), expected[i]);
}
}
......@@ -1979,13 +1979,13 @@ void RunWasmCode(WasmExecutionMode execution_mode, LowerSimd lower_simd,
int8_t* src0 = r.builder().AddGlobal<int8_t>(kWasmS128);
int8_t* src1 = r.builder().AddGlobal<int8_t>(kWasmS128);
for (int i = 0; i < kSimd128Size; ++i) {
src0[i] = i;
src1[i] = kSimd128Size + i;
WriteLittleEndianValue<int8_t>(&src0[i], i);
WriteLittleEndianValue<int8_t>(&src1[i], kSimd128Size + i);
}
r.Build(code.data(), code.data() + code.size());
CHECK_EQ(1, r.Call());
for (size_t i = 0; i < kSimd128Size; i++) {
(*result)[i] = src0[i];
(*result)[i] = ReadLittleEndianValue<int8_t>(&src0[i]);
}
}
......@@ -2233,26 +2233,17 @@ WASM_SIMD_TEST(SimdF32x4For) {
template <typename T, int numLanes = 4>
void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
for (int lane = 0; lane < numLanes; lane++) {
const T& value = arr[lane];
#if defined(V8_TARGET_BIG_ENDIAN)
v[numLanes - 1 - lane] = value;
#else
v[lane] = value;
#endif
WriteLittleEndianValue<T>(&v[lane], arr[lane]);
}
}
template <typename T>
const T& GetScalar(T* v, int lane) {
const T GetScalar(T* v, int lane) {
constexpr int kElems = kSimd128Size / sizeof(T);
#if defined(V8_TARGET_BIG_ENDIAN)
const int index = kElems - 1 - lane;
#else
const int index = lane;
#endif
USE(kElems);
DCHECK(index >= 0 && index < kElems);
return v[index];
return ReadLittleEndianValue<T>(&v[index]);
}
WASM_SIMD_TEST(SimdI32x4GetGlobal) {
......
......@@ -2066,11 +2066,11 @@ WASM_EXEC_TEST(Int32Global) {
WASM_SET_GLOBAL(0, WASM_I32_ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0))),
WASM_ZERO);
*global = 116;
WriteLittleEndianValue<int32_t>(global, 116);
for (int i = 9; i < 444444; i += 111111) {
int32_t expected = *global + i;
int32_t expected = ReadLittleEndianValue<int32_t>(global) + i;
r.Call(i);
CHECK_EQ(expected, *global);
CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(global));
}
}
......@@ -2088,16 +2088,17 @@ WASM_EXEC_TEST(Int32Globals_DontAlias) {
WASM_GET_GLOBAL(g));
// Check that reading/writing global number {g} doesn't alter the others.
*globals[g] = 116 * g;
WriteLittleEndianValue<int32_t>(globals[g], 116 * g);
int32_t before[kNumGlobals];
for (int i = 9; i < 444444; i += 111113) {
int32_t sum = *globals[g] + i;
for (int j = 0; j < kNumGlobals; ++j) before[j] = *globals[j];
int32_t sum = ReadLittleEndianValue<int32_t>(globals[g]) + i;
for (int j = 0; j < kNumGlobals; ++j)
before[j] = ReadLittleEndianValue<int32_t>(globals[j]);
int32_t result = r.Call(i);
CHECK_EQ(sum, result);
for (int j = 0; j < kNumGlobals; ++j) {
int32_t expected = j == g ? sum : before[j];
CHECK_EQ(expected, *globals[j]);
CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(globals[j]));
}
}
}
......@@ -2112,11 +2113,11 @@ WASM_EXEC_TEST(Float32Global) {
WASM_F32_SCONVERT_I32(WASM_GET_LOCAL(0)))),
WASM_ZERO);
*global = 1.25;
WriteLittleEndianValue<float>(global, 1.25);
for (int i = 9; i < 4444; i += 1111) {
volatile float expected = *global + i;
volatile float expected = ReadLittleEndianValue<float>(global) + i;
r.Call(i);
CHECK_EQ(expected, *global);
CHECK_EQ(expected, ReadLittleEndianValue<float>(global));
}
}
......@@ -2129,11 +2130,11 @@ WASM_EXEC_TEST(Float64Global) {
WASM_F64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
WASM_ZERO);
*global = 1.25;
WriteLittleEndianValue<double>(global, 1.25);
for (int i = 9; i < 4444; i += 1111) {
volatile double expected = *global + i;
volatile double expected = ReadLittleEndianValue<double>(global) + i;
r.Call(i);
CHECK_EQ(expected, *global);
CHECK_EQ(expected, ReadLittleEndianValue<double>(global));
}
}
......@@ -2164,10 +2165,13 @@ WASM_EXEC_TEST(MixedGlobals) {
memory[7] = 0x99;
r.Call(1);
CHECK(static_cast<int32_t>(0xEE55CCAA) == *var_int32);
CHECK(static_cast<uint32_t>(0xEE55CCAA) == *var_uint32);
CHECK(bit_cast<float>(0xEE55CCAA) == *var_float);
CHECK(bit_cast<double>(0x99112233EE55CCAAULL) == *var_double);
CHECK(static_cast<int32_t>(0xEE55CCAA) ==
ReadLittleEndianValue<int32_t>(var_int32));
CHECK(static_cast<uint32_t>(0xEE55CCAA) ==
ReadLittleEndianValue<uint32_t>(var_uint32));
CHECK(bit_cast<float>(0xEE55CCAA) == ReadLittleEndianValue<float>(var_float));
CHECK(bit_cast<double>(0x99112233EE55CCAAULL) ==
ReadLittleEndianValue<double>(var_double));
USE(unused);
}
......@@ -3226,8 +3230,7 @@ void BinOpOnDifferentRegisters(
ctype value =
i == lhs ? lhs_value
: i == rhs ? rhs_value : static_cast<ctype>(i + 47);
WriteLittleEndianValue<ctype>(
reinterpret_cast<Address>(&memory[i]), value);
WriteLittleEndianValue<ctype>(&memory[i], value);
}
bool trap = false;
int64_t expect = expect_fn(lhs_value, rhs_value, &trap);
......@@ -3236,14 +3239,12 @@ void BinOpOnDifferentRegisters(
continue;
}
CHECK_EQ(0, r.Call());
CHECK_EQ(expect, ReadLittleEndianValue<ctype>(
reinterpret_cast<Address>(&memory[0])));
CHECK_EQ(expect, ReadLittleEndianValue<ctype>(&memory[0]));
for (int i = 0; i < num_locals; ++i) {
ctype value =
i == lhs ? lhs_value
: i == rhs ? rhs_value : static_cast<ctype>(i + 47);
CHECK_EQ(value, ReadLittleEndianValue<ctype>(
reinterpret_cast<Address>(&memory[i + 1])));
CHECK_EQ(value, ReadLittleEndianValue<ctype>(&memory[i + 1]));
}
}
}
......