Commit ba14c2f3 authored by Ng Zhi An, committed by Commit Bot

[wasm-simd][liftoff] Support SIMD locals

Add kWasmS128 to the list of supported types, and implement Fill for all
the architectures so that LocalGet works.
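
To make the stack-slot mechanics concrete, here is a small standalone sketch. It is not part of this change and not V8 code, and all names in it are invented: the new ia32/x64 Spill/Fill cases emit movdqu, an unaligned 128-bit move between an XMM register and a 16-byte stack slot, which the SSE2 intrinsics below express directly.

// Illustration only, not V8 code. "slot" stands in for a Liftoff stack slot.
#include <emmintrin.h>  // SSE2: _mm_storeu_si128 / _mm_loadu_si128
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t slot[16] = {};                   // the 16-byte stack slot
  __m128i value = _mm_set1_epi32(0xbeef);  // a v128 value held in an XMM register

  // "Spill": movdqu [slot], xmm, i.e. write the register to the stack slot.
  _mm_storeu_si128(reinterpret_cast<__m128i*>(slot), value);

  // "Fill": movdqu xmm, [slot], i.e. reload the slot into a register, which is
  // what LocalGet needs when the local currently lives on the stack.
  __m128i reloaded = _mm_loadu_si128(reinterpret_cast<const __m128i*>(slot));

  // The round trip preserves all 16 bytes.
  uint8_t out[16];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(out), reloaded);
  for (int i = 0; i < 16; ++i) printf("%02x", out[i]);
  printf("\n");
}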

Add a new test file to contain tests that run only on Liftoff, and
assert that the code is indeed compiled by Liftoff.
We cannot rely on the nooptimization variant for testing because, by
default, if Liftoff compilation fails it falls back to TurboFan, and the
test would then pass accidentally.

We skip these tests on MIPS architectures that don't support SIMD, since
there is no way to implement the operations there, and we don't have a
"lowering" phase for Liftoff.

As we implement more of SIMD in Liftoff, we can add more
tests to this file and ensure correctness. Future patches will introduce
support for globals and params.

Bug: v8:9909
Change-Id: I7fc911f2d588d60c709ddb258b2efc1f22805fab
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1999470
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65768}
parent 33e88152
@@ -662,6 +662,15 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
     case kWasmF64:
       vldr(reg.fp(), liftoff::GetStackSlot(offset));
       break;
+    case kWasmS128: {
+      // Get memory address of slot to fill from.
+      MemOperand slot = liftoff::GetStackSlot(offset);
+      UseScratchRegisterScope temps(this);
+      Register addr = liftoff::CalculateActualAddress(this, &temps, slot.rn(),
+                                                      no_reg, slot.offset());
+      vld1(Neon64, NeonListOperand(reg.low_fp(), 2), NeonMemOperand(addr));
+      break;
+    }
     default:
       UNREACHABLE();
   }
...
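
For comparison, here is a small standalone NEON sketch of the arm Fill path above. It is an illustration only, not V8 code, and every name in it is invented: vld1 with a two-register NeonListOperand performs a single 128-bit load from the computed slot address, which the intrinsic vld1q_u8 expresses directly.

// Illustration only: mirrors the arm Fill case above. Compute the address of a
// 16-byte stack slot, then load all 128 bits into a Q register (a d-register
// pair) with one vld1. Builds only for ARM targets with NEON support.
#include <arm_neon.h>
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t frame[64];
  for (int i = 0; i < 64; ++i) frame[i] = static_cast<uint8_t>(i);

  int offset = 16;                       // offset of the slot within the frame
  const uint8_t* addr = frame + offset;  // analogue of CalculateActualAddress

  uint8x16_t value = vld1q_u8(addr);     // vld1.8 {d0, d1}, [addr]

  uint8_t out[16];
  vst1q_u8(out, value);
  for (int i = 0; i < 16; ++i) printf("%02x ", out[i]);
  printf("\n");
}
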
@@ -57,6 +57,8 @@ inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
       return reg.fp().S();
     case kWasmF64:
       return reg.fp().D();
+    case kWasmS128:
+      return reg.fp().Q();
     default:
       UNREACHABLE();
   }
...
@@ -329,6 +329,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
     case LoadType::kF64Load:
       movsd(dst.fp(), src_op);
       break;
+    case LoadType::kS128Load:
+      movdqu(dst.fp(), src_op);
+      break;
     default:
       UNREACHABLE();
   }
@@ -473,6 +476,9 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
     case kWasmF64:
       movsd(dst, reg.fp());
       break;
+    case kWasmS128:
+      movdqu(dst, reg.fp());
+      break;
     default:
       UNREACHABLE();
   }
@@ -514,6 +520,9 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
     case kWasmF64:
       movsd(reg.fp(), src);
       break;
+    case kWasmS128:
+      movdqu(reg.fp(), src);
+      break;
     default:
       UNREACHABLE();
   }
...
@@ -116,7 +116,7 @@ compiler::CallDescriptor* GetLoweredCallDescriptor(
 }
 constexpr ValueType kSupportedTypesArr[] = {kWasmI32, kWasmI64, kWasmF32,
-                                            kWasmF64};
+                                            kWasmF64, kWasmS128};
 constexpr Vector<const ValueType> kSupportedTypes =
     ArrayVector(kSupportedTypesArr);
...
@@ -302,6 +302,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
     case LoadType::kF64Load:
       Movsd(dst.fp(), src_op);
       break;
+    case LoadType::kS128Load:
+      Movdqu(dst.fp(), src_op);
+      break;
     default:
       UNREACHABLE();
   }
@@ -438,6 +441,9 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
     case kWasmF64:
       Movsd(dst, reg.fp());
       break;
+    case kWasmS128:
+      Movdqu(dst, reg.fp());
+      break;
     default:
       UNREACHABLE();
   }
@@ -485,6 +491,9 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
     case kWasmF64:
       Movsd(reg.fp(), src);
       break;
+    case kWasmS128:
+      Movdqu(reg.fp(), src);
+      break;
     default:
       UNREACHABLE();
   }
...
@@ -286,6 +286,7 @@ v8_source_set("cctest_sources") {
     "wasm/test-run-wasm-js.cc",
     "wasm/test-run-wasm-module.cc",
     "wasm/test-run-wasm-sign-extension.cc",
+    "wasm/test-run-wasm-simd-liftoff.cc",
     "wasm/test-run-wasm-simd.cc",
     "wasm/test-run-wasm.cc",
     "wasm/test-streaming-compilation.cc",
...
@@ -321,6 +321,7 @@
   'test-run-wasm-simd/RunWasm_ReductionTest4_compiled': [SKIP],
   'test-run-wasm-simd/RunWasm_ReductionTest8_compiled': [SKIP],
   'test-run-wasm-simd/RunWasm_ReductionTest16_compiled': [SKIP],
+  'test-run-wasm-simd-liftoff/*': [SKIP],
 }],  # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'

 ##############################################################################
@@ -464,6 +465,7 @@
   'test-run-wasm-js/*': [SKIP],
   'test-run-wasm-module/*': [SKIP],
   'test-run-wasm-sign-extension/*': [SKIP],
+  'test-run-wasm-simd-liftoff/*': [SKIP],
   'test-run-wasm-simd/*': [SKIP],
   'test-streaming-compilation/*': [SKIP],
   'test-wasm-breakpoints/*': [SKIP],
...
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file contains tests that run only on Liftoff, and each test verifies
// that the code was compiled by Liftoff. By default, compilation of each
// function is first attempted with Liftoff and falls back to TurboFan if that
// fails. Here, however, we want to enforce that Liftoff is the tier that
// compiles these functions, in order to verify the correctness of the SIMD
// implementation in Liftoff.
#include "test/cctest/cctest.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace test_run_wasm_simd_liftoff {
#define WASM_SIMD_LIFTOFF_TEST(name) \
  void RunWasm_##name##_Impl();      \
  TEST(RunWasm_##name##_liftoff) {   \
    EXPERIMENTAL_FLAG_SCOPE(simd);   \
    RunWasm_##name##_Impl();         \
  }                                  \
  void RunWasm_##name##_Impl()

WASM_SIMD_LIFTOFF_TEST(S128Local) {
  WasmRunner<int32_t> r(ExecutionTier::kLiftoff, kNoLowerSimd);
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_GET_LOCAL(temp1)), WASM_ONE);
  CHECK_EQ(1, r.Call());
  r.CheckUsedExecutionTier(ExecutionTier::kLiftoff);
}
#undef WASM_SIMD_LIFTOFF_TEST
} // namespace test_run_wasm_simd_liftoff
} // namespace wasm
} // namespace internal
} // namespace v8
@@ -559,6 +559,13 @@ class WasmRunner : public WasmRunnerBase {
     CheckCallApplyViaJS(expected, function()->func_index, buffer, sizeof...(p));
   }

+  void CheckUsedExecutionTier(ExecutionTier expected_tier) {
+    // Liftoff can fail and fall back to TurboFan, so check that the function
+    // was compiled by the requested tier, to guard against accidental success.
+    CHECK(compiled_);
+    CHECK_EQ(expected_tier, builder_.GetFunctionCode(0)->tier());
+  }
+
  Handle<Code> GetWrapperCode() { return wrapper_.GetWrapperCode(); }

 private:
...
@@ -8,4 +8,4 @@ liftoff func: 4+0x5 store to 00000004 val: i8:171 / ab
 liftoff func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
 liftoff func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
 turbofan func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
-turbofan func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+liftoff func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000