Commit 8bcee191 authored by Ng Zhi An, committed by Commit Bot

[wasm-simd][liftoff] Handle SIMD params in function

Add a cctest that contains a function with a SIMD parameter, and calls
that function. This will exercise two cases in Liftoff which involves
preparing to call the function, and processing the SIMD parameters of
the function. The tricky case here is ARM, which requires an FP pair.
Most of the logic added is to check the RegClass/type and construct the
right type of LiftoffRegister to use.

As a drive-by, added SIMD case to the various backends' Move
implementation. This is not exercised by the test case, as it requires a
more complicated function setup.

Bug: v8:9909
Change-Id: I1d01e8c3bee0cf336d1a8ff537317c77aedfdac0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2004369
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65829}
parent 13b148a3
...@@ -181,6 +181,10 @@ inline FloatRegister GetFloatRegister(DoubleRegister reg) { ...@@ -181,6 +181,10 @@ inline FloatRegister GetFloatRegister(DoubleRegister reg) {
return LowDwVfpRegister::from_code(reg.code()).low(); return LowDwVfpRegister::from_code(reg.code()).low();
} }
// Returns the Q (quad/SIMD) register that aliases the given D register.
// On ARM NEON, each Q register overlaps a consecutive pair of D registers,
// so the Q-register code is the D-register code divided by two.
inline Simd128Register GetSimd128Register(DoubleRegister reg) {
  int q_code = reg.code() / 2;
  return QwNeonRegister::from_code(q_code);
}
enum class MinOrMax : uint8_t { kMin, kMax }; enum class MinOrMax : uint8_t { kMin, kMax };
template <typename RegisterType> template <typename RegisterType>
inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst, inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
...@@ -591,9 +595,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -591,9 +595,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
if (type == kWasmF32) { if (type == kWasmF32) {
vmov(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src)); vmov(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
} else { } else if (type == kWasmF64) {
DCHECK_EQ(kWasmF64, type);
vmov(dst, src); vmov(dst, src);
} else {
DCHECK_EQ(kWasmS128, type);
vmov(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
} }
} }
......
...@@ -382,9 +382,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -382,9 +382,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) { ValueType type) {
if (type == kWasmF32) { if (type == kWasmF32) {
Fmov(dst.S(), src.S()); Fmov(dst.S(), src.S());
} else { } else if (type == kWasmF64) {
DCHECK_EQ(kWasmF64, type);
Fmov(dst.D(), src.D()); Fmov(dst.D(), src.D());
} else {
DCHECK_EQ(kWasmS128, type);
Fmov(dst.Q(), src.Q());
} }
} }
......
...@@ -452,9 +452,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -452,9 +452,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
if (type == kWasmF32) { if (type == kWasmF32) {
movss(dst, src); movss(dst, src);
} else { } else if (type == kWasmF64) {
DCHECK_EQ(kWasmF64, type);
movsd(dst, src); movsd(dst, src);
} else {
DCHECK_EQ(kWasmS128, type);
movapd(dst, src);
} }
} }
......
...@@ -636,17 +636,22 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig, ...@@ -636,17 +636,22 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
DCHECK(!loc.IsAnyRegister()); DCHECK(!loc.IsAnyRegister());
RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type); RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type);
int reg_code = loc.AsRegister(); int reg_code = loc.AsRegister();
#if V8_TARGET_ARCH_ARM
// Initialize to anything, will be set in all branches below.
LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
if (!kSimpleFPAliasing && type == kWasmF32) {
// Liftoff assumes a one-to-one mapping between float registers and // Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64 // double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order to // registers. The f32 register code must therefore be halved in order
// pass the f64 code to Liftoff. // to pass the f64 code to Liftoff.
DCHECK_IMPLIES(type == kWasmF32, (reg_code % 2) == 0); DCHECK_EQ(0, reg_code % 2);
LiftoffRegister reg = LiftoffRegister::from_code( reg = LiftoffRegister::from_code(rc, (reg_code / 2));
rc, (type == kWasmF32) ? (reg_code / 2) : reg_code); } else if (kNeedS128RegPair && type == kWasmS128) {
#else reg = LiftoffRegister::ForFpPair(DoubleRegister::from_code(reg_code));
LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code); } else {
#endif reg = LiftoffRegister::from_code(rc, reg_code);
}
param_regs.set(reg); param_regs.set(reg);
if (is_gp_pair) { if (is_gp_pair) {
stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_offset, stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_offset,
......
...@@ -426,7 +426,12 @@ class LiftoffCompiler { ...@@ -426,7 +426,12 @@ class LiftoffCompiler {
: kLiftoffAssemblerFpCacheRegs; : kLiftoffAssemblerFpCacheRegs;
if (cache_regs & (1ULL << reg_code)) { if (cache_regs & (1ULL << reg_code)) {
// This is a cache register, just use it. // This is a cache register, just use it.
if (kNeedS128RegPair && rc == kFpRegPair) {
in_reg =
LiftoffRegister::ForFpPair(DoubleRegister::from_code(reg_code));
} else {
in_reg = LiftoffRegister::from_code(rc, reg_code); in_reg = LiftoffRegister::from_code(rc, reg_code);
}
} else { } else {
// Move to a cache register (spill one if necessary). // Move to a cache register (spill one if necessary).
// Note that we cannot create a {LiftoffRegister} for reg_code, since // Note that we cannot create a {LiftoffRegister} for reg_code, since
...@@ -434,7 +439,11 @@ class LiftoffCompiler { ...@@ -434,7 +439,11 @@ class LiftoffCompiler {
in_reg = __ GetUnusedRegister(rc, pinned); in_reg = __ GetUnusedRegister(rc, pinned);
if (rc == kGpReg) { if (rc == kGpReg) {
__ Move(in_reg.gp(), Register::from_code(reg_code), lowered_type); __ Move(in_reg.gp(), Register::from_code(reg_code), lowered_type);
} else if (kNeedS128RegPair && rc == kFpRegPair) {
__ Move(in_reg.low_fp(), DoubleRegister::from_code(reg_code),
lowered_type);
} else { } else {
DCHECK_EQ(kFpReg, rc);
__ Move(in_reg.fp(), DoubleRegister::from_code(reg_code), __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code),
lowered_type); lowered_type);
} }
......
...@@ -418,9 +418,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -418,9 +418,11 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
if (type == kWasmF32) { if (type == kWasmF32) {
Movss(dst, src); Movss(dst, src);
} else { } else if (type == kWasmF64) {
DCHECK_EQ(kWasmF64, type);
Movsd(dst, src); Movsd(dst, src);
} else {
DCHECK_EQ(kWasmS128, type);
Movapd(dst, src);
} }
} }
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include "test/cctest/cctest.h" #include "test/cctest/cctest.h"
#include "test/cctest/wasm/wasm-run-utils.h" #include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h" #include "test/common/wasm/wasm-macro-gen.h"
namespace v8 { namespace v8 {
...@@ -52,6 +53,24 @@ WASM_SIMD_LIFTOFF_TEST(S128Global) { ...@@ -52,6 +53,24 @@ WASM_SIMD_LIFTOFF_TEST(S128Global) {
} }
} }
WASM_SIMD_LIFTOFF_TEST(S128Param) {
  // Test how SIMD parameters in functions are processed. There is no easy way
  // to specify a SIMD value when initializing a WasmRunner, so we manually
  // add a new function with the right signature, and call it from main.
  WasmRunner<int32_t> r(ExecutionTier::kLiftoff, kNoLowerSimd);
  TestSignatures sigs;
  // We use a temp local to materialize a SIMD value, since at this point
  // Liftoff does not support any SIMD operations.
  byte temp1 = r.AllocateLocal(kWasmS128);
  // sigs.i_s() is the (s128) -> i32 signature added alongside this test.
  WasmFunctionCompiler& simd_func = r.NewFunction(sigs.i_s());
  // The callee ignores its s128 argument and just returns 1 — the point is to
  // exercise Liftoff's handling of an s128 parameter, not the value itself.
  BUILD(simd_func, WASM_ONE);
  BUILD(r,
        WASM_CALL_FUNCTION(simd_func.function_index(), WASM_GET_LOCAL(temp1)));
  CHECK_EQ(1, r.Call());
}
#undef WASM_SIMD_LIFTOFF_TEST #undef WASM_SIMD_LIFTOFF_TEST
} // namespace test_run_wasm_simd_liftoff } // namespace test_run_wasm_simd_liftoff
......
...@@ -29,6 +29,7 @@ class TestSignatures { ...@@ -29,6 +29,7 @@ class TestSignatures {
sig_i_rr(1, 2, kIntAnyRefTypes4), sig_i_rr(1, 2, kIntAnyRefTypes4),
sig_i_a(1, 1, kIntFuncRefTypes4), sig_i_a(1, 1, kIntFuncRefTypes4),
sig_i_n(1, 1, kIntNullRefTypes4), sig_i_n(1, 1, kIntNullRefTypes4),
sig_i_s(1, 1, kIntSimd128Types4),
sig_l_v(1, 0, kLongTypes4), sig_l_v(1, 0, kLongTypes4),
sig_l_l(1, 1, kLongTypes4), sig_l_l(1, 1, kLongTypes4),
sig_l_ll(1, 2, kLongTypes4), sig_l_ll(1, 2, kLongTypes4),
...@@ -65,6 +66,7 @@ class TestSignatures { ...@@ -65,6 +66,7 @@ class TestSignatures {
for (int i = 1; i < 4; i++) kIntAnyRefTypes4[i] = kWasmAnyRef; for (int i = 1; i < 4; i++) kIntAnyRefTypes4[i] = kWasmAnyRef;
for (int i = 1; i < 4; i++) kIntFuncRefTypes4[i] = kWasmFuncRef; for (int i = 1; i < 4; i++) kIntFuncRefTypes4[i] = kWasmFuncRef;
for (int i = 1; i < 4; i++) kIntNullRefTypes4[i] = kWasmNullRef; for (int i = 1; i < 4; i++) kIntNullRefTypes4[i] = kWasmNullRef;
for (int i = 1; i < 4; i++) kIntSimd128Types4[i] = kWasmS128;
for (int i = 0; i < 4; i++) kSimd128IntTypes4[i] = kWasmS128; for (int i = 0; i < 4; i++) kSimd128IntTypes4[i] = kWasmS128;
kIntLongTypes4[0] = kWasmI32; kIntLongTypes4[0] = kWasmI32;
kIntFloatTypes4[0] = kWasmI32; kIntFloatTypes4[0] = kWasmI32;
...@@ -72,6 +74,7 @@ class TestSignatures { ...@@ -72,6 +74,7 @@ class TestSignatures {
kIntAnyRefTypes4[0] = kWasmI32; kIntAnyRefTypes4[0] = kWasmI32;
kIntFuncRefTypes4[0] = kWasmI32; kIntFuncRefTypes4[0] = kWasmI32;
kIntNullRefTypes4[0] = kWasmI32; kIntNullRefTypes4[0] = kWasmI32;
kIntSimd128Types4[0] = kWasmI32;
kSimd128IntTypes4[1] = kWasmI32; kSimd128IntTypes4[1] = kWasmI32;
} }
...@@ -93,6 +96,7 @@ class TestSignatures { ...@@ -93,6 +96,7 @@ class TestSignatures {
FunctionSig* i_rr() { return &sig_i_rr; } FunctionSig* i_rr() { return &sig_i_rr; }
FunctionSig* i_a() { return &sig_i_a; } FunctionSig* i_a() { return &sig_i_a; }
FunctionSig* i_n() { return &sig_i_n; } FunctionSig* i_n() { return &sig_i_n; }
FunctionSig* i_s() { return &sig_i_s; }
FunctionSig* f_f() { return &sig_f_f; } FunctionSig* f_f() { return &sig_f_f; }
FunctionSig* f_ff() { return &sig_f_ff; } FunctionSig* f_ff() { return &sig_f_ff; }
...@@ -139,6 +143,7 @@ class TestSignatures { ...@@ -139,6 +143,7 @@ class TestSignatures {
ValueType kIntAnyRefTypes4[4]; ValueType kIntAnyRefTypes4[4];
ValueType kIntFuncRefTypes4[4]; ValueType kIntFuncRefTypes4[4];
ValueType kIntNullRefTypes4[4]; ValueType kIntNullRefTypes4[4];
ValueType kIntSimd128Types4[4];
ValueType kSimd128IntTypes4[4]; ValueType kSimd128IntTypes4[4];
FunctionSig sig_i_v; FunctionSig sig_i_v;
...@@ -154,6 +159,7 @@ class TestSignatures { ...@@ -154,6 +159,7 @@ class TestSignatures {
FunctionSig sig_i_rr; FunctionSig sig_i_rr;
FunctionSig sig_i_a; FunctionSig sig_i_a;
FunctionSig sig_i_n; FunctionSig sig_i_n;
FunctionSig sig_i_s;
FunctionSig sig_l_v; FunctionSig sig_l_v;
FunctionSig sig_l_l; FunctionSig sig_l_l;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment