Commit ac33b533 authored by Thibaud Michaud, committed by Commit Bot

[liftoff][mv] Support multi-value calls

R=clemensb@chromium.org,ahaas@chromium.org

Bug: v8:10408
Change-Id: Id4f5136e36ab41a18a240e31c7a43bf634be2e44
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2179384
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67644}
parent dc1af6a2
...@@ -44,12 +44,14 @@ constexpr int kInstanceOffset = 2 * kSystemPointerSize; ...@@ -44,12 +44,14 @@ constexpr int kInstanceOffset = 2 * kSystemPointerSize;
// Three instructions are required to sub a large constant, movw + movt + sub. // Three instructions are required to sub a large constant, movw + movt + sub.
constexpr int32_t kPatchInstructionsRequired = 3; constexpr int32_t kPatchInstructionsRequired = 3;
inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); } inline MemOperand GetStackSlot(int offset) {
return MemOperand(offset > 0 ? fp : sp, -offset);
}
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -offset + half_offset); return MemOperand(offset > 0 ? fp : sp, -offset + half_offset);
} }
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); } inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
......
...@@ -41,7 +41,9 @@ namespace liftoff { ...@@ -41,7 +41,9 @@ namespace liftoff {
constexpr int kInstanceOffset = 2 * kSystemPointerSize; constexpr int kInstanceOffset = 2 * kSystemPointerSize;
inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); } inline MemOperand GetStackSlot(int offset) {
return MemOperand(offset > 0 ? fp : sp, -offset);
}
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); } inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
......
...@@ -23,12 +23,14 @@ namespace liftoff { ...@@ -23,12 +23,14 @@ namespace liftoff {
// ebp-4 holds the stack marker, ebp-8 is the instance parameter. // ebp-4 holds the stack marker, ebp-8 is the instance parameter.
constexpr int kInstanceOffset = 8; constexpr int kInstanceOffset = 8;
inline Operand GetStackSlot(int offset) { return Operand(ebp, -offset); } inline Operand GetStackSlot(int offset) {
return Operand(offset > 0 ? ebp : esp, -offset);
}
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return Operand(ebp, -offset + half_offset); return Operand(offset > 0 ? ebp : esp, -offset + half_offset);
} }
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr. // TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include "src/compiler/linkage.h" #include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h" #include "src/compiler/wasm-compiler.h"
#include "src/utils/ostreams.h" #include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder-impl.h" #include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-linkage.h" #include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-opcodes.h" #include "src/wasm/wasm-opcodes.h"
...@@ -676,25 +677,8 @@ void PrepareStackTransfers(const FunctionSig* sig, ...@@ -676,25 +677,8 @@ void PrepareStackTransfers(const FunctionSig* sig,
DCHECK(!loc.IsAnyRegister()); DCHECK(!loc.IsAnyRegister());
RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type); RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type);
int reg_code = loc.AsRegister(); int reg_code = loc.AsRegister();
LiftoffRegister reg =
// Initialize to anything, will be set in all branches below. LiftoffRegister::from_external_code(rc, type, reg_code);
LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
if (!kSimpleFPAliasing && type == kWasmF32) {
// Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order
// to pass the f64 code to Liftoff.
DCHECK_EQ(0, reg_code % 2);
reg = LiftoffRegister::from_code(rc, (reg_code / 2));
} else if (kNeedS128RegPair && type == kWasmS128) {
// Similarly for double registers and SIMD registers, the SIMD code
// needs to be doubled to pass the f64 code to Liftoff.
reg = LiftoffRegister::ForFpPair(
DoubleRegister::from_code(reg_code * 2));
} else {
reg = LiftoffRegister::from_code(rc, reg_code);
}
param_regs->set(reg); param_regs->set(reg);
if (is_gp_pair) { if (is_gp_pair) {
stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset, stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset,
...@@ -792,7 +776,6 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig, ...@@ -792,7 +776,6 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
stack_slots.Construct(); stack_slots.Construct();
// Execute the stack transfers before filling the instance register. // Execute the stack transfers before filling the instance register.
stack_transfers.Execute(); stack_transfers.Execute();
// Pop parameters from the value stack. // Pop parameters from the value stack.
cache_state_.stack_state.pop_back(num_params); cache_state_.stack_state.pop_back(num_params);
...@@ -807,36 +790,46 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig, ...@@ -807,36 +790,46 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
void LiftoffAssembler::FinishCall(const FunctionSig* sig, void LiftoffAssembler::FinishCall(const FunctionSig* sig,
compiler::CallDescriptor* call_descriptor) { compiler::CallDescriptor* call_descriptor) {
const size_t return_count = sig->return_count(); // Offset of the current return value relative to the stack pointer.
if (return_count != 0) { int return_offset = 0;
DCHECK_EQ(1, return_count); int call_desc_return_idx = 0;
ValueType return_type = sig->GetReturn(0); for (ValueType return_type : sig->returns()) {
DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount());
const bool needs_gp_pair = needs_gp_reg_pair(return_type); const bool needs_gp_pair = needs_gp_reg_pair(return_type);
const bool needs_fp_pair = needs_fp_reg_pair(return_type); const int num_lowered_params = 1 + needs_gp_pair;
DCHECK_EQ(needs_gp_pair ? 2 : 1, call_descriptor->ReturnCount()); const ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
RegClass rc = needs_gp_pair const RegClass rc = reg_class_for(lowered_type);
? kGpReg // Initialize to anything, will be set in the loop and used afterwards.
: needs_fp_pair ? kFpReg : reg_class_for(return_type); LiftoffRegister reg_pair[2] = {kGpCacheRegList.GetFirstRegSet(),
#if V8_TARGET_ARCH_ARM kGpCacheRegList.GetFirstRegSet()};
// If the return register was not d0 for f32, the code value would have to LiftoffRegList pinned;
// be halved as is done for the parameter registers. for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
DCHECK_EQ(call_descriptor->GetReturnLocation(0).AsRegister(), 0); compiler::LinkageLocation loc =
#endif call_descriptor->GetReturnLocation(call_desc_return_idx++);
LiftoffRegister return_reg = LiftoffRegister::from_code( if (loc.IsRegister()) {
rc, call_descriptor->GetReturnLocation(0).AsRegister()); DCHECK(!loc.IsAnyRegister());
DCHECK(GetCacheRegList(rc).has(return_reg)); reg_pair[pair_idx] = LiftoffRegister::from_external_code(
if (needs_gp_pair) { rc, lowered_type, loc.AsRegister());
LiftoffRegister high_reg = LiftoffRegister::from_code( } else {
rc, call_descriptor->GetReturnLocation(1).AsRegister()); DCHECK(loc.IsCallerFrameSlot());
DCHECK(GetCacheRegList(rc).has(high_reg)); reg_pair[pair_idx] = GetUnusedRegister(rc, pinned);
return_reg = LiftoffRegister::ForPair(return_reg.gp(), high_reg.gp()); Fill(reg_pair[pair_idx], -return_offset, lowered_type);
} else if (needs_fp_pair) { const int type_size = lowered_type.element_size_bytes();
DCHECK_EQ(0, return_reg.fp().code() % 2); const int slot_size = RoundUp<kSystemPointerSize>(type_size);
return_reg = LiftoffRegister::ForFpPair(return_reg.fp()); return_offset += slot_size;
} }
DCHECK(!cache_state_.is_used(return_reg)); if (pair_idx == 0) {
PushRegister(return_type, return_reg); pinned.set(reg_pair[0]);
}
}
if (num_lowered_params == 1) {
PushRegister(return_type, reg_pair[0]);
} else {
PushRegister(return_type, LiftoffRegister::ForPair(reg_pair[0].gp(),
reg_pair[1].gp()));
}
} }
RecordUsedSpillOffset(TopSpillOffset() + return_offset);
} }
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
......
...@@ -473,45 +473,8 @@ class LiftoffCompiler { ...@@ -473,45 +473,8 @@ class LiftoffCompiler {
LiftoffRegister in_reg = kGpCacheRegList.GetFirstRegSet(); LiftoffRegister in_reg = kGpCacheRegList.GetFirstRegSet();
if (param_loc.IsRegister()) { if (param_loc.IsRegister()) {
DCHECK(!param_loc.IsAnyRegister()); DCHECK(!param_loc.IsAnyRegister());
int reg_code = param_loc.AsRegister(); in_reg = LiftoffRegister::from_external_code(rc, type,
if (!kSimpleFPAliasing && type == kWasmF32) { param_loc.AsRegister());
// Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order
// to pass the f64 code to Liftoff.
DCHECK_EQ(0, reg_code % 2);
reg_code /= 2;
} else if (kNeedS128RegPair && type == kWasmS128) {
// Similarly for double registers and SIMD registers, the SIMD code
// needs to be doubled to pass the f64 code to Liftoff.
reg_code *= 2;
}
RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
: kLiftoffAssemblerFpCacheRegs;
if (cache_regs & (1ULL << reg_code)) {
// This is a cache register, just use it.
if (kNeedS128RegPair && rc == kFpRegPair) {
in_reg =
LiftoffRegister::ForFpPair(DoubleRegister::from_code(reg_code));
} else {
in_reg = LiftoffRegister::from_code(rc, reg_code);
}
} else {
// Move to a cache register (spill one if necessary).
// Note that we cannot create a {LiftoffRegister} for reg_code, since
// {LiftoffRegister} can only store cache regs.
in_reg = __ GetUnusedRegister(rc, pinned);
if (rc == kGpReg) {
__ Move(in_reg.gp(), Register::from_code(reg_code), lowered_type);
} else if (kNeedS128RegPair && rc == kFpRegPair) {
__ Move(in_reg.low_fp(), DoubleRegister::from_code(reg_code),
lowered_type);
} else {
DCHECK_EQ(kFpReg, rc);
__ Move(in_reg.fp(), DoubleRegister::from_code(reg_code),
lowered_type);
}
}
} else if (param_loc.IsCallerFrameSlot()) { } else if (param_loc.IsCallerFrameSlot()) {
in_reg = __ GetUnusedRegister(rc, pinned); in_reg = __ GetUnusedRegister(rc, pinned);
__ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(), __ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
...@@ -2164,14 +2127,11 @@ class LiftoffCompiler { ...@@ -2164,14 +2127,11 @@ class LiftoffCompiler {
void CallDirect(FullDecoder* decoder, void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm, const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) { const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1) { for (ValueType ret : imm.sig->returns()) {
return unsupported(decoder, kMultiValue, "multi-return"); if (!CheckSupportedType(decoder, kSupportedTypes, ret, "return")) {
}
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
"return")) {
return; return;
} }
}
auto call_descriptor = auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig); compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
......
...@@ -183,6 +183,26 @@ class LiftoffRegister { ...@@ -183,6 +183,26 @@ class LiftoffRegister {
} }
} }
// Shifts the register code depending on the type before converting to a
// LiftoffRegister.
static LiftoffRegister from_external_code(RegClass rc, ValueType type,
int code) {
if (!kSimpleFPAliasing && type == kWasmF32) {
// Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order
// to pass the f64 code to Liftoff.
DCHECK_EQ(0, code % 2);
return LiftoffRegister::from_code(rc, code >> 1);
}
if (kNeedS128RegPair && type == kWasmS128) {
// Similarly for double registers and SIMD registers, the SIMD code
// needs to be doubled to pass the f64 code to Liftoff.
return LiftoffRegister::ForFpPair(DoubleRegister::from_code(code << 1));
}
return LiftoffRegister::from_code(rc, code);
}
static LiftoffRegister ForPair(Register low, Register high) { static LiftoffRegister ForPair(Register low, Register high) {
DCHECK(kNeedI64RegPair); DCHECK(kNeedI64RegPair);
DCHECK_NE(low, high); DCHECK_NE(low, high);
......
...@@ -36,7 +36,9 @@ static_assert((kLiftoffAssemblerFpCacheRegs & ...@@ -36,7 +36,9 @@ static_assert((kLiftoffAssemblerFpCacheRegs &
// rbp-8 holds the stack marker, rbp-16 is the instance parameter. // rbp-8 holds the stack marker, rbp-16 is the instance parameter.
constexpr int kInstanceOffset = 16; constexpr int kInstanceOffset = 16;
inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); } inline Operand GetStackSlot(int offset) {
return Operand(offset > 0 ? rbp : rsp, -offset);
}
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr. // TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); } inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment