Commit ac33b533 authored by Thibaud Michaud, committed by Commit Bot

[liftoff][mv] Support multi-value calls

R=clemensb@chromium.org,ahaas@chromium.org

Bug: v8:10408
Change-Id: Id4f5136e36ab41a18a240e31c7a43bf634be2e44
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2179384
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67644}
parent dc1af6a2
@@ -44,12 +44,14 @@ constexpr int kInstanceOffset = 2 * kSystemPointerSize;
 // Three instructions are required to sub a large constant, movw + movt + sub.
 constexpr int32_t kPatchInstructionsRequired = 3;
-inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+inline MemOperand GetStackSlot(int offset) {
+  return MemOperand(offset > 0 ? fp : sp, -offset);
+}
 inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
-  return MemOperand(fp, -offset + half_offset);
+  return MemOperand(offset > 0 ? fp : sp, -offset + half_offset);
 }
 inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
@@ -41,7 +41,9 @@ namespace liftoff {
 constexpr int kInstanceOffset = 2 * kSystemPointerSize;
-inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+inline MemOperand GetStackSlot(int offset) {
+  return MemOperand(offset > 0 ? fp : sp, -offset);
+}
 inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
@@ -23,12 +23,14 @@ namespace liftoff {
 // ebp-4 holds the stack marker, ebp-8 is the instance parameter.
 constexpr int kInstanceOffset = 8;
-inline Operand GetStackSlot(int offset) { return Operand(ebp, -offset); }
+inline Operand GetStackSlot(int offset) {
+  return Operand(offset > 0 ? ebp : esp, -offset);
+}
 inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
-  return Operand(ebp, -offset + half_offset);
+  return Operand(offset > 0 ? ebp : esp, -offset + half_offset);
 }
 // TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
@@ -12,6 +12,7 @@
 #include "src/compiler/linkage.h"
 #include "src/compiler/wasm-compiler.h"
 #include "src/utils/ostreams.h"
+#include "src/wasm/baseline/liftoff-register.h"
 #include "src/wasm/function-body-decoder-impl.h"
 #include "src/wasm/wasm-linkage.h"
 #include "src/wasm/wasm-opcodes.h"
@@ -676,25 +677,8 @@ void PrepareStackTransfers(const FunctionSig* sig,
       DCHECK(!loc.IsAnyRegister());
       RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type);
       int reg_code = loc.AsRegister();
-      // Initialize to anything, will be set in all branches below.
-      LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
-      if (!kSimpleFPAliasing && type == kWasmF32) {
-        // Liftoff assumes a one-to-one mapping between float registers and
-        // double registers, and so does not distinguish between f32 and f64
-        // registers. The f32 register code must therefore be halved in order
-        // to pass the f64 code to Liftoff.
-        DCHECK_EQ(0, reg_code % 2);
-        reg = LiftoffRegister::from_code(rc, (reg_code / 2));
-      } else if (kNeedS128RegPair && type == kWasmS128) {
-        // Similarly for double registers and SIMD registers, the SIMD code
-        // needs to be doubled to pass the f64 code to Liftoff.
-        reg = LiftoffRegister::ForFpPair(
-            DoubleRegister::from_code(reg_code * 2));
-      } else {
-        reg = LiftoffRegister::from_code(rc, reg_code);
-      }
+      LiftoffRegister reg =
+          LiftoffRegister::from_external_code(rc, type, reg_code);
       param_regs->set(reg);
       if (is_gp_pair) {
         stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset,
@@ -792,7 +776,6 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
   stack_slots.Construct();
   // Execute the stack transfers before filling the instance register.
   stack_transfers.Execute();
-  // Pop parameters from the value stack.
   cache_state_.stack_state.pop_back(num_params);
@@ -807,36 +790,46 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
 void LiftoffAssembler::FinishCall(const FunctionSig* sig,
                                   compiler::CallDescriptor* call_descriptor) {
-  const size_t return_count = sig->return_count();
-  if (return_count != 0) {
-    DCHECK_EQ(1, return_count);
-    ValueType return_type = sig->GetReturn(0);
+  // Offset of the current return value relative to the stack pointer.
+  int return_offset = 0;
+  int call_desc_return_idx = 0;
+  for (ValueType return_type : sig->returns()) {
+    DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount());
     const bool needs_gp_pair = needs_gp_reg_pair(return_type);
-    const bool needs_fp_pair = needs_fp_reg_pair(return_type);
-    DCHECK_EQ(needs_gp_pair ? 2 : 1, call_descriptor->ReturnCount());
-    RegClass rc = needs_gp_pair
-                      ? kGpReg
-                      : needs_fp_pair ? kFpReg : reg_class_for(return_type);
-#if V8_TARGET_ARCH_ARM
-    // If the return register was not d0 for f32, the code value would have to
-    // be halved as is done for the parameter registers.
-    DCHECK_EQ(call_descriptor->GetReturnLocation(0).AsRegister(), 0);
-#endif
-    LiftoffRegister return_reg = LiftoffRegister::from_code(
-        rc, call_descriptor->GetReturnLocation(0).AsRegister());
-    DCHECK(GetCacheRegList(rc).has(return_reg));
-    if (needs_gp_pair) {
-      LiftoffRegister high_reg = LiftoffRegister::from_code(
-          rc, call_descriptor->GetReturnLocation(1).AsRegister());
-      DCHECK(GetCacheRegList(rc).has(high_reg));
-      return_reg = LiftoffRegister::ForPair(return_reg.gp(), high_reg.gp());
-    } else if (needs_fp_pair) {
-      DCHECK_EQ(0, return_reg.fp().code() % 2);
-      return_reg = LiftoffRegister::ForFpPair(return_reg.fp());
-    }
-    DCHECK(!cache_state_.is_used(return_reg));
-    PushRegister(return_type, return_reg);
+    const int num_lowered_params = 1 + needs_gp_pair;
+    const ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
+    const RegClass rc = reg_class_for(lowered_type);
+    // Initialize to anything, will be set in the loop and used afterwards.
+    LiftoffRegister reg_pair[2] = {kGpCacheRegList.GetFirstRegSet(),
+                                   kGpCacheRegList.GetFirstRegSet()};
+    LiftoffRegList pinned;
+    for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
+      compiler::LinkageLocation loc =
+          call_descriptor->GetReturnLocation(call_desc_return_idx++);
+      if (loc.IsRegister()) {
+        DCHECK(!loc.IsAnyRegister());
+        reg_pair[pair_idx] = LiftoffRegister::from_external_code(
+            rc, lowered_type, loc.AsRegister());
+      } else {
+        DCHECK(loc.IsCallerFrameSlot());
+        reg_pair[pair_idx] = GetUnusedRegister(rc, pinned);
+        Fill(reg_pair[pair_idx], -return_offset, lowered_type);
+        const int type_size = lowered_type.element_size_bytes();
+        const int slot_size = RoundUp<kSystemPointerSize>(type_size);
+        return_offset += slot_size;
+      }
+      if (pair_idx == 0) {
+        pinned.set(reg_pair[0]);
+      }
+    }
+    if (num_lowered_params == 1) {
+      PushRegister(return_type, reg_pair[0]);
+    } else {
+      PushRegister(return_type, LiftoffRegister::ForPair(reg_pair[0].gp(),
+                                                         reg_pair[1].gp()));
+    }
   }
+  RecordUsedSpillOffset(TopSpillOffset() + return_offset);
 }
 
 void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
@@ -473,45 +473,8 @@ class LiftoffCompiler {
     LiftoffRegister in_reg = kGpCacheRegList.GetFirstRegSet();
     if (param_loc.IsRegister()) {
       DCHECK(!param_loc.IsAnyRegister());
-      int reg_code = param_loc.AsRegister();
-      if (!kSimpleFPAliasing && type == kWasmF32) {
-        // Liftoff assumes a one-to-one mapping between float registers and
-        // double registers, and so does not distinguish between f32 and f64
-        // registers. The f32 register code must therefore be halved in order
-        // to pass the f64 code to Liftoff.
-        DCHECK_EQ(0, reg_code % 2);
-        reg_code /= 2;
-      } else if (kNeedS128RegPair && type == kWasmS128) {
-        // Similarly for double registers and SIMD registers, the SIMD code
-        // needs to be doubled to pass the f64 code to Liftoff.
-        reg_code *= 2;
-      }
-      RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
-                                        : kLiftoffAssemblerFpCacheRegs;
-      if (cache_regs & (1ULL << reg_code)) {
-        // This is a cache register, just use it.
-        if (kNeedS128RegPair && rc == kFpRegPair) {
-          in_reg =
-              LiftoffRegister::ForFpPair(DoubleRegister::from_code(reg_code));
-        } else {
-          in_reg = LiftoffRegister::from_code(rc, reg_code);
-        }
-      } else {
-        // Move to a cache register (spill one if necessary).
-        // Note that we cannot create a {LiftoffRegister} for reg_code, since
-        // {LiftoffRegister} can only store cache regs.
-        in_reg = __ GetUnusedRegister(rc, pinned);
-        if (rc == kGpReg) {
-          __ Move(in_reg.gp(), Register::from_code(reg_code), lowered_type);
-        } else if (kNeedS128RegPair && rc == kFpRegPair) {
-          __ Move(in_reg.low_fp(), DoubleRegister::from_code(reg_code),
-                  lowered_type);
-        } else {
-          DCHECK_EQ(kFpReg, rc);
-          __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code),
-                  lowered_type);
-        }
-      }
+      in_reg = LiftoffRegister::from_external_code(rc, type,
+                                                   param_loc.AsRegister());
     } else if (param_loc.IsCallerFrameSlot()) {
       in_reg = __ GetUnusedRegister(rc, pinned);
       __ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
@@ -2164,14 +2127,11 @@ class LiftoffCompiler {
   void CallDirect(FullDecoder* decoder,
                   const CallFunctionImmediate<validate>& imm,
                   const Value args[], Value returns[]) {
-    if (imm.sig->return_count() > 1) {
-      return unsupported(decoder, kMultiValue, "multi-return");
-    }
-    if (imm.sig->return_count() == 1 &&
-        !CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
-                            "return")) {
+    for (ValueType ret : imm.sig->returns()) {
+      if (!CheckSupportedType(decoder, kSupportedTypes, ret, "return")) {
         return;
       }
+    }
     auto call_descriptor =
         compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
@@ -183,6 +183,26 @@ class LiftoffRegister {
     }
   }
 
+  // Shifts the register code depending on the type before converting to a
+  // LiftoffRegister.
+  static LiftoffRegister from_external_code(RegClass rc, ValueType type,
+                                            int code) {
+    if (!kSimpleFPAliasing && type == kWasmF32) {
+      // Liftoff assumes a one-to-one mapping between float registers and
+      // double registers, and so does not distinguish between f32 and f64
+      // registers. The f32 register code must therefore be halved in order
+      // to pass the f64 code to Liftoff.
+      DCHECK_EQ(0, code % 2);
+      return LiftoffRegister::from_code(rc, code >> 1);
+    }
+    if (kNeedS128RegPair && type == kWasmS128) {
+      // Similarly for double registers and SIMD registers, the SIMD code
+      // needs to be doubled to pass the f64 code to Liftoff.
+      return LiftoffRegister::ForFpPair(DoubleRegister::from_code(code << 1));
+    }
+    return LiftoffRegister::from_code(rc, code);
+  }
+
   static LiftoffRegister ForPair(Register low, Register high) {
     DCHECK(kNeedI64RegPair);
     DCHECK_NE(low, high);
@@ -36,7 +36,9 @@ static_assert((kLiftoffAssemblerFpCacheRegs &
 // rbp-8 holds the stack marker, rbp-16 is the instance parameter.
 constexpr int kInstanceOffset = 16;
-inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); }
+inline Operand GetStackSlot(int offset) {
+  return Operand(offset > 0 ? rbp : rsp, -offset);
+}
 // TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
 inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }