Commit 907c7fad authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Implement i32/i64 to f32 conversions

This adds support for f32.convert_{s,u}/i{32,64}.
On 32-bit platforms, the i64 conversions are implemented by a call to a C
function. Since the signature of this C function is very different from the
signatures of the functions Liftoff currently calls (in particular, it takes
an out parameter), this CL requires a major refactoring of how Liftoff
generates C calls.

R=titzer@chromium.org

Bug: v8:6600
Change-Id: Iffb4d0263ca1ca3eb9c6216344220322cda16062
Reviewed-on: https://chromium-review.googlesource.com/952122
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51841}
parent b0fa9196
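For orientation (an illustration added here, not part of the commit): when a target has no inline code for a conversion, e.g. f32.convert_s/i64 on a 32-bit platform, the new fallback path boils down to the snippet below. It mirrors the EmitTypeConversion/GenerateCCall code added in liftoff-compiler.cc further down; names are V8-internal and the register choices are only sketched.

// Sketch of the fallback taken when emit_type_conversion() returns false.
// {src} holds the i64 input (a register pair on 32-bit), {dst} receives the f32.
ExternalReference ext_ref = ExternalReference::wasm_int64_to_float32(isolate);
ValueType sig_reps[] = {kWasmI64};   // one wasm parameter: the i64 input
FunctionSig sig(0, 1, sig_reps);     // no wasm-level return value: the f32
                                     // result comes back via an out argument
GenerateCCall(&dst, &sig, /*out_argument_type=*/kWasmF32, &src, ext_ref);
// GenerateCCall pushes the argument plus an out slot (PrepareCCall), passes
// pointers to them to the C function (SetCCall*ParamAddr, CallC), reads the
// result back (LoadCCallOutArgument) and restores the stack (FinishCCall).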
......@@ -155,6 +155,13 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_FP_UNOP
#undef UNIMPLEMENTED_SHIFTOP
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
......@@ -207,25 +214,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
const LiftoffRegister* args,
ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
uint32_t num_lowered_args) {
BAILOUT("LoadCCallOutArgument");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
......
......@@ -155,6 +155,13 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_FP_UNOP
#undef UNIMPLEMENTED_SHIFTOP
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
......@@ -207,25 +214,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
const LiftoffRegister* args,
ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
uint32_t num_lowered_args) {
BAILOUT("LoadCCallOutArgument");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
......
......@@ -42,8 +42,48 @@ static_assert((kByteRegs & kGpCacheRegList) == kByteRegs,
"kByteRegs only contains gp cache registers");
// Use this register to store the address of the last argument pushed on the
// stack for a call to C.
static constexpr Register kCCallLastArgAddrReg = eax;
// stack for a call to C. This register must be callee-saved according to the C
// calling convention.
static constexpr Register kCCallLastArgAddrReg = ebx;
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
ValueType type) {
switch (type) {
case kWasmI32:
assm->mov(dst.gp(), src);
break;
case kWasmF32:
assm->movss(dst.fp(), src);
break;
case kWasmF64:
assm->movsd(dst.fp(), src);
break;
default:
UNREACHABLE();
}
}
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type) {
case kWasmI32:
assm->push(reg.gp());
break;
case kWasmI64:
assm->push(reg.high_gp());
assm->push(reg.low_gp());
break;
case kWasmF32:
assm->sub(esp, Immediate(sizeof(float)));
assm->movss(Operand(esp, 0), reg.fp());
break;
case kWasmF64:
assm->sub(esp, Immediate(sizeof(double)));
assm->movsd(Operand(esp, 0), reg.fp());
break;
default:
UNREACHABLE();
}
}
} // namespace liftoff
......@@ -258,19 +298,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
Operand src(ebp, kPointerSize * (caller_slot_idx + 1));
switch (type) {
case kWasmI32:
mov(dst.gp(), src);
break;
case kWasmF32:
movss(dst.fp(), src);
break;
case kWasmF64:
movsd(dst.fp(), src);
break;
default:
UNREACHABLE();
}
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
......@@ -666,6 +694,24 @@ void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
Sqrtsd(dst, src);
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src) {
switch (opcode) {
case kExprF32SConvertI32:
cvtsi2ss(dst.fp(), src.gp());
return true;
case kExprF32UConvertI32: {
LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src);
Register scratch = GetUnusedRegister(kGpReg, pinned).gp();
Cvtui2ss(dst.fp(), src.gp(), scratch);
return true;
}
default:
return false;
}
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
......@@ -781,22 +827,7 @@ void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
switch (type) {
case kWasmI32:
push(reg.gp());
break;
case kWasmF32:
sub(esp, Immediate(sizeof(float)));
movss(Operand(esp, 0), reg.fp());
break;
case kWasmF64:
sub(esp, Immediate(sizeof(double)));
movsd(Operand(esp, 0), reg.fp());
break;
default:
// Also kWasmI64 is unreachable, as it will always be pushed as two halfs.
UNREACHABLE();
}
liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
......@@ -844,36 +875,57 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
for (size_t param = 0; param < num_params; ++param) {
push(args[param]);
}
void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
const LiftoffRegister* args,
ValueType out_argument_type) {
// Save current sp, such that we compute pointers to the values pushed above.
mov(liftoff::kCCallLastArgAddrReg, esp);
constexpr Register kScratch = ebx;
for (ValueType param_type : sig->parameters()) {
liftoff::push(this, *args++, param_type);
}
if (out_argument_type != kWasmStmt) {
int size = WasmOpcodes::MemSize(out_argument_type);
sub(esp, Immediate(std::max(kPointerSize, size)));
}
constexpr Register kScratch = ecx;
static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
PrepareCallCFunction(num_params, kScratch);
PrepareCallCFunction(static_cast<uint32_t>(sig->parameter_count()), kScratch);
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
lea(dst, Operand(liftoff::kCCallLastArgAddrReg, offset));
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_offset,
ValueType type) {
// Check that we don't accidentally overwrite kCCallLastArgAddrReg.
DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
int offset =
kPointerSize * static_cast<int>(param_offset + 1 + needs_reg_pair(type));
lea(dst, Operand(liftoff::kCCallLastArgAddrReg, -offset));
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
constexpr Register kScratch = ebx;
static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
lea(kScratch, Operand(liftoff::kCCallLastArgAddrReg, offset));
mov(Operand(esp, param_idx * kPointerSize), kScratch);
uint32_t param_offset,
ValueType type) {
static constexpr Register kScratch = ecx;
SetCCallRegParamAddr(kScratch, param_offset, type);
mov(Operand(esp, param_offset * kPointerSize), kScratch);
}
void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
uint32_t num_lowered_args) {
// Check that we don't accidentally overwrite kCCallLastArgAddrReg.
DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
int offset = kPointerSize * num_lowered_args;
Operand src(liftoff::kCCallLastArgAddrReg, -offset);
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
CallCFunction(ext_ref, static_cast<int>(num_params));
}
void LiftoffAssembler::FinishCCall() {
mov(esp, liftoff::kCCallLastArgAddrReg);
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
wasm_call(addr, RelocInfo::WASM_CALL);
}
......
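As a reading aid for the new negative-offset arithmetic in the ia32 code above, this is the assumed stack layout after PrepareCCall for the i64-to-f32 case (one i64 parameter plus an f32 out argument); the slot indices are the lowered-argument offsets used by SetCCall*ParamAddr and LoadCCallOutArgument. Illustration only, not code from the CL.

// kCCallLastArgAddrReg (ebx) holds esp as it was before the pushes:
//   ebx -  4 : high word of the pushed i64 parameter
//   ebx -  8 : low word of the pushed i64 parameter;
//              SetCCallRegParamAddr(reg, /*param_offset=*/0, kWasmI64)
//              computes ebx - kPointerSize * (0 + 1 + 1) = ebx - 8, i.e. the
//              address of the i64 value itself.
//   ebx - 12 : f32 out-argument slot (param_offset 2, after the i64's two
//              lowered slots); LoadCCallOutArgument later reads it at
//              ebx - kPointerSize * num_lowered_args = ebx - 12.
// FinishCCall() simply restores esp from ebx.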
......@@ -411,6 +411,10 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src);
// type conversions.
inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst,
LiftoffRegister src);
inline void emit_jump(Label*);
inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
Register rhs = no_reg);
......@@ -435,15 +439,25 @@ class LiftoffAssembler : public TurboAssembler {
inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
// Push arguments on the stack (in the caller frame), then align the stack.
// The address of the last argument will be stored to {arg_addr_dst}. Previous
// arguments will be located at pointer sized buckets above that address.
inline void PrepareCCall(uint32_t num_params, const Register* args);
inline void SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params);
// {PrepareCCall} pushes the arguments on the stack (in the caller frame),
// then aligns the stack to do a c call. Pointers to the pushed arguments are
// later loaded to registers or stack slots via {SetCCall*ParamAddr}. After
// the c call, the output parameter (if it exists) can be loaded via
// {LoadCCallOutArgument}. {FinishCCall} resets the stack pointer to the state
// before {PrepareCCall}.
// The {FunctionSig} passed to {PrepareCCall} describes the types of the
// parameters, which are then passed to the C function via pointers, excluding
// the out argument.
inline void PrepareCCall(wasm::FunctionSig* sig, const LiftoffRegister* args,
ValueType out_argument_type);
inline void SetCCallRegParamAddr(Register dst, uint32_t param_offset,
ValueType type);
inline void SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx, uint32_t num_params);
uint32_t param_offset, ValueType type);
inline void LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
uint32_t num_lowered_args);
inline void CallC(ExternalReference ext_ref, uint32_t num_params);
inline void FinishCCall();
inline void CallNativeWasmCode(Address addr);
inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
......
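The out-argument protocol documented in the new interface comment above targets C helpers of roughly the following shape. This is a sketch of the assumed signature behind ExternalReference::wasm_int64_to_float32; the real wrappers live in wasm-external-refs and may differ in detail (e.g. unaligned accesses).

// Assumed shape of a conversion fallback: the wasm parameter arrives as a
// pointer to the value Liftoff pushed, and the result is written through the
// out-argument pointer instead of being returned.
void int64_to_float32_wrapper(int64_t* input, float* output) {
  *output = static_cast<float>(*input);
}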
......@@ -217,7 +217,7 @@ class LiftoffCompiler {
// Returns the number of inputs processed (1 or 2).
uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
const int num_lowered_params = 1 + (kNeedI64RegPair && type == kWasmI64);
const int num_lowered_params = 1 + needs_reg_pair(type);
// Initialize to anything, will be set in the loop and used afterwards.
LiftoffRegister reg = LiftoffRegister::from_code(kGpReg, 0);
RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
......@@ -465,58 +465,92 @@ class LiftoffCompiler {
void EndControl(Decoder* decoder, Control* c) {}
void GenerateCCall(Register res_reg, uint32_t num_args,
const Register* arg_regs, ExternalReference ext_ref) {
static constexpr int kNumReturns = 1;
enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
void GenerateCCall(const LiftoffRegister* result_regs, FunctionSig* sig,
ValueType out_argument_type,
const LiftoffRegister* arg_regs,
ExternalReference ext_ref) {
static constexpr int kMaxReturns = 1;
static constexpr int kMaxArgs = 2;
static constexpr MachineType kReps[]{
MachineType::Uint32(), MachineType::Pointer(), MachineType::Pointer()};
static_assert(arraysize(kReps) == kNumReturns + kMaxArgs, "mismatch");
static_assert(arraysize(kReps) == kMaxReturns + kMaxArgs, "mismatch");
const bool has_out_argument = out_argument_type != kWasmStmt;
const uint32_t num_returns = static_cast<uint32_t>(sig->return_count());
// {total_num_args} is {num_args + 1} if the return value is stored in an
// out parameter, or {num_args} otherwise.
const uint32_t num_args = static_cast<uint32_t>(sig->parameter_count());
const uint32_t total_num_args = num_args + has_out_argument;
DCHECK_LE(num_args, kMaxArgs);
DCHECK_LE(num_returns, kMaxReturns);
MachineSignature sig(kNumReturns, num_args, kReps);
auto call_descriptor =
compiler::Linkage::GetSimplifiedCDescriptor(compilation_zone_, &sig);
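// {kReps} is the maximal machine signature {i32 (ptr, ptr)}; when there is no
// wasm-level return, skip its leading return type by offsetting into it.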
MachineSignature machine_sig(num_returns, total_num_args,
kReps + (kMaxReturns - num_returns));
auto* call_descriptor = compiler::Linkage::GetSimplifiedCDescriptor(
compilation_zone_, &machine_sig);
// Before making a call, spill all cache registers.
__ SpillAllRegisters();
// Store arguments on our stack, then align the stack for calling to C.
uint32_t num_params =
static_cast<uint32_t>(call_descriptor->ParameterCount());
__ PrepareCCall(num_params, arg_regs);
__ PrepareCCall(sig, arg_regs, out_argument_type);
// Set parameters (in sp[0], sp[8], ...).
// The arguments to the C function are pointers to the stack slots we just
// pushed.
uint32_t num_stack_params = 0;
for (uint32_t param = 0; param < num_params; ++param) {
constexpr size_t kInputShift = 1; // Input 0 is the call target.
uint32_t input_idx = 1; // Input 0 is the call target.
uint32_t num_lowered_args = 0;
auto add_argument = [&](ValueType arg_type) {
compiler::LinkageLocation loc =
call_descriptor->GetInputLocation(param + kInputShift);
call_descriptor->GetInputLocation(input_idx);
if (loc.IsRegister()) {
Register reg = Register::from_code(loc.AsRegister());
// Load address of that parameter to the register.
__ SetCCallRegParamAddr(reg, param, num_params);
__ SetCCallRegParamAddr(reg, num_lowered_args, arg_type);
} else {
DCHECK(loc.IsCallerFrameSlot());
__ SetCCallStackParamAddr(num_stack_params, param, num_params);
__ SetCCallStackParamAddr(num_stack_params, num_lowered_args, arg_type);
++num_stack_params;
}
num_lowered_args += 1 + needs_reg_pair(arg_type);
++input_idx;
};
for (ValueType arg_type : sig->parameters()) {
add_argument(arg_type);
}
if (has_out_argument) {
add_argument(out_argument_type);
}
DCHECK_EQ(input_idx, call_descriptor->InputCount());
// Now execute the call.
__ CallC(ext_ref, num_params);
__ CallC(ext_ref, num_lowered_args);
// Load return value.
compiler::LinkageLocation return_loc =
call_descriptor->GetReturnLocation(0);
DCHECK(return_loc.IsRegister());
Register return_reg = Register::from_code(return_loc.AsRegister());
if (return_reg != res_reg) {
DCHECK_EQ(MachineRepresentation::kWord32,
sig.GetReturn(0).representation());
__ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg), kWasmI32);
const LiftoffRegister* next_result_reg = result_regs;
if (sig->return_count() > 0) {
DCHECK_EQ(1, sig->return_count());
compiler::LinkageLocation return_loc =
call_descriptor->GetReturnLocation(0);
DCHECK(return_loc.IsRegister());
Register return_reg = Register::from_code(return_loc.AsRegister());
if (return_reg != next_result_reg->gp()) {
__ Move(*next_result_reg, LiftoffRegister(return_reg),
sig->GetReturn(0));
}
++next_result_reg;
}
// Load potential return value from output argument.
if (has_out_argument) {
__ LoadCCallOutArgument(*next_result_reg, out_argument_type,
num_lowered_args);
}
// Reset the stack pointer.
__ FinishCCall();
}
template <ValueType type, class EmitFn>
......@@ -535,12 +569,33 @@ class LiftoffCompiler {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if (emit_fn && (asm_->*emit_fn)(dst.gp(), src.gp())) return;
ExternalReference ext_ref = fallback_fn(asm_->isolate());
Register args[] = {src.gp()};
GenerateCCall(dst.gp(), arraysize(args), args, ext_ref);
ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
FunctionSig sig_i_i(1, 1, sig_i_i_reps);
GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src, ext_ref);
};
EmitUnOp<kWasmI32>(emit_with_c_fallback);
}
void EmitTypeConversion(WasmOpcode opcode, ValueType dst_type,
ValueType src_type,
ExternalReference (*fallback_fn)(Isolate*)) {
RegClass src_rc = reg_class_for(src_type);
RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegList pinned;
LiftoffRegister src = pinned.set(__ PopToRegister());
LiftoffRegister dst = src_rc == dst_rc
? __ GetUnusedRegister(dst_rc, {src}, pinned)
: __ GetUnusedRegister(dst_rc, pinned);
if (!__ emit_type_conversion(opcode, dst, src)) {
DCHECK_NOT_NULL(fallback_fn);
ExternalReference ext_ref = fallback_fn(asm_->isolate());
ValueType sig_reps[] = {src_type};
FunctionSig sig(0, 1, sig_reps);
GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
}
__ PushRegister(dst_type, dst);
}
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
......@@ -555,6 +610,11 @@ class LiftoffCompiler {
__ emit_##fn(dst.fp(), src.fp()); \
}); \
break;
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref) \
case WasmOpcode::kExpr##opcode: \
EmitTypeConversion(kExpr##opcode, kWasm##dst_type, kWasm##src_type, \
ext_ref); \
break;
switch (opcode) {
CASE_I32_UNOP(I32Clz, i32_clz)
CASE_I32_UNOP(I32Ctz, i32_ctz)
......@@ -571,11 +631,18 @@ class LiftoffCompiler {
CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt)
CASE_TYPE_CONVERSION(F32SConvertI32, F32, I32, nullptr)
CASE_TYPE_CONVERSION(F32UConvertI32, F32, I32, nullptr)
CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64,
&ExternalReference::wasm_int64_to_float32)
CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64,
&ExternalReference::wasm_uint64_to_float32)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
#undef CASE_I32_UNOP
#undef CASE_FLOAT_UNOP
#undef CASE_TYPE_CONVERSION
}
template <ValueType type, typename EmitFn>
......@@ -635,9 +702,11 @@ class LiftoffCompiler {
case WasmOpcode::kExpr##opcode: \
return EmitMonomorphicBinOp<kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
Register args[] = {lhs.gp(), rhs.gp()}; \
LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(__ isolate()); \
GenerateCCall(dst.gp(), arraysize(args), args, ext_ref); \
ValueType sig_i_ii_reps[] = {kWasmI32, kWasmI32, kWasmI32}; \
FunctionSig sig_i_ii(1, 2, sig_i_ii_reps); \
GenerateCCall(&dst, &sig_i_ii, kWasmStmt, args, ext_ref); \
});
switch (opcode) {
CASE_I32_BINOP(I32Add, i32_add)
......
......@@ -28,9 +28,13 @@ enum RegClass : uint8_t {
enum RegPairHalf : uint8_t { kLowWord, kHighWord };
static inline constexpr bool needs_reg_pair(ValueType type) {
return kNeedI64RegPair && type == kWasmI64;
}
// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
return kNeedI64RegPair && type == kWasmI64 // i64 on 32 bit
return needs_reg_pair(type) // i64 on 32 bit
? kGpRegPair
: type == kWasmI32 || type == kWasmI64 // int types
? kGpReg
......
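A quick illustration of what the factored-out helper means in practice (hypothetical static_asserts, not part of the CL; they assume the RegClass enumerators kGpReg, kFpReg and kGpRegPair and the constexpr ValueType constants declared elsewhere in this header):

// Illustrative only: how the refactored constexpr helpers classify types.
static_assert(reg_class_for(kWasmI32) == kGpReg, "i32 -> gp register");
static_assert(reg_class_for(kWasmF64) == kFpReg, "f64 -> fp register");
// On 32-bit targets kNeedI64RegPair is true, so needs_reg_pair(kWasmI64) holds
// and an i64 is allocated as a pair of gp registers; on 64-bit targets it is a
// single gp register.
static_assert(!kNeedI64RegPair || reg_class_for(kWasmI64) == kGpRegPair,
              "i64 -> gp register pair on 32-bit");
static_assert(kNeedI64RegPair || reg_class_for(kWasmI64) == kGpReg,
              "i64 -> single gp register on 64-bit");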
......@@ -414,6 +414,13 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef FP_BINOP
#undef UNIMPLEMENTED_FP_BINOP
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_jump(Label* label) {
TurboAssembler::Branch(label);
}
......@@ -497,25 +504,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
const LiftoffRegister* args,
ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
uint32_t num_lowered_args) {
BAILOUT("LoadCCallOutArgument");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
......
......@@ -359,6 +359,13 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef FP_BINOP
#undef UNIMPLEMENTED_FP_BINOP
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_jump(Label* label) {
TurboAssembler::Branch(label);
}
......@@ -442,25 +449,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
const LiftoffRegister* args,
ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
uint32_t num_lowered_args) {
BAILOUT("LoadCCallOutArgument");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
......
......@@ -155,6 +155,13 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_FP_UNOP
#undef UNIMPLEMENTED_SHIFTOP
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
......@@ -207,25 +214,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
const LiftoffRegister* args,
ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
uint32_t num_lowered_args) {
BAILOUT("LoadCCallOutArgument");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
......
......@@ -155,6 +155,13 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_FP_UNOP
#undef UNIMPLEMENTED_SHIFTOP
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
......@@ -207,25 +214,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
const LiftoffRegister* args,
ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
uint32_t param_offset,
ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
uint32_t num_lowered_args) {
BAILOUT("LoadCCallOutArgument");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
......
......@@ -36,8 +36,9 @@ inline Operand GetHalfStackSlot(uint32_t half_index) {
inline Operand GetContextOperand() { return Operand(rbp, -16); }
// Use this register to store the address of the last argument pushed on the
// stack for a call to C.
static constexpr Register kCCallLastArgAddrReg = rax;
// stack for a call to C. This register must be callee-saved according to the C
// calling convention.
static constexpr Register kCCallLastArgAddrReg = rbx;
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
uint32_t offset_imm, LiftoffRegList pinned) {
......@@ -49,6 +50,45 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
return Operand(addr, offset, times_1, offset_imm);
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
ValueType type) {
switch (type) {
case kWasmI32:
assm->movl(dst.gp(), src);
break;
case kWasmI64:
assm->movq(dst.gp(), src);
break;
case kWasmF32:
assm->Movss(dst.fp(), src);
break;
case kWasmF64:
assm->Movsd(dst.fp(), src);
break;
default:
UNREACHABLE();
}
}
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type) {
case kWasmI32:
case kWasmI64:
assm->pushq(reg.gp());
break;
case kWasmF32:
assm->subp(rsp, Immediate(kPointerSize));
assm->Movss(Operand(rsp, 0), reg.fp());
break;
case kWasmF64:
assm->subp(rsp, Immediate(kPointerSize));
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
default:
UNREACHABLE();
}
}
} // namespace liftoff
uint32_t LiftoffAssembler::PrepareStackFrame() {
......@@ -203,22 +243,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
switch (type) {
case kWasmI32:
movl(dst.gp(), src);
break;
case kWasmI64:
movq(dst.gp(), src);
break;
case kWasmF32:
Movss(dst.fp(), src);
break;
case kWasmF64:
Movsd(dst.fp(), src);
break;
default:
UNREACHABLE();
}
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
......@@ -616,6 +641,28 @@ void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
Sqrtsd(dst, src);
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src) {
switch (opcode) {
case kExprF32SConvertI32:
Cvtlsi2ss(dst.fp(), src.gp());
return true;
case kExprF32UConvertI32:
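// movl zero-extends the 32-bit value into the 64-bit scratch register;
// converting that as a signed 64-bit integer then yields the correct result
// for the unsigned 32-bit input.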
movl(kScratchRegister, src.gp());
Cvtqsi2ss(dst.fp(), kScratchRegister);
return true;
case kExprF32SConvertI64:
Cvtqsi2ss(dst.fp(), src.gp());
return true;
case kExprF32UConvertI64:
Cvtqui2ss(dst.fp(), src.gp(), kScratchRegister);
return true;
default:
UNREACHABLE();
}
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
......@@ -709,22 +756,7 @@ void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
switch (type) {
case kWasmI32:
case kWasmI64:
pushq(reg.gp());
break;
case kWasmF32:
subp(rsp, Immediate(kPointerSize));
Movss(Operand(rsp, 0), reg.fp());
break;
case kWasmF64:
subp(rsp, Immediate(kPointerSize));
Movsd(Operand(rsp, 0), reg.fp());
break;
default:
UNREACHABLE();
}
liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
......@@ -772,31 +804,53 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
for (size_t param = 0; param < num_params; ++param) {
pushq(args[param]);
}
void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
const LiftoffRegister* args,
ValueType out_argument_type) {
// Save current sp, such that we compute pointers to the values pushed above.
movq(liftoff::kCCallLastArgAddrReg, rsp);
PrepareCallCFunction(num_params);
for (ValueType param_type : sig->parameters()) {
liftoff::push(this, *args++, param_type);
}
if (out_argument_type != kWasmStmt) {
int size = WasmOpcodes::MemSize(out_argument_type);
subq(rsp, Immediate(std::max(kPointerSize, size)));
}
PrepareCallCFunction(static_cast<uint32_t>(sig->parameter_count()));
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
leaq(dst, Operand(liftoff::kCCallLastArgAddrReg, offset));
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_offset,
ValueType type) {
// Check that we don't accidentally overwrite kCCallLastArgAddrReg.
DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
int offset = kPointerSize * static_cast<int>(param_offset + 1);
leaq(dst, Operand(liftoff::kCCallLastArgAddrReg, -offset));
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
uint32_t param_offset,
ValueType type) {
// On x64, all C call arguments fit in registers.
UNREACHABLE();
}
void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
uint32_t num_lowered_args) {
// Check that we don't accidentally overwrite kCCallLastArgAddrReg.
DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
int offset = kPointerSize * num_lowered_args;
Operand src(liftoff::kCCallLastArgAddrReg, -offset);
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
CallCFunction(ext_ref, static_cast<int>(num_params));
}
void LiftoffAssembler::FinishCCall() {
movp(rsp, liftoff::kCCallLastArgAddrReg);
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
near_call(addr, RelocInfo::WASM_CALL);
}
......