Commit f831905c authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Pass type for register moves

This allows us to emit the best opcode for each register move. The type is
available at all call sites anyway.
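
To illustrate the point, here is a simplified standalone sketch (not V8 code; the mnemonics are the ones the x64 hunk below selects):

#include <cassert>
#include <string>

enum ValueType { kWasmI32, kWasmI64, kWasmF32, kWasmF64 };

// Model of the per-type opcode choice the typed Move overloads enable.
std::string MoveMnemonic(ValueType type, bool is_fp_reg) {
  if (is_fp_reg) return type == kWasmF32 ? "Movss" : "Movsd";
  return type == kWasmI32 ? "movl" : "movq";
}

int main() {
  assert(MoveMnemonic(kWasmI32, false) == "movl");  // 32-bit gp move
  assert(MoveMnemonic(kWasmI64, false) == "movq");  // full 64-bit gp move
  assert(MoveMnemonic(kWasmF32, true) == "Movss");  // single-precision fp
  assert(MoveMnemonic(kWasmF64, true) == "Movsd");  // double-precision fp
  return 0;
}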

R=ahaas@chromium.org

Bug: v8:6600
Change-Id: I8516deff4d8a5480cea9df37cfc003fb9c668e8c
Reviewed-on: https://chromium-review.googlesource.com/910910
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51210}
parent d18c314a
@@ -54,11 +54,17 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+                                            ValueType type) {
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+  UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+                            ValueType type) {
   UNIMPLEMENTED();
 }
......
@@ -54,11 +54,17 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+                                            ValueType type) {
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+  UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+                            ValueType type) {
   UNIMPLEMENTED();
 }
......
@@ -291,29 +291,31 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
   }
 }
 
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+                                            ValueType type) {
   // TODO(wasm): Extract the destination register from the CallDescriptor.
   // TODO(wasm): Add multi-return support.
   LiftoffRegister dst =
       reg.is_pair()
          ? LiftoffRegister::ForPair(LiftoffRegister(eax), LiftoffRegister(edx))
          : reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
-  if (reg != dst) Move(dst, reg);
+  if (reg != dst) Move(dst, reg, type);
 }
 
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
-  // The caller should check that the registers are not equal. For most
-  // occurences, this is already guaranteed, so no need to check within this
-  // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
   DCHECK_NE(dst, src);
-  DCHECK_EQ(dst.reg_class(), src.reg_class());
-  if (src.is_pair()) {
-    if (dst.low_gp() != src.low_gp()) mov(dst.low_gp(), src.low_gp());
-    if (dst.high_gp() != src.high_gp()) mov(dst.high_gp(), src.high_gp());
-  } else if (dst.is_gp()) {
-    mov(dst.gp(), src.gp());
+  DCHECK_EQ(kWasmI32, type);
+  mov(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+                            ValueType type) {
+  DCHECK_NE(dst, src);
+  if (type == kWasmF32) {
+    movss(dst, src);
   } else {
-    movsd(dst.fp(), src.fp());
+    DCHECK_EQ(kWasmF64, type);
+    movsd(dst, src);
   }
 }
......
@@ -83,7 +83,9 @@ class StackTransferRecipe {
     if ((move_dst_regs_ & move_src_regs_).is_empty()) {
       // No overlap in src and dst registers. Just execute the moves in any
      // order.
-      for (RegisterMove& rm : register_moves_) asm_->Move(rm.dst, rm.src);
+      for (RegisterMove& rm : register_moves_) {
+        asm_->Move(rm.dst, rm.src, rm.type);
+      }
       register_moves_.clear();
     } else {
       // Keep use counters of src registers.
@@ -102,7 +104,7 @@ class StackTransferRecipe {
       int executed_moves = 0;
       for (auto& rm : register_moves_) {
         if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
-          asm_->Move(rm.dst, rm.src);
+          asm_->Move(rm.dst, rm.src, rm.type);
           ++executed_moves;
           DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
           --src_reg_use_count[rm.src.liftoff_code()];
@@ -265,9 +267,6 @@ class StackTransferRecipe {
   uint32_t max_used_spill_slot_ = 0;
 };
 
-static constexpr ValueType kWasmIntPtr =
-    kPointerSize == 8 ? kWasmI64 : kWasmI32;
-
 }  // namespace
 
 // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
@@ -581,6 +580,19 @@ void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
   }
 }
 
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
+                            ValueType type) {
+  DCHECK_EQ(dst.reg_class(), src.reg_class());
+  if (kNeedI64RegPair && dst.is_pair()) {
+    if (dst.low() != src.low()) Move(dst.low_gp(), src.low_gp(), kWasmI32);
+    if (dst.high() != src.high()) Move(dst.high_gp(), src.high_gp(), kWasmI32);
+  } else if (dst.is_gp()) {
+    Move(dst.gp(), src.gp(), type);
+  } else {
+    Move(dst.fp(), src.fp(), type);
+  }
+}
+
 LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
                                                    LiftoffRegList pinned) {
   // Spill one cached value to free a register.
......
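
For readers skimming the platform-independent Move added above: on 32-bit targets (kNeedI64RegPair) an i64 value occupies a gp register pair, so one LiftoffRegister move expands to two kWasmI32 moves of the halves. A minimal standalone model of that dispatch (a simplification for illustration, not V8 code):

#include <cassert>

enum ValueType { kWasmI32, kWasmI64, kWasmF32, kWasmF64 };
enum RegClass { kGpReg, kFpReg, kGpRegPair };

// How many single-register moves one LiftoffRegister-to-LiftoffRegister
// move expands to, and which type is passed to each platform Move().
struct MovePlan {
  int num_moves;
  ValueType move_type;
};

constexpr bool kNeedI64RegPair = sizeof(void*) == 4;  // 32-bit targets

MovePlan PlanMove(RegClass rc, ValueType type) {
  if (kNeedI64RegPair && rc == kGpRegPair) {
    // Both halves of an i64 register pair are moved as 32-bit values.
    return {2, kWasmI32};
  }
  return {1, type};  // single gp or fp move, keeps the original type
}

int main() {
  MovePlan plan = PlanMove(kGpReg, kWasmI32);
  assert(plan.num_moves == 1 && plan.move_type == kWasmI32);
  if (kNeedI64RegPair) {
    plan = PlanMove(kGpRegPair, kWasmI64);
    assert(plan.num_moves == 2 && plan.move_type == kWasmI32);
  }
  return 0;
}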
@@ -34,6 +34,9 @@ class LiftoffAssembler : public TurboAssembler {
   // Each slot in our stack frame currently has exactly 8 bytes.
   static constexpr uint32_t kStackSlotSize = 8;
 
+  static constexpr ValueType kWasmIntPtr =
+      kPointerSize == 8 ? kWasmI64 : kWasmI32;
+
   class VarState {
    public:
     enum Location : uint8_t { kStack, kRegister, KIntConst };
@@ -311,6 +314,8 @@ class LiftoffAssembler : public TurboAssembler {
   // Process return values of the call.
   void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
 
+  void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
+
   ////////////////////////////////////
   // Platform-specific part.        //
   ////////////////////////////////////
@@ -332,9 +337,9 @@ class LiftoffAssembler : public TurboAssembler {
                           ValueType);
   inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
 
-  inline void MoveToReturnRegister(LiftoffRegister);
-  // TODO(clemensh): Pass the type to {Move}, to emit more efficient code.
-  inline void Move(LiftoffRegister dst, LiftoffRegister src);
+  inline void MoveToReturnRegister(LiftoffRegister src, ValueType);
+  inline void Move(Register dst, Register src, ValueType);
+  inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
 
   inline void Spill(uint32_t index, LiftoffRegister, ValueType);
   inline void Spill(uint32_t index, WasmValue);
......
@@ -212,19 +212,26 @@ class LiftoffCompiler {
       if (param_loc.IsRegister()) {
         DCHECK(!param_loc.IsAnyRegister());
         int reg_code = param_loc.AsRegister();
-        LiftoffRegister reg =
-            rc == kGpReg ? LiftoffRegister(Register::from_code(reg_code))
-                         : LiftoffRegister(DoubleRegister::from_code(reg_code));
-        LiftoffRegList cache_regs = GetCacheRegList(rc);
-        if (cache_regs.has(reg)) {
+        RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
+                                          : kLiftoffAssemblerFpCacheRegs;
+        if (cache_regs & (1 << reg_code)) {
           // This is a cache register, just use it.
+          LiftoffRegister reg =
+              rc == kGpReg ? LiftoffRegister(Register::from_code(reg_code))
+                           : LiftoffRegister(DoubleRegister::from_code(reg_code));
           __ PushRegister(type, reg);
           return;
         }
         // Move to a cache register.
+        // Note that we cannot create a {LiftoffRegister} for reg_code, since
+        // {LiftoffRegister} can only store cache regs.
         LiftoffRegister cache_reg = __ GetUnusedRegister(rc);
-        __ Move(cache_reg, reg);
-        __ PushRegister(type, reg);
+        if (rc == kGpReg) {
+          __ Move(cache_reg.gp(), Register::from_code(reg_code), type);
+        } else {
+          __ Move(cache_reg.fp(), DoubleRegister::from_code(reg_code), type);
+        }
+        __ PushRegister(type, cache_reg);
         return;
       }
       if (param_loc.IsCallerFrameSlot()) {
@@ -477,7 +484,9 @@ class LiftoffCompiler {
     DCHECK(return_loc.IsRegister());
     Register return_reg = Register::from_code(return_loc.AsRegister());
     if (return_reg != res_reg) {
-      __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg));
+      DCHECK_EQ(MachineRepresentation::kWord32,
+                sig.GetReturn(0).representation());
+      __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg), kWasmI32);
     }
   }
@@ -672,7 +681,7 @@ class LiftoffCompiler {
       if (values.size() > 1) return unsupported(decoder, "multi-return");
       RegClass rc = reg_class_for(values[0].type);
       LiftoffRegister reg = __ PopToRegister(rc);
-      __ MoveToReturnRegister(reg);
+      __ MoveToReturnRegister(reg, values[0].type);
     }
     __ LeaveFrame(StackFrame::WASM_COMPILED);
     __ DropStackSlotsAndRet(
@@ -926,7 +935,8 @@ class LiftoffCompiler {
     compiler::LinkageLocation param_loc = desc->GetInputLocation(kInputShift);
     if (param_loc.IsRegister()) {
       Register reg = Register::from_code(param_loc.AsRegister());
-      __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]));
+      __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]),
+              LiftoffAssembler::kWasmIntPtr);
     } else {
       DCHECK(param_loc.IsCallerFrameSlot());
       __ PushCallerFrameSlot(LiftoffRegister(args[0]));
@@ -1075,7 +1085,7 @@ class LiftoffCompiler {
     if (__ cache_state()->is_used(index)) {
       LiftoffRegister new_index =
           __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index));
-      __ Move(new_index, index);
+      __ Move(new_index, index, kWasmI32);
       index = new_index;
     }
......
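
The call-parameter move above passes LiftoffAssembler::kWasmIntPtr because the argument holds a pointer-sized value, whose wasm type depends on the target word size. A standalone sketch of how that constant resolves (mirroring the declaration added to liftoff-assembler.h; here kPointerSize is modeled with sizeof(void*)):

#include <cstdio>

enum ValueType { kWasmI32, kWasmI64 };

constexpr int kPointerSize = sizeof(void*);

// Pointer-sized integer type: i64 on 64-bit targets, i32 on 32-bit targets.
constexpr ValueType kWasmIntPtr = kPointerSize == 8 ? kWasmI64 : kWasmI32;

int main() {
  // On a 64-bit build this prints 1 (kWasmI64); on a 32-bit build, 0.
  std::printf("kWasmIntPtr = %d\n", static_cast<int>(kWasmIntPtr));
  return 0;
}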
@@ -90,30 +90,26 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+                                            ValueType type) {
   // TODO(wasm): Extract the destination register from the CallDescriptor.
   // TODO(wasm): Add multi-return support.
   LiftoffRegister dst =
       reg.is_pair()
          ? LiftoffRegister::ForPair(LiftoffRegister(v0), LiftoffRegister(v1))
          : reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f0);
-  if (reg != dst) Move(dst, reg);
+  if (reg != dst) Move(dst, reg, type);
 }
 
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
-  // The caller should check that the registers are not equal. For most
-  // occurences, this is already guaranteed, so no need to check within this
-  // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
   DCHECK_NE(dst, src);
-  DCHECK_EQ(dst.reg_class(), src.reg_class());
-  if (src.is_pair()) {
-    TurboAssembler::Move(dst.low_gp(), src.low_gp());
-    TurboAssembler::Move(dst.high_gp(), src.high_gp());
-  } else if (src.is_gp()) {
-    TurboAssembler::mov(dst.gp(), src.gp());
-  } else {
-    TurboAssembler::Move(dst.fp(), src.fp());
-  }
+  TurboAssembler::mov(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+                            ValueType type) {
+  DCHECK_NE(dst, src);
+  TurboAssembler::Move(dst, src);
 }
 
 void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
......
@@ -89,23 +89,22 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+                                            ValueType type) {
   LiftoffRegister dst = reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f0);
-  if (reg != dst) Move(dst, reg);
+  if (reg != dst) Move(dst, reg, type);
 }
 
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
-  // The caller should check that the registers are not equal. For most
-  // occurences, this is already guaranteed, so no need to check within this
-  // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
   DCHECK_NE(dst, src);
-  DCHECK_EQ(dst.reg_class(), src.reg_class());
   // TODO(ksreten): Handle different sizes here.
-  if (dst.is_gp()) {
-    TurboAssembler::Move(dst.gp(), src.gp());
-  } else {
-    TurboAssembler::Move(dst.fp(), src.fp());
-  }
+  TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+                            ValueType type) {
+  DCHECK_NE(dst, src);
+  TurboAssembler::Move(dst, src);
 }
 
 void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
......
@@ -54,11 +54,17 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+                                            ValueType type) {
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+  UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+                            ValueType type) {
   UNIMPLEMENTED();
 }
......
@@ -54,11 +54,17 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+                                            ValueType type) {
   UNIMPLEMENTED();
 }
 
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+  UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+                            ValueType type) {
   UNIMPLEMENTED();
 }
......
@@ -226,25 +226,33 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
   }
 }
 
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+                                            ValueType type) {
   // TODO(wasm): Extract the destination register from the CallDescriptor.
   // TODO(wasm): Add multi-return support.
   LiftoffRegister dst =
       reg.is_gp() ? LiftoffRegister(rax) : LiftoffRegister(xmm1);
-  if (reg != dst) Move(dst, reg);
+  if (reg != dst) Move(dst, reg, type);
 }
 
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
-  // The caller should check that the registers are not equal. For most
-  // occurences, this is already guaranteed, so no need to check within this
-  // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
   DCHECK_NE(dst, src);
-  DCHECK_EQ(dst.reg_class(), src.reg_class());
-  // TODO(clemensh): Handle different sizes here.
-  if (dst.is_gp()) {
-    movq(dst.gp(), src.gp());
+  if (type == kWasmI32) {
+    movl(dst, src);
   } else {
-    Movsd(dst.fp(), src.fp());
+    DCHECK_EQ(kWasmI64, type);
+    movq(dst, src);
   }
 }
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+                            ValueType type) {
+  DCHECK_NE(dst, src);
+  if (type == kWasmF32) {
+    Movss(dst, src);
+  } else {
+    DCHECK_EQ(kWasmF64, type);
+    Movsd(dst, src);
+  }
+}
......