Commit 6f790cbe authored by Clemens Hammacher, committed by Commit Bot

[Liftoff][x64] Use more hardcoded scratch registers

Liftoff does not use all registers available on x64, so we can use
several hardcoded scratch registers instead of the cache registers,
which might need to be spilled.
This potentially generates smaller and more efficient code because
fewer spills and fills are needed.

R=titzer@chromium.org

Bug: v8:6600
Change-Id: I4ae20a1fb0ddd930d24130612825681752cfba24
Reviewed-on: https://chromium-review.googlesource.com/1146652
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54688}
parent 2e213425
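
The tradeoff the message describes can be made concrete with a small standalone sketch (plain C++; all names are invented for illustration, this is not V8's allocator): when every cache register is occupied, handing out a register first forces a spill to the stack (and a later fill), whereas a register permanently reserved as scratch is free by construction.

```cpp
#include <cstdio>

// Toy model of a register pool. Bit i set in free_mask means register i
// is unused; an empty mask models the "all cache registers occupied" case.
struct ToyAllocator {
  unsigned free_mask;
  int spills = 0;

  int GetUnusedRegister() {
    if (free_mask == 0) {
      ++spills;       // no free register: spill one to the stack first
      free_mask = 1;  // pretend register 0 was spilled and is free now
    }
    int reg = 0;
    while (!(free_mask & (1u << reg))) ++reg;  // lowest free register
    free_mask &= ~(1u << reg);                 // mark it as used
    return reg;
  }
};

// A register excluded from the pool: always usable, never spilled.
constexpr int kToyScratchRegister = 10;

int main() {
  ToyAllocator a{/*free_mask=*/0};  // all cache registers occupied
  int tmp = a.GetUnusedRegister();  // costs a spill (plus a fill later)
  std::printf("allocated r%d, spills so far: %d\n", tmp, a.spills);
  std::printf("scratch r%d, spills so far: 0\n", kToyScratchRegister);
}
```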
@@ -23,6 +23,17 @@ namespace wasm {
 namespace liftoff {
 
+static_assert((kLiftoffAssemblerGpCacheRegs &
+               Register::ListOf<kScratchRegister>()) == 0,
+              "scratch register must not be used as cache registers");
+
+constexpr DoubleRegister kScratchDoubleReg2 = xmm14;
+static_assert(kScratchDoubleReg != kScratchDoubleReg2, "collision");
+static_assert(
+    (kLiftoffAssemblerFpCacheRegs &
+     DoubleRegister::ListOf<kScratchDoubleReg, kScratchDoubleReg2>()) == 0,
+    "scratch registers must not be used as cache registers");
+
 // rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack
 // slot is located at rbp-24.
 constexpr int32_t kConstantStackSpace = 16;
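
These checks cost nothing at runtime because register lists are just bit masks, so the disjointness of scratch and cache registers can be verified entirely at compile time. Here is a standalone sketch of the same idea (simplified types and hypothetical register codes; V8's actual RegList machinery is richer):

```cpp
#include <cstdint>

using RegList = uint32_t;

// Hypothetical x64 GP register codes, for illustration only.
constexpr int kRaxCode = 0, kRcxCode = 1, kRdxCode = 2, kRbxCode = 3;
constexpr int kR10Code = 10;  // r10 serves as kScratchRegister on x64

// Build a bit mask from a pack of register codes (models Register::ListOf).
template <int... Codes>
constexpr RegList ListOf() {
  return ((RegList{1} << Codes) | ... | RegList{0});
}

// Registers the allocator is allowed to hand out.
constexpr RegList kGpCacheRegs =
    ListOf<kRaxCode, kRcxCode, kRdxCode, kRbxCode>();

// If the scratch register were also allocatable, codegen helpers that
// clobber it behind the allocator's back would corrupt live values, so
// any overlap is rejected at compile time.
static_assert((kGpCacheRegs & ListOf<kR10Code>()) == 0,
              "scratch register must not be used as cache register");

int main() {}
```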
@@ -296,9 +307,8 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
                                       ValueType type) {
   DCHECK_NE(dst_index, src_index);
   if (cache_state_.has_unused_register(kGpReg)) {
-    LiftoffRegister reg = GetUnusedRegister(kGpReg);
-    Fill(reg, src_index, type);
-    Spill(dst_index, reg, type);
+    Fill(LiftoffRegister{kScratchRegister}, src_index, type);
+    Spill(dst_index, LiftoffRegister{kScratchRegister}, type);
   } else {
     pushq(liftoff::GetStackSlot(src_index));
     popq(liftoff::GetStackSlot(dst_index));
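
When the mov-based copy is chosen, the stack-slot-to-stack-slot move now always goes through the fixed scratch register, so no allocator query is needed. A runnable model of the pattern in plain C++ (the byte-array "frame" and the Fill/Spill helpers are stand-ins for the real emitted mov instructions):

```cpp
#include <cstdint>
#include <cstring>

constexpr int kSlotSize = 8;
uint8_t frame[32 * kSlotSize];  // models the Liftoff stack frame

uint64_t scratch;  // models kScratchRegister (r10): always available

// Models Fill: load a stack slot into a register.
void Fill(uint64_t& reg, uint32_t src_index) {
  std::memcpy(&reg, frame + src_index * kSlotSize, kSlotSize);
}

// Models Spill: store a register into a stack slot.
void Spill(uint32_t dst_index, const uint64_t& reg) {
  std::memcpy(frame + dst_index * kSlotSize, &reg, kSlotSize);
}

void MoveStackValue(uint32_t dst_index, uint32_t src_index) {
  // Two fixed instructions (load + store), independent of how many
  // cache registers are currently live.
  Fill(scratch, src_index);
  Spill(dst_index, scratch);
}

int main() {
  frame[3 * kSlotSize] = 42;
  MoveStackValue(/*dst_index=*/5, /*src_index=*/3);
  return frame[5 * kSlotSize] == 42 ? 0 : 1;
}
```

The push/pop fallback in the else branch stays: x64 can move memory to memory through the machine stack without touching any register at all.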
@@ -465,10 +475,8 @@ void EmitIntDivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
   // unconditionally, as the cache state will also be modified unconditionally.
   liftoff::SpillRegisters(assm, rdx, rax);
   if (rhs == rax || rhs == rdx) {
-    LiftoffRegList unavailable = LiftoffRegList::ForRegs(rax, rdx, lhs);
-    Register tmp = assm->GetUnusedRegister(kGpReg, unavailable).gp();
-    iop(mov, tmp, rhs);
-    rhs = tmp;
+    iop(mov, kScratchRegister, rhs);
+    rhs = kScratchRegister;
   }
   // Check for division by zero.
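
On x64, idiv implicitly reads the dividend from rdx:rax and writes the quotient to rax and the remainder to rdx, so a divisor that happens to live in rax or rdx must be rescued before those registers are clobbered. Previously that meant asking the allocator for a temporary; now the fixed scratch register takes it. A minimal model of the rescue logic (invented enum and helper, not V8 code):

```cpp
#include <cassert>

enum Reg { rax, rcx, rdx, rbx, r10 };
constexpr Reg kScratch = r10;  // models kScratchRegister

// If the divisor aliases a register that idiv clobbers, move it to the
// scratch register and divide by that instead.
Reg PrepareDivisor(Reg rhs) {
  if (rhs == rax || rhs == rdx) {
    // a real implementation would emit "mov r10, rhs" here
    rhs = kScratch;
  }
  return rhs;
}

int main() {
  assert(PrepareDivisor(rcx) == rcx);       // no conflict: unchanged
  assert(PrepareDivisor(rax) == kScratch);  // would be clobbered by idiv
  assert(PrepareDivisor(rdx) == kScratch);
}
```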
@@ -1098,10 +1106,8 @@ inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
   }
   CpuFeatureScope feature(assm, SSE4_1);
-  LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
-  DoubleRegister rounded =
-      pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp();
-  DoubleRegister converted_back = assm->GetUnusedRegister(kFpReg, pinned).fp();
+  DoubleRegister rounded = kScratchDoubleReg;
+  DoubleRegister converted_back = kScratchDoubleReg2;
   if (std::is_same<double, src_type>::value) {  // f64
     assm->Roundsd(rounded, src, kRoundToZero);
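
The truncation helper needs two floating-point temporaries: the input rounded toward zero, and that value converted to an integer and back. With kScratchDoubleReg and the newly introduced kScratchDoubleReg2 both reserved, neither has to be allocated. The underlying range check can be modeled in portable C++ (std::trunc stands in for Roundsd, and the cast is guarded because out-of-range float-to-int conversion is undefined behavior in C++, unlike cvttsd2si's sentinel result):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Returns true and stores the result iff src truncates to a value that
// fits in int32; mirrors the rounded/converted_back comparison above.
bool TruncateDoubleToInt32(double src, int32_t* dst) {
  double rounded = std::trunc(src);  // models kScratchDoubleReg
  int32_t candidate =
      (rounded >= -2147483648.0 && rounded <= 2147483647.0)
          ? static_cast<int32_t>(rounded)
          : INT32_MIN;  // models cvttsd2si's out-of-range sentinel
  double converted_back = candidate;  // models kScratchDoubleReg2
  if (converted_back != rounded) return false;  // lossy: overflow or NaN
  *dst = candidate;
  return true;
}

int main() {
  int32_t out;
  std::printf("%d\n", TruncateDoubleToInt32(3.9, &out));   // 1 (out == 3)
  std::printf("%d\n", TruncateDoubleToInt32(3e10, &out));  // 0 (overflow)
}
```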