Commit c6269350 authored by Clemens Backes, committed by Commit Bot

[liftoff] Fix handling of pinned registers

Pinned registers were not considered correctly when taking a volatile
register. This CL refactors handling of the pinned register list by
combining the candidates list and the pinned list early. This avoids
additional parameters on some functions and might save some redundant
masking.
As a side effect, it also fixes the DCHECK failure on arm.

R=ahaas@chromium.org

Bug: chromium:1179025
Change-Id: Ib9193b209c5741ea97fd1d0dffeeb9e824639439
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2699254
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72812}
parent 98966f56
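
The shape of the refactoring, as a minimal sketch (simplified from the diffs below, not the literal V8 code): instead of threading a separate {pinned} list through every helper and masking it out at each use site, callers now mask once up front and pass a single pre-filtered candidate list.

// Before: {candidates} and {pinned} travel separately, and every path
// must remember to mask out {pinned}; the take-volatile path did not.
LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
                                  LiftoffRegList pinned = {});

// After: pinned registers are removed from the candidate set once, so
// no later path can hand out a pinned register by accident.
LiftoffRegList candidates = kGpCacheRegList.MaskOut(pinned);
LiftoffRegister reg = GetUnusedRegister(candidates);
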
@@ -157,7 +157,7 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
if (candidate.is_byte_register()) return candidate;
// {GetUnusedRegister()} may insert move instructions to spill registers to
// the stack. This is OK because {mov} does not change the status flags.
- return assm->GetUnusedRegister(liftoff::kByteRegs, {}).gp();
+ return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
}
inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
@@ -478,7 +478,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
LiftoffRegList pinned_byte = pinned | LiftoffRegList::ForRegs(dst_addr);
if (offset_reg != no_reg) pinned_byte.set(offset_reg);
Register byte_src =
-     GetUnusedRegister(liftoff::kByteRegs, pinned_byte).gp();
+     GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned_byte)).gp();
mov(byte_src, src.gp());
mov_b(dst_op, byte_src);
}
@@ -572,11 +572,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
// If there are no unused candidate registers, but {src} is a candidate,
// then spill other uses of {src}. Otherwise spill any candidate register
// and use that.
- if (!cache_state_.has_unused_register(src_candidates, pinned) &&
+ LiftoffRegList unpinned_candidates = src_candidates.MaskOut(pinned);
+ if (!cache_state_.has_unused_register(unpinned_candidates) &&
src_candidates.has(src)) {
SpillRegister(src);
} else {
- Register safe_src = GetUnusedRegister(src_candidates, pinned).gp();
+ Register safe_src = GetUnusedRegister(unpinned_candidates).gp();
mov(safe_src, src_gp);
src_gp = safe_src;
}
@@ -624,7 +625,7 @@ inline void AtomicAddOrSubOrExchange32(LiftoffAssembler* lasm, Binop binop,
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
Register safe_value_reg =
-     __ GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+     __ GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned)).gp();
__ mov(safe_value_reg, value_reg);
value_reg = safe_value_reg;
}
@@ -991,7 +992,8 @@ void LiftoffAssembler::AtomicCompareExchange(
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
Register safe_value_reg =
-     pinned.set(GetUnusedRegister(liftoff::kByteRegs, pinned)).gp();
+     pinned.set(GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned)))
+         .gp();
mov(safe_value_reg, value_reg);
value_reg = safe_value_reg;
pinned.clear(LiftoffRegister(value_reg));
@@ -1126,10 +1126,9 @@ bool LiftoffAssembler::ValidateCacheState() const {
}
#endif
- LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
-                                                    LiftoffRegList pinned) {
+ LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates) {
// Spill one cached value to free a register.
- LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
+ LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates);
SpillRegister(spill_reg);
return spill_reg;
}
@@ -1161,7 +1160,7 @@ LiftoffRegister LiftoffAssembler::SpillAdjacentFpRegisters(
// b. If used, spill it.
// We spill one register in 2 and 3a, and two registers in 3b.
- LiftoffRegister first_reg = GetUnusedRegister(kFpCacheRegList, pinned);
+ LiftoffRegister first_reg = GetUnusedRegister(kFpReg, pinned);
LiftoffRegister second_reg = first_reg, low_reg = first_reg;
if (first_reg.fp().code() % 2 == 0) {
@@ -206,13 +206,11 @@ class LiftoffAssembler : public TurboAssembler {
}
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
- return has_unused_register(candidates, pinned);
+ return has_unused_register(candidates.MaskOut(pinned));
}
- bool has_unused_register(LiftoffRegList candidates,
-                          LiftoffRegList pinned = {}) const {
-   LiftoffRegList available_regs =
-       candidates.MaskOut(used_registers).MaskOut(pinned);
+ bool has_unused_register(LiftoffRegList candidates) const {
+   LiftoffRegList available_regs = candidates.MaskOut(used_registers);
return !available_regs.is_empty();
}
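
For context, MaskOut is a set difference on register lists; a minimal sketch of its semantics (the real definition lives elsewhere in the Liftoff sources and may differ in detail):

// Returns the registers of this list that are NOT in {mask},
// i.e. an AND-NOT over the underlying bit set of registers.
constexpr LiftoffRegList MaskOut(const LiftoffRegList mask) const {
  return FromBits(regs_ & ~mask.regs_);
}
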
@@ -272,8 +270,9 @@ class LiftoffAssembler : public TurboAssembler {
Register TrySetCachedInstanceRegister(LiftoffRegList pinned) {
DCHECK_EQ(no_reg, cached_instance);
- if (!has_unused_register(kGpCacheRegList, pinned)) return no_reg;
- SetInstanceCacheRegister(unused_register(kGpCacheRegList, pinned).gp());
+ LiftoffRegList candidates = kGpCacheRegList.MaskOut(pinned);
+ if (!has_unused_register(candidates)) return no_reg;
+ SetInstanceCacheRegister(unused_register(candidates).gp());
DCHECK_NE(no_reg, cached_instance);
return cached_instance;
}
@@ -340,15 +339,13 @@ class LiftoffAssembler : public TurboAssembler {
memset(register_use_count, 0, sizeof(register_use_count));
}
- LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
-                                 LiftoffRegList pinned = {}) {
-   LiftoffRegList unpinned = candidates.MaskOut(pinned);
-   DCHECK(!unpinned.is_empty());
+ LiftoffRegister GetNextSpillReg(LiftoffRegList candidates) {
+   DCHECK(!candidates.is_empty());
// This method should only be called if none of the candidates is free.
-   DCHECK(unpinned.MaskOut(used_registers).is_empty());
-   LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
+   DCHECK(candidates.MaskOut(used_registers).is_empty());
+   LiftoffRegList unspilled = candidates.MaskOut(last_spilled_regs);
if (unspilled.is_empty()) {
-     unspilled = unpinned;
+     unspilled = candidates;
last_spilled_regs = {};
}
LiftoffRegister reg = unspilled.GetFirstRegSet();
@@ -467,9 +464,9 @@ class LiftoffAssembler : public TurboAssembler {
// Get an unused register for class {rc}, potentially spilling to free one.
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
if (kNeedI64RegPair && rc == kGpRegPair) {
-   LiftoffRegList candidates = kGpCacheRegList;
-   Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
-   Register high = GetUnusedRegister(candidates, pinned).gp();
+   LiftoffRegList candidates = kGpCacheRegList.MaskOut(pinned);
+   Register low = candidates.clear(GetUnusedRegister(candidates)).gp();
+   Register high = GetUnusedRegister(candidates).gp();
return LiftoffRegister::ForPair(low, high);
} else if (kNeedS128RegPair && rc == kFpRegPair) {
// kFpRegPair specific logic here because we need adjacent registers, not
@@ -481,22 +478,20 @@ class LiftoffAssembler : public TurboAssembler {
return LiftoffRegister::ForFpPair(low_fp);
}
DCHECK(rc == kGpReg || rc == kFpReg);
- LiftoffRegList candidates = GetCacheRegList(rc);
- return GetUnusedRegister(candidates, pinned);
+ LiftoffRegList candidates = GetCacheRegList(rc).MaskOut(pinned);
+ return GetUnusedRegister(candidates);
}
// Get an unused register of {candidates}, potentially spilling to free one.
- LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
-                                   LiftoffRegList pinned = {}) {
-   if (cache_state_.has_unused_register(candidates, pinned)) {
-     return cache_state_.unused_register(candidates, pinned);
+ LiftoffRegister GetUnusedRegister(LiftoffRegList candidates) {
+   DCHECK(!candidates.is_empty());
+   if (cache_state_.has_unused_register(candidates)) {
+     return cache_state_.unused_register(candidates);
}
if (cache_state_.has_volatile_register(candidates)) {
-   LiftoffRegister reg = cache_state_.take_volatile_register(candidates);
-   DCHECK(!pinned.has(reg));
-   return reg;
+   return cache_state_.take_volatile_register(candidates);
}
- return SpillOneRegister(candidates, pinned);
+ return SpillOneRegister(candidates);
}
void MaterializeMergedConstants(uint32_t arity);
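
This hunk is the actual bug fix. Before the change, take_volatile_register searched the full candidate set, and only a DCHECK guarded against the result being pinned:

// Old volatile-register path: {pinned} was only consulted by a DCHECK,
// which is compiled out of release builds, so a pinned register could
// be handed out here.
LiftoffRegister reg = cache_state_.take_volatile_register(candidates);
DCHECK(!pinned.has(reg));

With pinned registers masked out of {candidates} before the call, the volatile path can no longer return a pinned register, and the DCHECK becomes redundant.
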
@@ -1433,8 +1428,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
- V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
-                                              LiftoffRegList pinned);
+ V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
// Spill one or two fp registers to get a pair of adjacent fp registers.
LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
};
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --wasm-staging
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
builder.addMemory(1, 1, false, true);
builder.addType(makeSig([], []));
builder.addType(makeSig([kWasmI64], [kWasmF32]));
// Generate function 1 (out of 2).
builder.addFunction(undefined, 0 /* sig */)
.addBodyWithEnd([
// signature: v_v
// body:
kExprNop, // nop
kExprEnd, // end @2
]);
// Generate function 2 (out of 2).
builder.addFunction(undefined, 1 /* sig */)
.addLocals(kWasmI64, 1)
.addBodyWithEnd([
// signature: f_l
// body:
kExprBlock, kWasmF32, // block @3 f32
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x01, // i32.const
kExprIf, kWasmI64, // if @9 i64
kExprI64Const, 0x00, // i64.const
kExprElse, // else @13
kExprUnreachable, // unreachable
kExprEnd, // end @15
kAtomicPrefix, kExprI64AtomicStore, 0x03, 0x04, // i64.atomic.store64
kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
kExprEnd, // end @25
kExprDrop, // drop
kExprF32Const, 0x00, 0x00, 0x80, 0x51, // f32.const
kExprEnd, // end @32
]);
builder.instantiate();
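
A plausible way to run this regression test from the root of a V8 checkout (illustrative only: the test's file name and the build directory are not shown on this page; --wasm-staging matches the Flags line above):

out/Debug/d8 --wasm-staging test/mjsunit/regress/wasm/<this-test>.js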