Commit 4a601911 authored by Ross McIlroy, committed by Commit Bot

[TurboProp] Add support for deferred block spills in fast reg alloc

Adds support for avoiding spills in non-deferred blocks by instead
restricting the spill ranges to deferred blocks if the virtual
register is only spilled in deferred blocks.

It does this by tracking registers that reach the exit point of deferred
blocks and spilling them pre-emptively in the deferred block while
treating them as committed from the point of view of the non-deferred
blocks. We also now track whether virtual registers need to be spilled
at their SSA definition point (where they are output by an instruction),
or can instead be spilled at the entry to deferred blocks for use as
spill slots within those deferred blocks. In both cases, the tracking
of these deferred spills is kept as a pending operation until the
allocator confirms that adding these spills will avoid spills in the
non-deferred pathways, to avoid adding unnecessary extra spills in
deferred blocks.

BUG=v8:9684

Change-Id: Ib151e795567f0e4e7f95538415a8cc117d235b64
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2440603
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70374}
parent a19cf8e2
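
As a rough illustration of the spill-placement policy described above, here is a minimal, self-contained toy sketch (illustrative names only, not V8's actual classes) mirroring the decision made in AddSpillUse(): a spill use inside a deferred block only records a pending spill at the deferred region's entry, while any spill use in a non-deferred block forces a definite spill at the output and discards the pending deferred spills, as MarkAsNeedsSpillAtOutput() does via ClearDeferredBlockSpills().

// Toy sketch only: ToyVirtualRegister is illustrative, not a V8 type.
#include <iostream>
#include <set>

struct ToyVirtualRegister {
  bool needs_spill_at_output = false;
  std::set<int> pending_deferred_spills;  // entry instructions of deferred regions

  // A spill use in a deferred block only pends a spill at the region entry;
  // a spill use in a non-deferred block forces a spill at the output and
  // clears any pending deferred spills.
  void AddSpillUse(bool in_deferred_block, int region_entry_instr) {
    if (in_deferred_block && !needs_spill_at_output) {
      pending_deferred_spills.insert(region_entry_instr);
    } else {
      needs_spill_at_output = true;
      pending_deferred_spills.clear();
    }
  }
};

int main() {
  ToyVirtualRegister vreg;
  vreg.AddSpillUse(/*in_deferred_block=*/true, /*region_entry_instr=*/12);
  vreg.AddSpillUse(true, 20);
  std::cout << vreg.needs_spill_at_output << " "             // 0: no output spill
            << vreg.pending_deferred_spills.size() << "\n";  // 2: pending spills
  vreg.AddSpillUse(false, -1);  // a use that spills in non-deferred code
  std::cout << vreg.needs_spill_at_output << " "             // 1: spill at output
            << vreg.pending_deferred_spills.size() << "\n";  // 0: pending cleared
}
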
@@ -23,6 +23,7 @@ namespace internal {
namespace compiler {
class RegisterState;
class DeferredBlocksRegion;
// BlockState stores details associated with a particular basic block.
class BlockState final {
@@ -30,8 +31,10 @@ class BlockState final {
BlockState(int block_count, Zone* zone)
: general_registers_in_state_(nullptr),
double_registers_in_state_(nullptr),
deferred_blocks_region_(nullptr),
dominated_blocks_(block_count, zone),
successors_phi_index_(-1) {}
successors_phi_index_(-1),
is_deferred_block_boundary_(false) {}
// Returns the RegisterState that applies to the input of this block. Can be
// |nullptr| if no registers of |kind| have been allocated up to this
@@ -51,14 +54,34 @@ class BlockState final {
successors_phi_index_ = index;
}
// If this block is deferred, this represents the region of deferred blocks
// that are directly reachable from this block.
DeferredBlocksRegion* deferred_blocks_region() const {
return deferred_blocks_region_;
}
void set_deferred_blocks_region(DeferredBlocksRegion* region) {
DCHECK_NULL(deferred_blocks_region_);
deferred_blocks_region_ = region;
}
// Returns true if this block represents either a transition from
// non-deferred to deferred or vice versa.
bool is_deferred_block_boundary() const {
return is_deferred_block_boundary_;
}
void MarkAsDeferredBlockBoundary() { is_deferred_block_boundary_ = true; }
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BlockState);
private:
RegisterState* general_registers_in_state_;
RegisterState* double_registers_in_state_;
DeferredBlocksRegion* deferred_blocks_region_;
BitVector dominated_blocks_;
int successors_phi_index_;
bool is_deferred_block_boundary_;
};
RegisterState* BlockState::register_in_state(RegisterKind kind) {
@@ -225,6 +248,32 @@ class Range {
int end_;
};
// Represents a connected region of deferred basic blocks.
class DeferredBlocksRegion final {
public:
explicit DeferredBlocksRegion(Zone* zone, int number_of_blocks)
: spilled_vregs_(zone), blocks_covered_(number_of_blocks, zone) {}
void AddBlock(RpoNumber block, MidTierRegisterAllocationData* data) {
DCHECK(data->GetBlock(block)->IsDeferred());
blocks_covered_.Add(block.ToInt());
data->block_state(block).set_deferred_blocks_region(this);
}
// Adds |vreg| to the set of virtual registers whose output spill may be
// deferred until entry to this deferred block region.
void DeferSpillOutputUntilEntry(int vreg) { spilled_vregs_.insert(vreg); }
ZoneSet<int>::iterator begin() const { return spilled_vregs_.begin(); }
ZoneSet<int>::iterator end() const { return spilled_vregs_.end(); }
const BitVector* blocks_covered() const { return &blocks_covered_; }
private:
ZoneSet<int> spilled_vregs_;
BitVector blocks_covered_;
};
// VirtualRegisterData stores data specific to a particular virtual register,
// and tracks spilled operands for that virtual register.
class VirtualRegisterData final {
@@ -233,11 +282,15 @@ class VirtualRegisterData final {
// Define VirtualRegisterData with the type of output that produces this
// virtual register.
void DefineAsUnallocatedOperand(int virtual_register, int instr_index);
void DefineAsUnallocatedOperand(int virtual_register, int instr_index,
bool is_deferred_block);
void DefineAsFixedSpillOperand(AllocatedOperand* operand,
int virtual_register, int instr_index);
void DefineAsConstantOperand(ConstantOperand* operand, int instr_index);
void DefineAsPhi(int virtual_register, int instr_index);
int virtual_register, int instr_index,
bool is_deferred_block);
void DefineAsConstantOperand(ConstantOperand* operand, int instr_index,
bool is_deferred_block);
void DefineAsPhi(int virtual_register, int instr_index,
bool is_deferred_block);
// Spill an operand that is assigned to this virtual register.
void SpillOperand(InstructionOperand* operand, int instr_index,
@@ -254,6 +307,12 @@ class VirtualRegisterData final {
void EmitGapMoveToSpillSlot(AllocatedOperand from_operand, int instr_index,
MidTierRegisterAllocationData* data);
// Adds pending spills for deferred-blocks.
void AddDeferredSpillUse(int instr_index,
MidTierRegisterAllocationData* data);
void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
MidTierRegisterAllocationData* data);
// Accessors for spill operand, which may still be pending allocation.
bool HasSpillOperand() const { return spill_operand_ != nullptr; }
InstructionOperand* spill_operand() const {
@@ -271,7 +330,29 @@ class VirtualRegisterData final {
DCHECK_EQ(is_constant(), HasSpillOperand() && spill_operand_->IsConstant());
return is_constant();
}
bool NeedsSpillAtOutput() const;
// Returns true if the virtual register should be spilled when it is output.
bool NeedsSpillAtOutput() const { return needs_spill_at_output_; }
void MarkAsNeedsSpillAtOutput() {
if (is_constant()) return;
needs_spill_at_output_ = true;
if (HasSpillRange()) spill_range()->ClearDeferredBlockSpills();
}
// Returns true if the virtual register should be spilled at entry to the
// deferred blocks in which it is used (to avoid spilling at its output in
// non-deferred blocks).
bool NeedsSpillAtDeferredBlocks() const;
void EmitDeferredSpillOutputs(MidTierRegisterAllocationData* data);
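// Returns true if the virtual register's value is known to already be in its
// spill slot at |instr_index|: always once it is spilled at output or has a
// constant spill operand, and within deferred blocks once it has any spill
// operand.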
bool IsSpilledAt(int instr_index, MidTierRegisterAllocationData* data) {
DCHECK_GE(instr_index, output_instr_index());
if (NeedsSpillAtOutput() || HasConstantSpillOperand()) return true;
if (HasSpillOperand() && data->GetBlock(instr_index)->IsDeferred()) {
return true;
}
return false;
}
// Allocates pending spill operands to the |allocated| spill slot.
void AllocatePendingSpillOperand(const AllocatedOperand& allocated);
@@ -279,9 +360,21 @@ class VirtualRegisterData final {
int vreg() const { return vreg_; }
int output_instr_index() const { return output_instr_index_; }
bool is_constant() const { return is_constant_; }
bool is_phi() const { return is_phi_; }
void set_is_phi(bool value) { is_phi_ = value; }
bool is_defined_in_deferred_block() const {
return is_defined_in_deferred_block_;
}
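// Represents a spill of this virtual register's output into its spill slot
// at |instr_index|, the entry of a deferred block region, together with the
// set of blocks in which that spill slot is live.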
struct DeferredSpillSlotOutput {
public:
explicit DeferredSpillSlotOutput(int instr, AllocatedOperand op,
const BitVector* blocks)
: instr_index(instr), operand(op), live_blocks(blocks) {}
int instr_index;
AllocatedOperand operand;
const BitVector* live_blocks;
};
// Represents the range of instructions for which this virtual register needs
// to be spilled on the stack.
@@ -290,15 +383,17 @@ class VirtualRegisterData final {
// Defines a spill range for an output operand.
SpillRange(int definition_instr_index, MidTierRegisterAllocationData* data)
: live_range_(definition_instr_index, definition_instr_index),
live_blocks_(data->GetBlocksDominatedBy(definition_instr_index)) {}
live_blocks_(data->GetBlocksDominatedBy(definition_instr_index)),
deferred_spill_outputs_(nullptr) {}
// Defines a spill range for a Phi variable.
SpillRange(const InstructionBlock* phi_block,
MidTierRegisterAllocationData* data)
: live_range_(phi_block->first_instruction_index(),
phi_block->first_instruction_index()),
live_blocks_(data->GetBlocksDominatedBy(
phi_block->first_instruction_index())) {
live_blocks_(
data->GetBlocksDominatedBy(phi_block->first_instruction_index())),
deferred_spill_outputs_(nullptr) {
// For phis, add the gap move instructions in the predecessor blocks to
// the live range.
for (RpoNumber pred_rpo : phi_block->predecessors()) {
@@ -308,17 +403,59 @@ class VirtualRegisterData final {
}
bool IsLiveAt(int instr_index, InstructionBlock* block) {
return live_range_.Contains(instr_index) &&
live_blocks_->Contains(block->rpo_number().ToInt());
if (!live_range_.Contains(instr_index)) return false;
int block_rpo = block->rpo_number().ToInt();
if (!live_blocks_->Contains(block_rpo)) return false;
if (!HasDeferredBlockSpills()) {
return true;
} else {
// If this spill range is only output in deferred blocks, then the spill
// slot will only be live in those deferred blocks, not in all blocks in
// which the virtual register is live.
for (auto deferred_spill_output : *deferred_spill_outputs()) {
if (deferred_spill_output.live_blocks->Contains(block_rpo)) {
return true;
}
}
return false;
}
}
void ExtendRangeTo(int instr_index) { live_range_.AddInstr(instr_index); }
void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
MidTierRegisterAllocationData* data) {
if (deferred_spill_outputs_ == nullptr) {
Zone* zone = data->allocation_zone();
deferred_spill_outputs_ =
zone->New<ZoneVector<DeferredSpillSlotOutput>>(zone);
}
const InstructionBlock* block = data->GetBlock(instr_index);
DCHECK_EQ(block->first_instruction_index(), instr_index);
BlockState& block_state = data->block_state(block->rpo_number());
const BitVector* deferred_blocks =
block_state.deferred_blocks_region()->blocks_covered();
deferred_spill_outputs_->emplace_back(instr_index, allocated_op,
deferred_blocks);
}
void ClearDeferredBlockSpills() { deferred_spill_outputs_ = nullptr; }
bool HasDeferredBlockSpills() const {
return deferred_spill_outputs_ != nullptr;
}
const ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs() const {
DCHECK(HasDeferredBlockSpills());
return deferred_spill_outputs_;
}
Range& live_range() { return live_range_; }
private:
Range live_range_;
const BitVector* live_blocks_;
ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs_;
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
@@ -331,11 +468,13 @@ class VirtualRegisterData final {
private:
void Initialize(int virtual_register, InstructionOperand* spill_operand,
int instr_index, bool is_phi, bool is_constant);
int instr_index, bool is_phi, bool is_constant,
bool is_defined_in_deferred_block);
void AddPendingSpillOperand(PendingOperand* pending_operand);
void AddSpillUse(int instr_index, MidTierRegisterAllocationData* data);
void AddPendingSpillOperand(PendingOperand* pending_operand);
void EnsureSpillRange(MidTierRegisterAllocationData* data);
bool CouldSpillOnEntryToDeferred(const InstructionBlock* block);
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
@@ -344,6 +483,8 @@ class VirtualRegisterData final {
int vreg_;
bool is_phi_ : 1;
bool is_constant_ : 1;
bool is_defined_in_deferred_block_ : 1;
bool needs_spill_at_output_ : 1;
};
VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
@@ -356,33 +497,44 @@ VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
void VirtualRegisterData::Initialize(int virtual_register,
InstructionOperand* spill_operand,
int instr_index, bool is_phi,
bool is_constant) {
bool is_constant,
bool is_defined_in_deferred_block) {
vreg_ = virtual_register;
spill_operand_ = spill_operand;
spill_range_ = nullptr;
output_instr_index_ = instr_index;
is_phi_ = is_phi;
is_constant_ = is_constant;
is_defined_in_deferred_block_ = is_defined_in_deferred_block;
needs_spill_at_output_ = !is_constant_ && spill_operand_ != nullptr;
}
void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand,
int instr_index) {
Initialize(operand->virtual_register(), operand, instr_index, false, true);
int instr_index,
bool is_deferred_block) {
Initialize(operand->virtual_register(), operand, instr_index, false, true,
is_deferred_block);
}
void VirtualRegisterData::DefineAsFixedSpillOperand(AllocatedOperand* operand,
int virtual_register,
int instr_index) {
Initialize(virtual_register, operand, instr_index, false, false);
int instr_index,
bool is_deferred_block) {
Initialize(virtual_register, operand, instr_index, false, false,
is_deferred_block);
}
void VirtualRegisterData::DefineAsUnallocatedOperand(int virtual_register,
int instr_index) {
Initialize(virtual_register, nullptr, instr_index, false, false);
int instr_index,
bool is_deferred_block) {
Initialize(virtual_register, nullptr, instr_index, false, false,
is_deferred_block);
}
void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index) {
Initialize(virtual_register, nullptr, instr_index, true, false);
void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index,
bool is_deferred_block) {
Initialize(virtual_register, nullptr, instr_index, true, false,
is_deferred_block);
}
void VirtualRegisterData::EnsureSpillRange(
@@ -407,8 +559,38 @@ void VirtualRegisterData::EnsureSpillRange(
void VirtualRegisterData::AddSpillUse(int instr_index,
MidTierRegisterAllocationData* data) {
if (is_constant()) return;
EnsureSpillRange(data);
spill_range_->ExtendRangeTo(instr_index);
const InstructionBlock* block = data->GetBlock(instr_index);
if (CouldSpillOnEntryToDeferred(block)) {
data->block_state(block->rpo_number())
.deferred_blocks_region()
->DeferSpillOutputUntilEntry(vreg());
} else {
MarkAsNeedsSpillAtOutput();
}
}
void VirtualRegisterData::AddDeferredSpillUse(
int instr_index, MidTierRegisterAllocationData* data) {
DCHECK(data->GetBlock(instr_index)->IsDeferred());
DCHECK(!is_defined_in_deferred_block());
AddSpillUse(instr_index, data);
}
bool VirtualRegisterData::CouldSpillOnEntryToDeferred(
const InstructionBlock* block) {
return !NeedsSpillAtOutput() && block->IsDeferred() &&
!is_defined_in_deferred_block() && !is_constant();
}
void VirtualRegisterData::AddDeferredSpillOutput(
AllocatedOperand allocated_op, int instr_index,
MidTierRegisterAllocationData* data) {
DCHECK(!NeedsSpillAtOutput());
spill_range_->AddDeferredSpillOutput(allocated_op, instr_index, data);
}
void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
@@ -424,8 +606,17 @@ void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
}
}
bool VirtualRegisterData::NeedsSpillAtOutput() const {
return HasSpillOperand() && !is_constant();
bool VirtualRegisterData::NeedsSpillAtDeferredBlocks() const {
return HasSpillRange() && spill_range()->HasDeferredBlockSpills();
}
void VirtualRegisterData::EmitDeferredSpillOutputs(
MidTierRegisterAllocationData* data) {
DCHECK(NeedsSpillAtDeferredBlocks());
for (auto deferred_spill : *spill_range()->deferred_spill_outputs()) {
EmitGapMoveToSpillSlot(deferred_spill.operand, deferred_spill.instr_index,
data);
}
}
void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
@@ -511,17 +702,32 @@ class RegisterState final : public ZoneObject {
RegisterState(const RegisterState& other) V8_NOEXCEPT;
bool IsAllocated(RegisterIndex reg);
bool IsShared(RegisterIndex reg);
int VirtualRegisterForRegister(RegisterIndex reg);
// Commit the |reg| with the |allocated| operand.
void Commit(RegisterIndex reg, AllocatedOperand allocated,
InstructionOperand* operand, MidTierRegisterAllocationData* data);
// Spill the contents of |reg| for an instruction in |current_block| using
// the |allocated| operand to commit the spill gap move.
void Spill(RegisterIndex reg, AllocatedOperand allocated,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data);
// Add a pending spill of the contents of |reg| at the exit point of a
// deferred block at |instr_index|, using the |allocated| operand to commit
// the spill gap move, if the register never gets spilled in a non-deferred
// block.
void SpillForDeferred(RegisterIndex reg, AllocatedOperand allocated,
int instr_index, MidTierRegisterAllocationData* data);
// Add a pending gap move from |reg| to |virtual_register|'s spill slot at
// the entry point of a deferred block at |instr_index|, if |virtual_register|
// is never spilled in a non-deferred block.
void MoveToSpillSlotOnDeferred(RegisterIndex reg, int virtual_register,
int instr_index,
MidTierRegisterAllocationData* data);
// Allocate |reg| to |virtual_register| for the instruction at |instr_index|.
// If the register is later spilled, a gap move will be added immediately
// before |instr_index| to move |virtual_register| into this register.
@@ -583,18 +789,30 @@ class RegisterState final : public ZoneObject {
void Reset();
// Operations for committing, spilling and allocating uses of the register.
void Commit(AllocatedOperand allocated_operand);
void Commit(AllocatedOperand allocated_operand,
MidTierRegisterAllocationData* data);
void Spill(AllocatedOperand allocated_op,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data);
void Use(int virtual_register, int instr_index);
void PendingUse(InstructionOperand* operand, int virtual_register,
int instr_index);
void SpillForDeferred(AllocatedOperand allocated, int instr_index,
MidTierRegisterAllocationData* data);
void MoveToSpillSlotOnDeferred(int virtual_register, int instr_index,
MidTierRegisterAllocationData* data);
// Mark register as holding a phi.
void MarkAsPhiMove();
bool is_phi_gap_move() const { return is_phi_gap_move_; }
// The register has deferred block spills that will be emitted if the
// register is committed without having been spilled in a non-deferred block.
void AddDeferredBlockSpill(int instr_index, bool on_exit, Zone* zone);
bool has_deferred_block_spills() const {
return deferred_block_spills_.has_value();
}
// Operations related to dealing with a Register that is shared across
// multiple basic blocks.
void CommitAtMerge();
@@ -627,6 +845,14 @@ class RegisterState final : public ZoneObject {
PendingOperand* pending_uses() const { return pending_uses_; }
private:
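// A pending spill at |instr_index|: either of the register's contents on
// exit from a deferred block region, or of a gap move into the spill slot
// on entry to one.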
struct DeferredBlockSpill {
DeferredBlockSpill(int instr, bool on_exit)
: instr_index(instr), on_deferred_exit(on_exit) {}
int instr_index;
bool on_deferred_exit;
};
void SpillPendingUses(MidTierRegisterAllocationData* data);
void SpillPhiGapMove(AllocatedOperand allocated_op,
const InstructionBlock* block,
@@ -640,6 +866,7 @@ class RegisterState final : public ZoneObject {
int num_commits_required_;
int virtual_register_;
PendingOperand* pending_uses_;
base::Optional<ZoneVector<DeferredBlockSpill>> deferred_block_spills_;
};
void ResetDataFor(RegisterIndex reg);
@@ -667,6 +894,7 @@ void RegisterState::Register::Reset() {
num_commits_required_ = 0;
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
pending_uses_ = nullptr;
deferred_block_spills_.reset();
}
void RegisterState::Register::Use(int virtual_register, int instr_index) {
@@ -701,19 +929,31 @@ void RegisterState::Register::MarkAsPhiMove() {
is_phi_gap_move_ = true;
}
void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
bool on_exit, Zone* zone) {
DCHECK(is_allocated());
if (!deferred_block_spills_) {
deferred_block_spills_.emplace(zone);
}
deferred_block_spills_->emplace_back(instr_index, on_exit);
}
void RegisterState::Register::AddSharedUses(int shared_use_count) {
is_shared_ = true;
num_commits_required_ += shared_use_count;
}
void RegisterState::Register::CommitAtMerge() {
DCHECK(is_shared());
DCHECK(is_allocated());
--num_commits_required_;
// We should still have commits required that will be resolved in the merge
// block.
DCHECK_GT(num_commits_required_, 0);
}
void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
void RegisterState::Register::Commit(AllocatedOperand allocated_op,
MidTierRegisterAllocationData* data) {
DCHECK(is_allocated());
DCHECK_GT(num_commits_required_, 0);
@@ -728,6 +968,29 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
pending_use = next;
}
pending_uses_ = nullptr;
VirtualRegisterData& vreg_data =
data->VirtualRegisterDataFor(virtual_register());
// If there are deferred block gap moves pending, emit them now that the
// register has been committed.
if (has_deferred_block_spills()) {
for (DeferredBlockSpill& spill : *deferred_block_spills_) {
if (spill.on_deferred_exit) {
vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
spill.instr_index, data);
} else if (!vreg_data.NeedsSpillAtOutput()) {
vreg_data.AddDeferredSpillOutput(allocated_op, spill.instr_index,
data);
}
}
}
// If this register was used as a phi gap move, then its being committed
// is the point at which the phi has been output.
if (is_phi_gap_move() && vreg_data.NeedsSpillAtDeferredBlocks()) {
vreg_data.EmitDeferredSpillOutputs(data);
}
}
DCHECK_IMPLIES(num_commits_required_ > 0, is_shared());
}
@@ -735,16 +998,19 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
void RegisterState::Register::Spill(AllocatedOperand allocated_op,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data) {
VirtualRegisterData& vreg_data =
data->VirtualRegisterDataFor(virtual_register());
SpillPendingUses(data);
if (is_phi_gap_move()) {
SpillPhiGapMove(allocated_op, current_block, data);
}
if (needs_gap_move_on_spill()) {
VirtualRegisterData& vreg_data =
data->VirtualRegisterDataFor(virtual_register());
vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
last_use_instr_index(), data);
}
SpillPendingUses(data);
if (has_deferred_block_spills() || !current_block->IsDeferred()) {
vreg_data.MarkAsNeedsSpillAtOutput();
}
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
}
@@ -784,6 +1050,30 @@ void RegisterState::Register::SpillPendingUses(
pending_uses_ = nullptr;
}
void RegisterState::Register::SpillForDeferred(
AllocatedOperand allocated, int instr_index,
MidTierRegisterAllocationData* data) {
DCHECK(is_allocated());
DCHECK(is_shared());
// Add a pending deferred spill, then commit the register (with the commit
// being fulfilled by the deferred spill if the register is fully committed).
data->VirtualRegisterDataFor(virtual_register())
.AddDeferredSpillUse(instr_index, data);
AddDeferredBlockSpill(instr_index, true, data->allocation_zone());
Commit(allocated, data);
}
void RegisterState::Register::MoveToSpillSlotOnDeferred(
int virtual_register, int instr_index,
MidTierRegisterAllocationData* data) {
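// If the register is currently free, claim it for |virtual_register| so
// that the pending deferred block spill below has an owner.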
if (!is_allocated()) {
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
num_commits_required_ = 1;
}
AddDeferredBlockSpill(instr_index, false, data->allocation_zone());
}
RegisterState::RegisterState(RegisterKind kind, int num_allocatable_registers,
Zone* zone)
: register_data_(num_allocatable_registers, zone), zone_(zone) {}
@@ -802,7 +1092,7 @@ int RegisterState::VirtualRegisterForRegister(RegisterIndex reg) {
}
bool RegisterState::IsPhiGapMove(RegisterIndex reg) {
DCHECK(RegisterState::IsAllocated(reg));
DCHECK(IsAllocated(reg));
return reg_data(reg).is_phi_gap_move();
}
@@ -811,7 +1101,7 @@ void RegisterState::Commit(RegisterIndex reg, AllocatedOperand allocated,
MidTierRegisterAllocationData* data) {
InstructionOperand::ReplaceWith(operand, &allocated);
if (IsAllocated(reg)) {
reg_data(reg).Commit(allocated);
reg_data(reg).Commit(allocated, data);
ResetDataFor(reg);
}
}
@@ -824,6 +1114,22 @@ void RegisterState::Spill(RegisterIndex reg, AllocatedOperand allocated,
ResetDataFor(reg);
}
void RegisterState::SpillForDeferred(RegisterIndex reg,
AllocatedOperand allocated,
int instr_index,
MidTierRegisterAllocationData* data) {
DCHECK(IsAllocated(reg));
reg_data(reg).SpillForDeferred(allocated, instr_index, data);
ResetDataFor(reg);
}
void RegisterState::MoveToSpillSlotOnDeferred(
RegisterIndex reg, int virtual_register, int instr_index,
MidTierRegisterAllocationData* data) {
EnsureRegisterData(reg);
reg_data(reg).MoveToSpillSlotOnDeferred(virtual_register, instr_index, data);
}
void RegisterState::AllocateUse(RegisterIndex reg, int virtual_register,
InstructionOperand* operand, int instr_index,
MidTierRegisterAllocationData* data) {
@@ -848,6 +1154,10 @@ RegisterState::Register& RegisterState::reg_data(RegisterIndex reg) {
return *register_data_[reg.ToInt()];
}
bool RegisterState::IsShared(RegisterIndex reg) {
return HasRegisterData(reg) && reg_data(reg).is_shared();
}
bool RegisterState::IsAllocated(RegisterIndex reg) {
return HasRegisterData(reg) && reg_data(reg).is_allocated();
}
@@ -953,6 +1263,11 @@ class SinglePassRegisterAllocator final {
void EndBlock(const InstructionBlock* block);
void EndInstruction();
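// Spill any shared registers that are still allocated at |instr_index|, the
// exit of a deferred block region, so that they don't need to be spilled in
// non-deferred blocks.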
void UpdateForDeferredBlock(int instr_index);
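// For a |virtual_register| that is only spilled inside |deferred_block|'s
// region, try to emit the spill as a gap move at the start of that region
// instead of at the register's output.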
void AllocateDeferredBlockSpillOutput(int instr_index,
RpoNumber deferred_block,
int virtual_register);
RegisterKind kind() const { return kind_; }
BitVector* assigned_registers() const { return assigned_registers_; }
@@ -1017,6 +1332,10 @@ class SinglePassRegisterAllocator final {
void SpillRegister(RegisterIndex reg);
void SpillRegisterForVirtualRegister(int virtual_register);
// Pre-emptively spill the register at the exit of deferred blocks such that
// uses of this register in non-deferred blocks don't need to be spilled.
void SpillRegisterForDeferred(RegisterIndex reg, int instr_index);
// Returns an AllocatedOperand corresponding to the use of |reg| for
// |virtual_register|.
AllocatedOperand AllocatedOperandForReg(RegisterIndex reg,
@@ -1031,8 +1350,8 @@ class SinglePassRegisterAllocator final {
// Helper functions to choose the best register for a given operand.
V8_INLINE RegisterIndex
ChooseRegisterFor(VirtualRegisterData& virtual_register, UsePosition pos,
bool must_use_register);
ChooseRegisterFor(VirtualRegisterData& virtual_register, int instr_index,
UsePosition pos, bool must_use_register);
V8_INLINE RegisterIndex ChooseRegisterFor(MachineRepresentation rep,
UsePosition pos,
bool must_use_register);
@@ -1189,6 +1508,13 @@ RegisterIndex SinglePassRegisterAllocator::RegisterForVirtualRegister(
return virtual_register_to_reg_[virtual_register];
}
void SinglePassRegisterAllocator::UpdateForDeferredBlock(int instr_index) {
if (!HasRegisterState()) return;
for (RegisterIndex reg : *register_state()) {
SpillRegisterForDeferred(reg, instr_index);
}
}
void SinglePassRegisterAllocator::EndInstruction() {
in_use_at_instr_end_bits_ = 0;
in_use_at_instr_start_bits_ = 0;
@@ -1449,14 +1775,15 @@ void SinglePassRegisterAllocator::FreeRegister(RegisterIndex reg,
}
RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
VirtualRegisterData& virtual_register, UsePosition pos,
VirtualRegisterData& virtual_register, int instr_index, UsePosition pos,
bool must_use_register) {
// If register is already allocated to the virtual register, use that.
RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
// If we don't need a register, only try to allocate one if the virtual
// register hasn't yet been spilled, to try to avoid spilling it.
if (!reg.is_valid() &&
(must_use_register || !virtual_register.HasSpillOperand())) {
if (!reg.is_valid() && (must_use_register ||
!virtual_register.IsSpilledAt(instr_index, data()))) {
reg = ChooseRegisterFor(RepresentationFor(virtual_register.vreg()), pos,
must_use_register);
}
@@ -1610,6 +1937,45 @@ void SinglePassRegisterAllocator::SpillRegisterForVirtualRegister(
}
}
void SinglePassRegisterAllocator::SpillRegisterForDeferred(RegisterIndex reg,
int instr_index) {
// Commit the output operation and mark the register use in this
// instruction, then mark the register as free going forward.
if (register_state()->IsAllocated(reg) && register_state()->IsShared(reg)) {
int virtual_register = VirtualRegisterForRegister(reg);
AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
register_state()->SpillForDeferred(reg, allocated, instr_index, data());
FreeRegister(reg, virtual_register);
}
CheckConsistency();
}
void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
int instr_index, RpoNumber deferred_block, int virtual_register) {
DCHECK(data()->GetBlock(deferred_block)->IsDeferred());
VirtualRegisterData& vreg_data =
data()->VirtualRegisterDataFor(virtual_register);
if (!vreg_data.NeedsSpillAtOutput() &&
!DefinedAfter(virtual_register, instr_index, UsePosition::kEnd)) {
// If a register has been assigned to the virtual register, and the virtual
// register still doesn't need to be spilled at its output, add a pending
// move to output the virtual register to its spill slot on entry to the
// deferred block (to avoid spilling in non-deferred code).
// TODO(rmcilroy): Consider assigning a register even if the virtual
// register isn't yet assigned - currently doing this regresses performance.
RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
if (reg.is_valid()) {
int deferred_block_start =
data()->GetBlock(deferred_block)->first_instruction_index();
register_state()->MoveToSpillSlotOnDeferred(reg, virtual_register,
deferred_block_start, data());
return;
} else {
vreg_data.MarkAsNeedsSpillAtOutput();
}
}
}
AllocatedOperand SinglePassRegisterAllocator::AllocatedOperandForReg(
RegisterIndex reg, int virtual_register) {
MachineRepresentation rep = RepresentationFor(virtual_register);
@@ -1709,7 +2075,8 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
bool must_use_register = operand->HasRegisterPolicy() ||
(vreg_data.is_constant() &&
!operand->HasRegisterOrSlotOrConstantPolicy());
RegisterIndex reg = ChooseRegisterFor(vreg_data, pos, must_use_register);
RegisterIndex reg =
ChooseRegisterFor(vreg_data, instr_index, pos, must_use_register);
if (reg.is_valid()) {
if (must_use_register) {
@@ -1731,7 +2098,8 @@ void SinglePassRegisterAllocator::AllocateGapMoveInput(
// Gap move inputs should be unconstrained.
DCHECK(operand->HasRegisterOrSlotPolicy());
RegisterIndex reg = ChooseRegisterFor(vreg_data, UsePosition::kStart, false);
RegisterIndex reg =
ChooseRegisterFor(vreg_data, instr_index, UsePosition::kStart, false);
if (reg.is_valid()) {
AllocatePendingUse(reg, virtual_register, operand, instr_index);
} else {
@@ -1769,7 +2137,8 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
reg = FromRegCode(operand->fixed_register_index(),
RepresentationFor(virtual_register));
} else {
reg = ChooseRegisterFor(vreg_data, pos, operand->HasRegisterPolicy());
reg = ChooseRegisterFor(vreg_data, instr_index, pos,
operand->HasRegisterPolicy());
}
// TODO(rmcilroy): support secondary storage.
@@ -1797,6 +2166,8 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
vreg_data.EmitGapMoveFromOutputToSpillSlot(
*AllocatedOperand::cast(operand), current_block(), instr_index,
data());
} else if (vreg_data.NeedsSpillAtDeferredBlocks()) {
vreg_data.EmitDeferredSpillOutputs(data());
}
}
@@ -1985,6 +2356,8 @@ class MidTierOutputProcessor final {
void DefineOutputs(const InstructionBlock* block);
private:
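// Walks the connected region of deferred blocks containing |initial_block|
// and registers each block with a shared DeferredBlocksRegion.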
void PopulateDeferredBlockRegion(RpoNumber initial_block);
VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
return data()->VirtualRegisterDataFor(virtual_register);
}
@@ -1992,16 +2365,71 @@ class MidTierOutputProcessor final {
return data()->RepresentationFor(virtual_register);
}
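// Returns true if |blocks| consists of a single non-deferred block, i.e.,
// crossing to it would leave the deferred region.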
bool IsDeferredBlockBoundary(const ZoneVector<RpoNumber>& blocks) {
return blocks.size() == 1 && !data()->GetBlock(blocks[0])->IsDeferred();
}
MidTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
Zone* allocation_zone() const { return data()->allocation_zone(); }
Zone* zone() const { return data()->allocation_zone(); }
MidTierRegisterAllocationData* const data_;
ZoneQueue<RpoNumber> deferred_blocks_worklist_;
ZoneSet<RpoNumber> deferred_blocks_processed_;
};
MidTierOutputProcessor::MidTierOutputProcessor(
MidTierRegisterAllocationData* data)
: data_(data) {}
: data_(data),
deferred_blocks_worklist_(data->allocation_zone()),
deferred_blocks_processed_(data->allocation_zone()) {}
void MidTierOutputProcessor::PopulateDeferredBlockRegion(
RpoNumber initial_block) {
DeferredBlocksRegion* deferred_blocks_region =
zone()->New<DeferredBlocksRegion>(zone(),
code()->InstructionBlockCount());
DCHECK(deferred_blocks_worklist_.empty());
deferred_blocks_worklist_.push(initial_block);
deferred_blocks_processed_.insert(initial_block);
while (!deferred_blocks_worklist_.empty()) {
RpoNumber current = deferred_blocks_worklist_.front();
deferred_blocks_worklist_.pop();
deferred_blocks_region->AddBlock(current, data());
const InstructionBlock* curr_block = data()->GetBlock(current);
// Check whether the predecessor blocks are still deferred.
if (IsDeferredBlockBoundary(curr_block->predecessors())) {
// If not, mark the predecessor as having a deferred successor.
data()
->block_state(curr_block->predecessors()[0])
.MarkAsDeferredBlockBoundary();
} else {
// Otherwise process predecessors.
for (RpoNumber pred : curr_block->predecessors()) {
if (deferred_blocks_processed_.count(pred) == 0) {
deferred_blocks_worklist_.push(pred);
deferred_blocks_processed_.insert(pred);
}
}
}
// Check whether the successor blocks are still deferred.
if (IsDeferredBlockBoundary(curr_block->successors())) {
// If not, mark this block as a deferred block boundary.
data()->block_state(current).MarkAsDeferredBlockBoundary();
} else {
// Otherwise process successors.
for (RpoNumber succ : curr_block->successors()) {
if (deferred_blocks_processed_.count(succ) == 0) {
deferred_blocks_worklist_.push(succ);
deferred_blocks_processed_.insert(succ);
}
}
}
}
}
void MidTierOutputProcessor::InitializeBlockState(
const InstructionBlock* block) {
@@ -2013,8 +2441,13 @@ void MidTierOutputProcessor::InitializeBlockState(
}
}
// Mark this block as dominating itself.
BlockState& block_state = data()->block_state(block->rpo_number());
if (block->IsDeferred() && !block_state.deferred_blocks_region()) {
PopulateDeferredBlockRegion(block->rpo_number());
}
// Mark this block as dominating itself.
block_state.dominated_blocks()->Add(block->rpo_number().ToInt());
if (block->dominator().IsValid()) {
@@ -2030,6 +2463,7 @@ void MidTierOutputProcessor::InitializeBlockState(
void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
int block_start = block->first_instruction_index();
bool is_deferred = block->IsDeferred();
for (int index = block->last_instruction_index(); index >= block_start;
index--) {
Instruction* instr = code()->InstructionAt(index);
@@ -2042,7 +2476,7 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
ConstantOperand* constant_operand = ConstantOperand::cast(output);
int virtual_register = constant_operand->virtual_register();
VirtualRegisterDataFor(virtual_register)
.DefineAsConstantOperand(constant_operand, index);
.DefineAsConstantOperand(constant_operand, index, is_deferred);
} else {
DCHECK(output->IsUnallocated());
UnallocatedOperand* unallocated_operand =
@@ -2052,15 +2486,15 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
// If output has a fixed slot policy, allocate its spill operand now
// so that the register allocator can use this knowledge.
MachineRepresentation rep = RepresentationFor(virtual_register);
AllocatedOperand* fixed_spill_operand = AllocatedOperand::New(
allocation_zone(), AllocatedOperand::STACK_SLOT, rep,
AllocatedOperand* fixed_spill_operand =
AllocatedOperand::New(zone(), AllocatedOperand::STACK_SLOT, rep,
unallocated_operand->fixed_slot_index());
VirtualRegisterDataFor(virtual_register)
.DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register,
index);
index, is_deferred);
} else {
VirtualRegisterDataFor(virtual_register)
.DefineAsUnallocatedOperand(virtual_register, index);
.DefineAsUnallocatedOperand(virtual_register, index, is_deferred);
}
}
}
@@ -2076,7 +2510,8 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
for (PhiInstruction* phi : block->phis()) {
int virtual_register = phi->virtual_register();
VirtualRegisterDataFor(virtual_register)
.DefineAsPhi(virtual_register, block->first_instruction_index());
.DefineAsPhi(virtual_register, block->first_instruction_index(),
is_deferred);
}
}
@@ -2142,9 +2577,31 @@ MidTierRegisterAllocator::MidTierRegisterAllocator(
void MidTierRegisterAllocator::AllocateRegisters(
const InstructionBlock* block) {
RpoNumber block_rpo = block->rpo_number();
bool is_deferred_block_boundary =
data()->block_state(block_rpo).is_deferred_block_boundary();
general_reg_allocator().StartBlock(block);
double_reg_allocator().StartBlock(block);
// If the block is not deferred but has deferred successors, then for
// virtual registers that are only spilled in those deferred blocks, try to
// output their spill slots at the start of the deferred blocks, to avoid
// spilling them at their output in non-deferred blocks.
if (is_deferred_block_boundary && !block->IsDeferred()) {
for (RpoNumber successor : block->successors()) {
if (!data()->GetBlock(successor)->IsDeferred()) continue;
DCHECK_GT(successor, block_rpo);
for (int virtual_register :
*data()->block_state(successor).deferred_blocks_region()) {
USE(virtual_register);
AllocatorFor(RepresentationFor(virtual_register))
.AllocateDeferredBlockSpillOutput(block->last_instruction_index(),
successor, virtual_register);
}
}
}
// Allocate registers for instructions in reverse, from the end of the block
// to the start.
int block_start = block->first_instruction_index();
@@ -2215,6 +2672,13 @@ void MidTierRegisterAllocator::AllocateRegisters(
// phi gap move operations that are needed to resolve phis in our successor.
if (instr_index == block->last_instruction_index()) {
AllocatePhiGapMoves(block);
// If this block is deferred but its successor isn't, update the state to
// limit spills to the deferred blocks where possible.
if (is_deferred_block_boundary && block->IsDeferred()) {
general_reg_allocator().UpdateForDeferredBlock(instr_index);
double_reg_allocator().UpdateForDeferredBlock(instr_index);
}
}
// Allocate any unallocated gap move inputs.
......
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
#ifdef DEBUG
void BitVector::Print() {
void BitVector::Print() const {
bool first = true;
PrintF("{");
for (int i = 0; i < length(); i++) {
......
@@ -277,7 +277,7 @@ class V8_EXPORT_PRIVATE BitVector : public ZoneObject {
int length() const { return length_; }
#ifdef DEBUG
void Print();
void Print() const;
#endif
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BitVector);
......
@@ -12,6 +12,67 @@ namespace compiler {
namespace {
// We can't just use the size of the moves collection, because redundant
// moves need to be discounted.
int GetMoveCount(const ParallelMove& moves) {
int move_count = 0;
for (auto move : moves) {
if (move->IsEliminated() || move->IsRedundant()) continue;
++move_count;
}
return move_count;
}
bool AreOperandsOfSameType(
const AllocatedOperand& op,
const InstructionSequenceTest::TestOperand& test_op) {
bool test_op_is_reg =
(test_op.type_ ==
InstructionSequenceTest::TestOperandType::kFixedRegister ||
test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister);
return (op.IsRegister() && test_op_is_reg) ||
(op.IsStackSlot() && !test_op_is_reg);
}
bool AllocatedOperandMatches(
const AllocatedOperand& op,
const InstructionSequenceTest::TestOperand& test_op) {
return AreOperandsOfSameType(op, test_op) &&
((op.IsRegister() ? op.GetRegister().code() : op.index()) ==
test_op.value_ ||
test_op.value_ == InstructionSequenceTest::kNoValue);
}
int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
const InstructionSequence* sequence) {
const ParallelMove* moves =
sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
if (moves == nullptr) return 0;
return GetMoveCount(*moves);
}
bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
const InstructionSequence* sequence,
const InstructionSequenceTest::TestOperand& src,
const InstructionSequenceTest::TestOperand& dest) {
const ParallelMove* moves =
sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
EXPECT_NE(nullptr, moves);
bool found_match = false;
for (auto move : *moves) {
if (move->IsEliminated() || move->IsRedundant()) continue;
if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
dest)) {
found_match = true;
break;
}
}
return found_match;
}
class MidTierRegisterAllocatorTest : public InstructionSequenceTest {
public:
void Allocate() {
@@ -608,6 +669,99 @@ TEST_F(MidTierRegisterAllocatorTest, DiamondWithCallSecondBlock) {
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SingleDeferredBlockSpill) {
StartBlock(); // B0
auto var = EmitOI(Reg(0));
EndBlock(Branch(Reg(var), 1, 2));
StartBlock(); // B1
EndBlock(Jump(2));
StartBlock(true); // B2
EmitCall(Slot(-1), Slot(var));
EndBlock();
StartBlock(); // B3
EmitNop();
EndBlock();
StartBlock(); // B4
Return(Reg(var, 0));
EndBlock();
Allocate();
const int var_def_index = 1;
const int call_index = 3;
// We should have no parallel moves at the "var_def_index" position.
EXPECT_EQ(
0, GetParallelMoveCount(var_def_index, Instruction::START, sequence()));
// The spill should be performed at the position "call_index".
EXPECT_TRUE(IsParallelMovePresent(call_index, Instruction::START, sequence(),
Reg(0), Slot(0)));
}
TEST_F(MidTierRegisterAllocatorTest, ValidMultipleDeferredBlockSpills) {
StartBlock(); // B0
auto var1 = EmitOI(Reg(0));
auto var2 = EmitOI(Reg(1));
auto var3 = EmitOI(Reg(2));
EndBlock(Branch(Reg(var1, 0), 1, 2));
StartBlock(true); // B1
EmitCall(Slot(-2), Slot(var1));
EndBlock(Jump(5));
StartBlock(); // B2
EmitNop();
EndBlock();
StartBlock(); // B3
EmitNop();
EndBlock(Branch(Reg(var2, 0), 1, 2));
StartBlock(true); // B4
EmitCall(Slot(-1), Slot(var2));
EndBlock(Jump(2));
StartBlock(); // B5
EmitNop();
EndBlock();
StartBlock(); // B6
Return(Reg(var3, 2));
EndBlock();
const int def_of_v2 = 2;
const int start_of_b1 = 4;
const int start_of_b4 = 10;
const int end_of_b1 = 5;
const int end_of_b4 = 11;
const int start_of_b6 = 14;
Allocate();
const int var3_reg = 2;
const int var3_slot = 2;
EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
Reg(var3_reg), Slot()));
EXPECT_TRUE(IsParallelMovePresent(start_of_b1, Instruction::START, sequence(),
Reg(var3_reg), Slot(var3_slot)));
EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::END, sequence(),
Slot(var3_slot), Reg()));
EXPECT_TRUE(IsParallelMovePresent(start_of_b4, Instruction::START, sequence(),
Reg(var3_reg), Slot(var3_slot)));
EXPECT_TRUE(IsParallelMovePresent(end_of_b4, Instruction::END, sequence(),
Slot(var3_slot), Reg()));
EXPECT_EQ(0,
GetParallelMoveCount(start_of_b6, Instruction::START, sequence()));
}
namespace {
enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };
......