Commit 9b1ab6f8 authored by Stephan Herhut, committed by Commit Bot

[regalloc] Add two spill modes.

This change adds two spilling modes: SpillAtDefinition and SpillDeferred.
The former is the existing spilling mode, where we spill at the definition.
The latter spills only in deferred code regions. It is implemented on top of
control-flow-aware allocation and its invariants.

The effect is mostly the same as splintering, except that forward-looking
allocation decisions are still impacted by register constraints in deferred
code.
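
Conceptually, the two modes differ only in where spill stores end up. The
following is a minimal sketch of the intended behavior, with hypothetical
helpers EmitStore and EmitStoreAtBlockEntry standing in for the gap-move
machinery (the real logic lives in RegisterAllocator::Spill and
OperandAssigner::DecideSpillingMode below):

enum class SpillMode { kSpillAtDefinition, kSpillDeferred };

// Sketch only; EmitStore and EmitStoreAtBlockEntry are stand-ins.
void EmitSpills(TopLevelLiveRange* range, SpillMode mode) {
  if (mode == SpillMode::kSpillAtDefinition) {
    // A single store right after the definition; the slot then holds the
    // value for the whole live range.
    EmitStore(range->Start(), range->GetSpillRangeOperand());
  } else {
    // kSpillDeferred: stores only on entry to deferred blocks that read
    // the value from its slot, keeping non-deferred code spill-free.
    BitVector* blocks = range->GetListOfBlocksRequiringSpillOperands();
    for (int block = 0; block < blocks->length(); ++block) {
      if (blocks->Contains(block)) {
        EmitStoreAtBlockEntry(block, range->GetSpillRangeOperand());
      }
    }
  }
}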

Change-Id: Ia708e5765dd095196a8127deb2d8bec950d37e04
Reviewed-on: https://chromium-review.googlesource.com/c/1437118
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Stephan Herhut <herhut@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59759}
parent ff8f4144
......@@ -976,6 +976,11 @@ class RpoNumber final {
return other.index_ == this->index_ + 1;
}
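// Returns the RPO number of the block immediately following this one in
// reverse post-order.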
RpoNumber Next() const {
DCHECK(IsValid());
return RpoNumber(index_ + 1);
}
// Comparison operators.
bool operator==(RpoNumber other) const { return index_ == other.index_; }
bool operator!=(RpoNumber other) const { return index_ != other.index_; }
......
......@@ -57,11 +57,11 @@ void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
}
void SetSlotUse(TopLevelLiveRange* range) {
range->set_has_slot_use(false);
range->reset_slot_use();
for (const UsePosition* pos = range->first_pos();
!range->has_slot_use() && pos != nullptr; pos = pos->next()) {
if (pos->type() == UsePositionType::kRequiresSlot) {
range->set_has_slot_use(true);
range->register_slot_use(TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
}
}
}
......
......@@ -1082,8 +1082,7 @@ void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
TopLevel()->UpdateParentForAllChildren(TopLevel());
TopLevel()->UpdateSpillRangePostMerge(other);
TopLevel()->set_has_slot_use(TopLevel()->has_slot_use() ||
other->has_slot_use());
TopLevel()->register_slot_use(other->slot_use_kind());
#if DEBUG
Verify();
......@@ -1617,7 +1616,8 @@ bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
}
SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
TopLevelLiveRange* range) {
TopLevelLiveRange* range, SpillMode spill_mode) {
using SpillType = TopLevelLiveRange::SpillType;
DCHECK(!range->HasSpillOperand());
SpillRange* spill_range = range->GetAllocatedSpillRange();
......@@ -1625,7 +1625,13 @@ SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
DCHECK(!range->IsSplinter());
spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
}
range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
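// Only mark the range as deferred-spilled if it was not already assigned a
// regular spill range; a range spilled in non-deferred code must keep
// spilling at its definition.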
if (spill_mode == SpillMode::kSpillDeferred &&
(range->spill_type() != SpillType::kSpillRange)) {
DCHECK(FLAG_turbo_control_flow_aware_allocation);
range->set_spill_type(SpillType::kDeferredSpillRange);
} else {
range->set_spill_type(SpillType::kSpillRange);
}
int spill_range_index =
range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
......@@ -1637,6 +1643,7 @@ SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
TopLevelLiveRange* range) {
DCHECK(FLAG_turbo_preprocess_ranges);
DCHECK(!range->HasSpillOperand());
DCHECK(!range->IsSplinter());
SpillRange* spill_range =
......@@ -2284,7 +2291,15 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int vreg = unalloc->virtual_register();
live->Add(vreg);
if (unalloc->HasSlotPolicy()) {
data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
if (FLAG_turbo_control_flow_aware_allocation) {
data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
block->IsDeferred()
? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
: TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
} else {
data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
}
}
}
Use(block_start_position, use_pos, input);
......@@ -2548,7 +2563,12 @@ void LiveRangeBuilder::BuildLiveRanges() {
if (range == nullptr) continue;
// Give slots to all ranges with a non-fixed slot use.
if (range->has_slot_use() && range->HasNoSpillType()) {
data()->AssignSpillRangeToLiveRange(range);
SpillMode spill_mode =
range->slot_use_kind() ==
TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
? SpillMode::kSpillDeferred
: SpillMode::kSpillAtDefinition;
data()->AssignSpillRangeToLiveRange(range, spill_mode);
}
// TODO(bmeurer): This is a horrible hack to make sure that for constant
// live ranges, every use requires the constant to be in a register.
......@@ -2575,7 +2595,8 @@ void LiveRangeBuilder::BuildLiveRanges() {
int slot_id = preassigned.second;
SpillRange* spill = range->HasSpillRange()
? range->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(range);
: data()->AssignSpillRangeToLiveRange(
range, SpillMode::kSpillAtDefinition);
spill->set_assigned_slot(slot_id);
}
#ifdef DEBUG
......@@ -2804,8 +2825,10 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
TopLevelLiveRange* range = data()->live_ranges()[i];
if (!CanProcessRange(range)) continue;
// Only assume the range is defined by a memory operand if we are guaranteed
// to spill it or it has a spill operand.
if (range->HasNoSpillType() ||
(range->HasSpillRange() && !range->has_slot_use())) {
(range->HasSpillRange() && !range->has_non_deferred_slot_use())) {
continue;
}
LifetimePosition start = range->Start();
......@@ -2825,7 +2848,7 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
// If the range already has a spill operand and it doesn't need a
// register immediately, split it and spill the first part of the range.
if (pos == nullptr) {
Spill(range);
Spill(range, SpillMode::kSpillAtDefinition);
} else if (pos->pos() > range->Start().NextStart()) {
// Do not spill live range eagerly if use position that can benefit from
// the register is too close to the start of live range.
......@@ -2838,7 +2861,7 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
SplitRangeAt(range, split_pos);
Spill(range);
Spill(range, SpillMode::kSpillAtDefinition);
}
}
}
......@@ -2944,14 +2967,28 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
return pos;
}
void RegisterAllocator::Spill(LiveRange* range) {
void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
DCHECK(!range->spilled());
DCHECK(spill_mode == SpillMode::kSpillAtDefinition ||
GetInstructionBlock(code(), range->Start())->IsDeferred());
TopLevelLiveRange* first = range->TopLevel();
TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
TRACE("Spilling live range %d:%d mode %d\n", first->vreg(),
range->relative_id(), spill_mode);
TRACE("Starting spill type is %d\n", first->spill_type());
if (first->HasNoSpillType()) {
data()->AssignSpillRangeToLiveRange(first);
}
TRACE("New spill range needed");
data()->AssignSpillRangeToLiveRange(first, spill_mode);
}
// Upgrade the spill mode, in case this was only spilled in deferred code so
// far.
if ((spill_mode == SpillMode::kSpillAtDefinition) &&
(first->spill_type() ==
TopLevelLiveRange::SpillType::kDeferredSpillRange)) {
TRACE("Upgrading\n");
first->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
}
TRACE("Final spill type is %d\n", first->spill_type());
range->Spill();
}
......@@ -2993,7 +3030,8 @@ void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) {
}
void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
LifetimePosition position) {
LifetimePosition position,
SpillMode spill_mode) {
for (auto it = active_live_ranges().begin();
it != active_live_ranges().end();) {
LiveRange* active_range = *it;
......@@ -3039,7 +3077,7 @@ void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
if (position < revisit_at) {
LiveRange* third_part = SplitRangeAt(split, revisit_at);
DCHECK_NE(split, third_part);
Spill(split);
Spill(split, spill_mode);
TRACE("Marking %d:%d to recombine\n", toplevel->vreg(),
third_part->relative_id());
third_part->SetRecombine();
......@@ -3048,7 +3086,7 @@ void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
AddToUnhandled(split);
}
} else {
Spill(split);
Spill(split, spill_mode);
}
it = ActiveToHandled(it);
}
......@@ -3298,6 +3336,7 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors(
int reg = kUnassignedRegister;
for (int idx = 0; idx < RegisterConfiguration::kMaxRegisters; idx++) {
int uses = val.second.used_registers[idx];
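// A register that none of the predecessors used can never become the
// majority choice below.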
if (uses == 0) continue;
if (uses > register_max) {
reg = idx;
register_max = val.second.used_registers[idx];
......@@ -3338,19 +3377,18 @@ bool LinearScanAllocator::ConsiderBlockForControlFlow(
!code()->InstructionBlockAt(predecessor)->IsDeferred());
}
bool LinearScanAllocator::BlockOrImmediatePredecessorIsDeferred(
bool LinearScanAllocator::BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
const InstructionBlock* block) {
if (!FLAG_turbo_preprocess_ranges) return false;
if (block->IsDeferred()) return true;
if (block->PredecessorCount() == 0) return false;
bool pred_is_splinter = false;
if (block->PredecessorCount() == 0) return true;
bool pred_is_deferred = false;
for (auto pred : block->predecessors()) {
if (pred.IsNext(block->rpo_number())) {
pred_is_splinter = code()->InstructionBlockAt(pred)->IsDeferred();
pred_is_deferred = code()->InstructionBlockAt(pred)->IsDeferred();
break;
}
}
return pred_is_splinter;
return !pred_is_deferred;
}
void LinearScanAllocator::AllocateRegisters() {
......@@ -3406,6 +3444,8 @@ void LinearScanAllocator::AllocateRegisters() {
->InstructionBlockAt(last_block)
->last_instruction_index())
.NextFullStart();
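// Spill mode of the region currently being processed; switched to
// kSpillDeferred whenever allocation crosses into a deferred block.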
SpillMode spill_mode = SpillMode::kSpillAtDefinition;
// Process all ranges. We also need to ensure that we have seen all block
// boundaries. Linear scan might have assigned and spilled ranges before
// reaching the last block and hence we would ignore control flow effects for
......@@ -3424,6 +3464,8 @@ void LinearScanAllocator::AllocateRegisters() {
allocation_finger_ = position;
#endif
if (FLAG_turbo_control_flow_aware_allocation) {
// Splintering is not supported.
CHECK(!FLAG_turbo_preprocess_ranges);
// Check whether we just moved across a block boundary. This will trigger
// for the first range that is past the current boundary.
if (position >= next_block_boundary) {
......@@ -3443,18 +3485,16 @@ void LinearScanAllocator::AllocateRegisters() {
// are not spilled.
data()->RememberSpillState(last_block, active_live_ranges());
bool fallthrough = (current_block->PredecessorCount() == 1) &&
current_block->predecessors()[0].IsNext(
current_block->rpo_number());
// Only reset the state if this was not a direct fallthrough. Otherwise
// control flow resolution will get confused (it does not expect changes
// across fallthrough edges).
bool fallthrough = (current_block->PredecessorCount() == 1) &&
current_block->predecessors()[0].IsNext(
current_block->rpo_number());
// Also do not process deferred code boundaries. Splintering takes care
// of their control flow.
fallthrough =
fallthrough || BlockOrImmediatePredecessorIsDeferred(current_block);
spill_mode = current_block->IsDeferred()
? SpillMode::kSpillDeferred
: SpillMode::kSpillAtDefinition;
if (!fallthrough) {
#ifdef DEBUG
......@@ -3467,17 +3507,23 @@ void LinearScanAllocator::AllocateRegisters() {
// reactivate inactive ranges so that they get rescheduled for
// allocation if they were not live at the predecessors.
ForwardStateTo(next_block_boundary);
RangeWithRegisterSet to_be_live(data()->allocation_zone());
// If we end up deciding to use the state of the immediate
// predecessor, it is better not to perform a change. It would lead to
// the same outcome anyway.
// This may never happen on boundaries between deferred and
// non-deferred code, as we rely on explicit respill to ensure we
// spill at definition.
bool no_change_required = false;
auto pick_state_from = [this, current_block](
RpoNumber pred,
RangeWithRegisterSet* to_be_live) -> bool {
TRACE("Using information from B%d\n", pred.ToInt());
// If this is a fall-through that is not across a deferred
// boundary, there is nothing to do.
bool is_noop = pred.IsNext(current_block->rpo_number());
if (!is_noop) {
auto& spill_state = data()->GetSpillState(pred);
......@@ -3533,7 +3579,7 @@ void LinearScanAllocator::AllocateRegisters() {
}
if (!no_change_required) {
SpillNotLiveRanges(to_be_live, next_block_boundary);
SpillNotLiveRanges(to_be_live, next_block_boundary, spill_mode);
ReloadLiveRanges(to_be_live, next_block_boundary);
}
......@@ -3569,7 +3615,7 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(!current->HasRegisterAssigned() && !current->spilled());
ProcessCurrentRange(current);
ProcessCurrentRange(current, spill_mode);
}
if (FLAG_trace_alloc) {
......@@ -3578,12 +3624,13 @@ void LinearScanAllocator::AllocateRegisters() {
}
bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
DCHECK(!FLAG_turbo_control_flow_aware_allocation);
DCHECK(range->TopLevel()->IsSplinter());
// If we can spill the whole range, great. Otherwise, split above the
// first use needing a register and spill the top part.
const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
if (next_reg == nullptr) {
Spill(range);
Spill(range, SpillMode::kSpillAtDefinition);
return true;
} else if (range->FirstHintPosition() == nullptr) {
// If there was no hint, but we have a use position requiring a
......@@ -3592,7 +3639,7 @@ bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
} else if (next_reg->pos().PrevStart() > range->Start()) {
LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
AddToUnhandled(tail);
Spill(range);
Spill(range, SpillMode::kSpillAtDefinition);
return true;
}
return false;
......@@ -3708,6 +3755,19 @@ void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
}
}
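// Returns the index of the last instruction in the run of consecutive
// deferred blocks starting at [start]; deferred-mode splits must not extend
// beyond it.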
int LinearScanAllocator::LastDeferredInstructionIndex(InstructionBlock* start) {
DCHECK(start->IsDeferred());
RpoNumber last_block =
RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
while (start->rpo_number() < last_block) {
InstructionBlock* next =
code()->InstructionBlockAt(start->rpo_number().Next());
if (!next->IsDeferred()) break;
start = next;
}
return start->last_instruction_index();
}
void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
int* num_regs, int* num_codes,
const int** codes) const {
......@@ -3825,16 +3885,18 @@ void LinearScanAllocator::FindFreeRegistersForRange(
//
// - a phi. The same analysis as in the case of the input constraint applies.
//
void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
void LinearScanAllocator::ProcessCurrentRange(LiveRange* current,
SpillMode spill_mode) {
EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
free_until_pos;
FindFreeRegistersForRange(current, free_until_pos);
if (!TryAllocatePreferredReg(current, free_until_pos)) {
if (current->TopLevel()->IsSplinter()) {
DCHECK(!FLAG_turbo_control_flow_aware_allocation);
if (TrySplitAndSpillSplinter(current)) return;
}
if (!TryAllocateFreeReg(current, free_until_pos)) {
AllocateBlockedReg(current);
AllocateBlockedReg(current, spill_mode);
}
}
if (current->HasRegisterAssigned()) {
......@@ -3944,12 +4006,13 @@ bool LinearScanAllocator::TryAllocateFreeReg(
return true;
}
void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
SpillMode spill_mode) {
UsePosition* register_use = current->NextRegisterPosition(current->Start());
if (register_use == nullptr) {
// There is no use in the current live range that requires a register.
// We can just spill it.
Spill(current);
Spill(current, spill_mode);
return;
}
......@@ -4053,18 +4116,33 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
// spill until there. The gap position will then fit the fill move.
if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
register_use->pos())) {
SpillBetween(current, current->Start(), register_use->pos());
SpillBetween(current, current->Start(), register_use->pos(), spill_mode);
return;
}
}
// When in deferred spilling mode avoid stealing registers beyond the current
// deferred region. This is required as we otherwise might spill an inactive
// range with a start outside of deferred code and that would not be reloaded.
LifetimePosition new_end = current->End();
if (spill_mode == SpillMode::kSpillDeferred) {
InstructionBlock* deferred_block =
code()->GetInstructionBlock(current->Start().ToInstructionIndex());
new_end = Min(new_end, LifetimePosition::GapFromInstructionIndex(
LastDeferredInstructionIndex(deferred_block)));
}
// We couldn't spill until the next register use. Split before the register
// is blocked, if applicable.
if (block_pos[reg] < current->End()) {
if (block_pos[reg] < new_end) {
// Register becomes blocked before the current range end. Split before that
// position.
LiveRange* tail =
SplitBetween(current, current->Start(), block_pos[reg].Start());
new_end = block_pos[reg].Start();
}
// Split at the new end if we found one.
if (new_end != current->End()) {
LiveRange* tail = SplitBetween(current, current->Start(), new_end);
AddToUnhandled(tail);
}
......@@ -4077,10 +4155,11 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
// This register was not free. Thus we need to find and spill
// parts of active and inactive live regions that use the same register
// at the same lifetime positions as current.
SplitAndSpillIntersecting(current);
SplitAndSpillIntersecting(current, spill_mode);
}
void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
SpillMode spill_mode) {
DCHECK(current->HasRegisterAssigned());
int reg = current->assigned_register();
LifetimePosition split_pos = current->Start();
......@@ -4102,9 +4181,13 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
}
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
// TODO(herhut): Be more clever here as long as we do not move split_pos
// out of deferred code.
LifetimePosition spill_pos = spill_mode == SpillMode::kSpillDeferred
? split_pos
: FindOptimalSpillingPos(range, split_pos);
if (next_pos == nullptr) {
SpillAfter(range, spill_pos);
SpillAfter(range, spill_pos, spill_mode);
} else {
// When spilling between spill_pos and next_pos ensure that the range
// remains spilled at least until the start of the current live range.
......@@ -4116,7 +4199,8 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
// current live-range is larger than their end.
DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
next_pos->pos()));
SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos(),
spill_mode);
}
it = ActiveToHandled(it);
}
......@@ -4147,10 +4231,10 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
if (next_intersection.IsValid()) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == nullptr) {
SpillAfter(range, split_pos);
SpillAfter(range, split_pos, spill_mode);
} else {
next_intersection = Min(next_intersection, next_pos->pos());
SpillBetween(range, split_pos, next_intersection);
SpillBetween(range, split_pos, next_intersection, spill_mode);
}
it = InactiveToHandled(it);
} else {
......@@ -4201,29 +4285,33 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
if (pos == nullptr) {
Spill(range);
Spill(range, SpillMode::kSpillAtDefinition);
return true;
} else if (pos->pos() > range->Start().NextStart()) {
SpillBetween(range, range->Start(), pos->pos());
SpillBetween(range, range->Start(), pos->pos(),
SpillMode::kSpillAtDefinition);
return true;
}
return false;
}
void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos,
SpillMode spill_mode) {
LiveRange* second_part = SplitRangeAt(range, pos);
Spill(second_part);
Spill(second_part, spill_mode);
}
void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
LifetimePosition end) {
SpillBetweenUntil(range, start, start, end);
LifetimePosition end,
SpillMode spill_mode) {
SpillBetweenUntil(range, start, start, end, spill_mode);
}
void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LifetimePosition start,
LifetimePosition until,
LifetimePosition end) {
LifetimePosition end,
SpillMode spill_mode) {
CHECK(start < end);
LiveRange* second_part = SplitRangeAt(range, start);
......@@ -4252,10 +4340,13 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LiveRange* third_part =
SplitBetween(second_part, split_start, third_part_end);
DCHECK(third_part != second_part);
Spill(second_part);
AddToUnhandled(third_part);
// This can happen, even though we checked start < end above, because we
// fiddle with the end location. However, we are guaranteed to be at or
// after until, so this is fine.
if (third_part != second_part) {
Spill(second_part, spill_mode);
}
} else {
// The split result does not intersect with [start, end[.
// Nothing to spill. Just put it to unhandled as whole.
......@@ -4288,6 +4379,34 @@ void SpillSlotLocator::LocateSpillSlots() {
OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
void OperandAssigner::DecideSpillingMode() {
if (FLAG_turbo_control_flow_aware_allocation) {
for (auto range : data()->live_ranges()) {
int max_blocks = data()->code()->InstructionBlockCount();
if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks()) {
// If the range is spilled only in deferred blocks and starts in
// a non-deferred block, we transition its representation here so
// that the LiveRangeConnector processes them correctly. If,
// however, they start in a deferred block, we upgrade them to
// spill at definition, as that definition is in a deferred block
// anyway. While this is an optimization, the code in LiveRangeConnector
// relies on it!
if (GetInstructionBlock(data()->code(), range->Start())->IsDeferred()) {
TRACE("Live range %d is spilled and alive in deferred code only\n",
range->vreg());
range->TransitionRangeToSpillAtDefinition();
} else {
TRACE(
"Live range %d is spilled deferred code only but alive outside\n",
range->vreg());
range->TransitionRangeToDeferredSpill(data()->allocation_zone(),
max_blocks);
}
}
}
}
}
void OperandAssigner::AssignSpillSlots() {
for (auto range : data()->live_ranges()) {
if (range != nullptr && range->get_bundle() != nullptr) {
......@@ -4563,6 +4682,9 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
pred_block->IsDeferred()) {
// The spill location should be defined in pred_block, so add
// pred_block to the list of blocks requiring a spill operand.
TRACE("Adding B%d to list of spill blocks for %d\n",
pred_block->rpo_number().ToInt(),
current->TopLevel()->vreg());
current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
pred_block->rpo_number().ToInt());
}
......@@ -4770,6 +4892,8 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
RpoNumber spill_block_number = spill_block->rpo_number();
if (done_moves.find(std::make_pair(
spill_block_number, range->vreg())) == done_moves.end()) {
TRACE("Spilling deferred spill for range %d at B%d\n", range->vreg(),
spill_block_number.ToInt());
data()->AddGapMove(spill_block->first_instruction_index(),
Instruction::GapPosition::START, pred_op,
spill_operand);
......
......@@ -467,11 +467,11 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
void VerifyIntervals() const;
typedef BitField<bool, 0, 1> SpilledField;
// Bits (1,6] are used by TopLevelLiveRange.
typedef BitField<int32_t, 6, 6> AssignedRegisterField;
typedef BitField<MachineRepresentation, 12, 8> RepresentationField;
typedef BitField<bool, 20, 1> RecombineField;
typedef BitField<uint8_t, 21, 6> ControlFlowRegisterHint;
// Bits (1,7] are used by TopLevelLiveRange.
typedef BitField<int32_t, 7, 6> AssignedRegisterField;
typedef BitField<MachineRepresentation, 13, 8> RepresentationField;
typedef BitField<bool, 21, 1> RecombineField;
typedef BitField<uint8_t, 22, 6> ControlFlowRegisterHint;
// Unique among children and splinters of the same virtual register.
int relative_id_;
......@@ -577,10 +577,23 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
bits_ = IsNonLoopPhiField::update(bits_, value);
}
bool has_slot_use() const { return HasSlotUseField::decode(bits_); }
void set_has_slot_use(bool value) {
bits_ = HasSlotUseField::update(bits_, value);
enum SlotUseKind { kNoSlotUse, kDeferredSlotUse, kGeneralSlotUse };
bool has_slot_use() const {
return slot_use_kind() > SlotUseKind::kNoSlotUse;
}
bool has_non_deferred_slot_use() const {
return slot_use_kind() == SlotUseKind::kGeneralSlotUse;
}
void reset_slot_use() {
bits_ = HasSlotUseField::update(bits_, SlotUseKind::kNoSlotUse);
}
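// Records the strongest slot use seen so far: a general slot use wins over
// a deferred-only one (see the SlotUseKind ordering above).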
void register_slot_use(SlotUseKind value) {
bits_ = HasSlotUseField::update(bits_, Max(slot_use_kind(), value));
}
SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); }
// Add a new interval or a new use position to this live range.
void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
......@@ -602,7 +615,24 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// Spill range management.
void SetSpillRange(SpillRange* spill_range);
enum class SpillType { kNoSpillType, kSpillOperand, kSpillRange };
// Encodes whether a range is also available from a memory location:
// kNoSpillType: not available in a memory location.
// kSpillOperand: computed in a memory location at range start.
// kSpillRange: copied (spilled) to a memory location at range start.
// kDeferredSpillRange: copied (spilled) to a memory location at entry
// to deferred blocks that have a use from memory.
//
// Ranges either start out at kSpillOperand, which is also their final
// state, or kNoSpillType. When spilled only in deferred code, a range
// ends up with kDeferredSpillRange, while when spilled in regular code,
// a range will be tagged as kSpillRange.
enum class SpillType {
kNoSpillType,
kSpillOperand,
kSpillRange,
kDeferredSpillRange
};
void set_spill_type(SpillType value) {
bits_ = SpillTypeField::update(bits_, value);
}
......@@ -618,7 +648,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
}
SpillRange* GetSpillRange() const {
DCHECK_EQ(SpillType::kSpillRange, spill_type());
DCHECK_GE(spill_type(), SpillType::kSpillRange);
return spill_range_;
}
bool HasNoSpillType() const {
......@@ -627,8 +657,10 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
bool HasSpillOperand() const {
return spill_type() == SpillType::kSpillOperand;
}
bool HasSpillRange() const { return spill_type() == SpillType::kSpillRange; }
bool HasSpillRange() const { return spill_type() >= SpillType::kSpillRange; }
bool HasGeneralSpillRange() const {
return spill_type() == SpillType::kSpillRange;
}
AllocatedOperand GetSpillRangeOperand() const;
void RecordSpillLocation(Zone* zone, int gap_index,
......@@ -650,6 +682,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// deferred blocks. If so, we insert here spills for non-spilled ranges
// with slot use positions.
void TreatAsSpilledInDeferredBlock(Zone* zone, int total_block_count) {
DCHECK(!FLAG_turbo_control_flow_aware_allocation);
spill_start_index_ = -1;
spilled_in_deferred_blocks_ = true;
spill_move_insertion_locations_ = nullptr;
......@@ -657,9 +690,24 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
new (zone) BitVector(total_block_count, zone);
}
void CommitSpillInDeferredBlocks(RegisterAllocationData* data,
const InstructionOperand& spill_operand,
BitVector* necessary_spill_points);
// Updates internal data structures to reflect that this range is not
// spilled at definition but instead spilled in some blocks only.
void TransitionRangeToDeferredSpill(Zone* zone, int total_block_count) {
DCHECK(FLAG_turbo_control_flow_aware_allocation);
spill_start_index_ = -1;
spill_move_insertion_locations_ = nullptr;
list_of_blocks_requiring_spill_operands_ =
new (zone) BitVector(total_block_count, zone);
}
// Promotes this range to spill at definition if it was marked for spilling
// in deferred blocks before.
void TransitionRangeToSpillAtDefinition() {
DCHECK_NOT_NULL(spill_move_insertion_locations_);
if (spill_type() == SpillType::kDeferredSpillRange) {
set_spill_type(SpillType::kSpillRange);
}
}
TopLevelLiveRange* splintered_from() const { return splintered_from_; }
bool IsSplinter() const { return splintered_from_ != nullptr; }
......@@ -685,6 +733,9 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
int GetMaxChildCount() const { return last_child_id_ + 1; }
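// Under control-flow-aware allocation the spill type encodes this directly;
// the splintering path still tracks it with a separate flag.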
bool IsSpilledOnlyInDeferredBlocks() const {
if (FLAG_turbo_control_flow_aware_allocation) {
return spill_type() == SpillType::kDeferredSpillRange;
}
return spilled_in_deferred_blocks_;
}
......@@ -723,10 +774,10 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
friend class LiveRange;
void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
typedef BitField<bool, 1, 1> HasSlotUseField;
typedef BitField<bool, 2, 1> IsPhiField;
typedef BitField<bool, 3, 1> IsNonLoopPhiField;
typedef BitField<SpillType, 4, 2> SpillTypeField;
typedef BitField<SlotUseKind, 1, 2> HasSlotUseField;
typedef BitField<bool, 3, 1> IsPhiField;
typedef BitField<bool, 4, 1> IsNonLoopPhiField;
typedef BitField<SpillType, 5, 2> SpillTypeField;
int vreg_;
int last_child_id_;
......@@ -806,6 +857,9 @@ class SpillRange final : public ZoneObject {
class RegisterAllocationData final : public ZoneObject {
public:
// Encodes whether a spill happens in deferred code (kSpillDeferred) or
// regular code (kSpillAtDefinition).
enum SpillMode { kSpillAtDefinition, kSpillDeferred };
class PhiMapValue : public ZoneObject {
public:
PhiMapValue(PhiInstruction* phi, const InstructionBlock* block, Zone* zone);
......@@ -895,7 +949,8 @@ class RegisterAllocationData final : public ZoneObject {
TopLevelLiveRange* NewLiveRange(int index, MachineRepresentation rep);
TopLevelLiveRange* NextLiveRange(MachineRepresentation rep);
SpillRange* AssignSpillRangeToLiveRange(TopLevelLiveRange* range);
SpillRange* AssignSpillRangeToLiveRange(TopLevelLiveRange* range,
SpillMode spill_mode);
SpillRange* CreateSpillRangeForLiveRange(TopLevelLiveRange* range);
MoveOperands* AddGapMove(int index, Instruction::GapPosition position,
......@@ -1005,6 +1060,8 @@ class LiveRangeBuilder final : public ZoneObject {
RegisterAllocationData* data);
private:
using SpillMode = RegisterAllocationData::SpillMode;
RegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
Zone* allocation_zone() const { return data()->allocation_zone(); }
......@@ -1054,7 +1111,6 @@ class LiveRangeBuilder final : public ZoneObject {
InstructionOperand* operand) {
Use(block_start, position, operand, nullptr, UsePositionHintType::kNone);
}
RegisterAllocationData* const data_;
ZoneMap<InstructionOperand*, UsePosition*> phi_hints_;
......@@ -1079,6 +1135,7 @@ class RegisterAllocator : public ZoneObject {
RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
protected:
using SpillMode = RegisterAllocationData::SpillMode;
RegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
RegisterKind mode() const { return mode_; }
......@@ -1122,7 +1179,7 @@ class RegisterAllocator : public ZoneObject {
LifetimePosition FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end);
void Spill(LiveRange* range);
void Spill(LiveRange* range, SpillMode spill_mode);
// If we are trying to spill a range inside the loop try to
// hoist spill position out to the point just before the loop.
......@@ -1183,12 +1240,13 @@ class LinearScanAllocator final : public RegisterAllocator {
void MaybeUndoPreviousSplit(LiveRange* range);
void SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
LifetimePosition position);
LifetimePosition position, SpillMode spill_mode);
LiveRange* AssignRegisterOnReload(LiveRange* range, int reg);
void ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
LifetimePosition position);
bool BlockOrImmediatePredecessorIsDeferred(const InstructionBlock* block);
bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
const InstructionBlock* block);
struct LiveRangeOrdering {
bool operator()(const LiveRange* a, const LiveRange* b) const {
......@@ -1219,6 +1277,8 @@ class LinearScanAllocator final : public RegisterAllocator {
void ForwardStateTo(LifetimePosition position);
int LastDeferredInstructionIndex(InstructionBlock* start);
// Helper methods for choosing state after control flow events.
bool ConsiderBlockForControlFlow(InstructionBlock* current_block,
......@@ -1241,23 +1301,23 @@ class LinearScanAllocator final : public RegisterAllocator {
int* num_codes, const int** codes) const;
void FindFreeRegistersForRange(LiveRange* range,
Vector<LifetimePosition> free_until_pos);
void ProcessCurrentRange(LiveRange* current);
void AllocateBlockedReg(LiveRange* range);
void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode);
void AllocateBlockedReg(LiveRange* range, SpillMode spill_mode);
bool TrySplitAndSpillSplinter(LiveRange* range);
// Spill the given live range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos);
void SpillAfter(LiveRange* range, LifetimePosition pos, SpillMode spill_mode);
// Spill the given live range after position [start] and up to position [end].
void SpillBetween(LiveRange* range, LifetimePosition start,
LifetimePosition end);
LifetimePosition end, SpillMode spill_mode);
// Spill the given live range after position [start] and up to position [end].
// Range is guaranteed to be spilled at least until position [until].
void SpillBetweenUntil(LiveRange* range, LifetimePosition start,
LifetimePosition until, LifetimePosition end);
void SplitAndSpillIntersecting(LiveRange* range);
LifetimePosition until, LifetimePosition end,
SpillMode spill_mode);
void SplitAndSpillIntersecting(LiveRange* range, SpillMode spill_mode);
void PrintRangeRow(std::ostream& os, const TopLevelLiveRange* toplevel);
......@@ -1297,10 +1357,13 @@ class OperandAssigner final : public ZoneObject {
public:
explicit OperandAssigner(RegisterAllocationData* data);
// Phase 5: assign spill slots.
// Phase 5: final decision on spilling mode.
void DecideSpillingMode();
// Phase 6: assign spill slots.
void AssignSpillSlots();
// Phase 6: commit assignment.
// Phase 7: commit assignment.
void CommitAssignment();
private:
......@@ -1315,7 +1378,7 @@ class ReferenceMapPopulator final : public ZoneObject {
public:
explicit ReferenceMapPopulator(RegisterAllocationData* data);
// Phase 7: compute values for pointer maps.
// Phase 8: compute values for pointer maps.
void PopulateReferenceMaps();
private:
......@@ -1340,11 +1403,11 @@ class LiveRangeConnector final : public ZoneObject {
public:
explicit LiveRangeConnector(RegisterAllocationData* data);
// Phase 8: reconnect split ranges with moves, when the control flow
// Phase 9: reconnect split ranges with moves, when the control flow
// between the ranges is trivial (no branches).
void ConnectRanges(Zone* local_zone);
// Phase 9: insert moves to connect ranges across basic blocks, when the
// Phase 10: insert moves to connect ranges across basic blocks, when the
// control flow between them cannot be trivially resolved, such as joining
// branches.
void ResolveControlFlow(Zone* local_zone);
......
......@@ -1721,6 +1721,14 @@ struct LocateSpillSlotsPhase {
}
};
struct DecideSpillingModePhase {
static const char* phase_name() { return "decide spilling mode"; }
void Run(PipelineData* data, Zone* temp_zone) {
OperandAssigner assigner(data->register_allocation_data());
assigner.DecideSpillingMode();
}
};
struct AssignSpillSlotsPhase {
static const char* phase_name() { return "assign spill slots"; }
......@@ -2875,8 +2883,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<MergeSplintersPhase>();
}
Run<DecideSpillingModePhase>();
Run<AssignSpillSlotsPhase>();
Run<CommitAssignmentPhase>();
// TODO(chromium:725559): remove this check once
......@@ -2894,7 +2902,6 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
if (FLAG_turbo_move_optimization) {
Run<OptimizeMovesPhase>();
}
Run<LocateSpillSlotsPhase>();
TraceSequence(info(), data, "after register allocation");
......
......@@ -411,6 +411,8 @@ DEFINE_BOOL(turbo_preprocess_ranges, true,
"run pre-register allocation heuristics")
DEFINE_BOOL(turbo_control_flow_aware_allocation, false,
"consider control flow while allocating registers")
DEFINE_NEG_IMPLICATION(turbo_control_flow_aware_allocation,
turbo_preprocess_ranges)
DEFINE_STRING(turbo_filter, "*", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
......