Commit 33629651 authored by mtrofin, committed by Commit bot

[turbofan] Avoid large deopt blocks

Treat allocation of splintered ranges differently, by optimizing for move
count (i.e. try to have fewer moves) rather than for the quality of moves
(which is what normal allocation does).

We can see reductions in code size in the benchmarks that measure it
(e.g. Unity).
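
In outline, the new splinter handling works as follows (a condensed sketch of
ProcessCurrentRange from the register-allocator.cc changes below, with the
free_until_pos setup elided; see the diff for the literal code):

    void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
      // Compute, once, how long each register stays free.
      FindFreeRegistersForRange(current, free_until_pos);
      if (!TryAllocatePreferredReg(current, free_until_pos)) {
        // Splinters live in cold (deferred) code: if the hinted register is
        // unavailable, spilling costs at most one move, while hunting for a
        // different free register costs at least one.
        if (current->TopLevel()->IsSplinter() &&
            TrySplitAndSpillSplinter(current)) {
          return;
        }
        if (!TryAllocateFreeReg(current, free_until_pos)) {
          AllocateBlockedReg(current);
        }
      }
      if (current->HasRegisterAssigned()) AddToActive(current);
    }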

BUG=

Review-Url: https://codereview.chromium.org/2347563004
Cr-Commit-Position: refs/heads/master@{#40178}
parent e46f1016
@@ -589,9 +589,7 @@ InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
       handler_(handler),
       needs_frame_(false),
       must_construct_frame_(false),
-      must_deconstruct_frame_(false),
-      last_deferred_(RpoNumber::Invalid()) {}
+      must_deconstruct_frame_(false) {}
 
 size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
   size_t j = 0;
......
@@ -1300,9 +1300,6 @@ class InstructionBlock final : public ZoneObject {
   bool must_deconstruct_frame() const { return must_deconstruct_frame_; }
   void mark_must_deconstruct_frame() { must_deconstruct_frame_ = true; }
 
-  void set_last_deferred(RpoNumber last) { last_deferred_ = last; }
-  RpoNumber last_deferred() const { return last_deferred_; }
-
  private:
   Successors successors_;
   Predecessors predecessors_;
@@ -1318,7 +1315,6 @@ class InstructionBlock final : public ZoneObject {
   bool needs_frame_;
   bool must_construct_frame_;
   bool must_deconstruct_frame_;
-  RpoNumber last_deferred_;
 };
 
 class InstructionSequence;
......
@@ -342,6 +342,11 @@ UsePositionHintType UsePosition::HintTypeForOperand(
   return UsePositionHintType::kNone;
 }
 
+void UsePosition::SetHint(UsePosition* use_pos) {
+  DCHECK_NOT_NULL(use_pos);
+  hint_ = use_pos;
+  flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
+}
+
 void UsePosition::ResolveHint(UsePosition* use_pos) {
   DCHECK_NOT_NULL(use_pos);
@@ -581,7 +586,9 @@ void LiveRange::AdvanceLastProcessedMarker(
 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
   int new_id = TopLevel()->GetNextChildId();
   LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
-  DetachAt(position, child, zone);
+  // If we split, we do so because we're about to switch registers or move
+  // to/from a slot, so there's no value in connecting hints.
+  DetachAt(position, child, zone, DoNotConnectHints);
   child->top_level_ = TopLevel();
   child->next_ = next_;
@@ -589,9 +596,9 @@ LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
   return child;
 }
 
 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
-                                 Zone* zone) {
+                                 Zone* zone,
+                                 HintConnectionOption connect_hints) {
   DCHECK(Start() < position);
   DCHECK(End() > position);
   DCHECK(result->IsEmpty());
@@ -670,6 +677,10 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
   last_processed_use_ = nullptr;
   current_interval_ = nullptr;
 
+  if (connect_hints == ConnectHints && use_before != nullptr &&
+      use_after != nullptr) {
+    use_after->SetHint(use_before);
+  }
 #ifdef DEBUG
   VerifyChildStructure();
   result->VerifyChildStructure();
@@ -912,17 +923,21 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
   if (end >= End()) {
     DCHECK(start > Start());
-    DetachAt(start, &splinter_temp, zone);
+    DetachAt(start, &splinter_temp, zone, ConnectHints);
     next_ = nullptr;
   } else {
     DCHECK(start < End() && Start() < end);
 
     const int kInvalidId = std::numeric_limits<int>::max();
-    UsePosition* last = DetachAt(start, &splinter_temp, zone);
+    UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
 
     LiveRange end_part(kInvalidId, this->representation(), nullptr);
-    last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
+    // The last chunk exits the deferred region, and we don't want to connect
+    // hints here, because the non-deferred region shouldn't be affected
+    // by allocation decisions on the deferred path.
+    last_in_splinter =
+        splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
 
     next_ = end_part.next_;
     last_interval_->set_next(end_part.first_interval_);
@@ -2401,7 +2416,13 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
       if (next_pos.IsGapPosition()) {
        next_pos = next_pos.NextStart();
       }
-      UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+
+      // With splinters, we can be more strict and skip over positions
+      // not strictly needing registers.
+      UsePosition* pos =
+          range->IsSplinter()
+              ? range->NextRegisterPosition(next_pos)
+              : range->NextUsePositionRegisterIsBeneficial(next_pos);
+
       // If the range already has a spill operand and it doesn't need a
       // register immediately, split it and spill the first part of the range.
       if (pos == nullptr) {
@@ -2634,14 +2655,28 @@ void LinearScanAllocator::AllocateRegisters() {
     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
 
-    bool result = TryAllocateFreeReg(current);
-    if (!result) AllocateBlockedReg(current);
-    if (current->HasRegisterAssigned()) {
-      AddToActive(current);
-    }
+    ProcessCurrentRange(current);
   }
 }
 
+bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
+  DCHECK(range->TopLevel()->IsSplinter());
+  // If there was no hint, apply regular (hot path) heuristics.
+  if (range->FirstHintPosition() == nullptr) return false;
+  // If we can spill the whole range, great. Otherwise, split above the
+  // first use needing a register and spill the top part.
+  const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
+  if (next_reg == nullptr) {
+    Spill(range);
+    return true;
+  } else if (next_reg->pos().PrevStart() > range->Start()) {
+    LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
+    AddToUnhandledSorted(tail);
+    Spill(range);
+    return true;
+  }
+  return false;
+}
+
 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
                                                        int reg) {
@@ -2757,35 +2792,88 @@ void LinearScanAllocator::InactiveToActive(LiveRange* range) {
         range->TopLevel()->vreg(), range->relative_id());
 }
 
-bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
+void LinearScanAllocator::FindFreeRegistersForRange(
+    LiveRange* range, Vector<LifetimePosition> positions) {
   int num_regs = num_registers();
   int num_codes = num_allocatable_registers();
   const int* codes = allocatable_register_codes();
+  DCHECK_GE(positions.length(), num_regs);
 
-  LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
   for (int i = 0; i < num_regs; i++) {
-    free_until_pos[i] = LifetimePosition::MaxPosition();
+    positions[i] = LifetimePosition::MaxPosition();
   }
 
   for (LiveRange* cur_active : active_live_ranges()) {
     int cur_reg = cur_active->assigned_register();
-    free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
+    positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
     TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
           LifetimePosition::GapFromInstructionIndex(0).value());
   }
 
   for (LiveRange* cur_inactive : inactive_live_ranges()) {
-    DCHECK(cur_inactive->End() > current->Start());
-    LifetimePosition next_intersection =
-        cur_inactive->FirstIntersection(current);
+    DCHECK(cur_inactive->End() > range->Start());
+    LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = cur_inactive->assigned_register();
-    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+    positions[cur_reg] = Min(positions[cur_reg], next_intersection);
     TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
-          Min(free_until_pos[cur_reg], next_intersection).value());
-  }
+          Min(positions[cur_reg], next_intersection).value());
   }
+}
+
+// High-level register allocation summary:
+//
+// For regular, or hot (i.e. not splinter) ranges, we first attempt to
+// allocate the preferred (hint) register. If that is not possible, we find
+// a register that's free, and allocate that. If that's not possible either,
+// we search for a register to steal from a range that was already allocated.
+// The goal is to optimize for throughput by avoiding register-to-memory
+// moves, which are expensive.
+//
+// For splinters, the goal is to minimize the number of moves. First we try
+// to allocate the preferred register (more discussion follows). Failing that,
+// we bail out and spill as far as we can, unless the first use is at start,
+// in which case we apply the same behavior as we do for regular ranges.
+// If there is no hint, we apply the hot-path behavior.
+//
+// For the splinter, the hint register may come from:
+//
+// - the hot path (we set it at splintering time with SetHint). In this case,
+// if we cannot offer the hint register, spilling is better because it costs
+// at most 1 move, while trying to find and offer another register costs at
+// least 1 move.
+//
+// - a constraint. If we cannot offer that register, it's because there is
+// some interference. Offering the hint register up to the interference would
+// result in a move at the interference, plus a move to satisfy the
+// constraint. This is also the number of moves if we spill, with the
+// potential of the range being already spilled and thus saving a move (the
+// spill). Note that this can only be an input constraint; if it were an
+// output one, the range wouldn't be a splinter, because that would mean it's
+// defined in a deferred block, and we don't mark those as splinters (they
+// live in deferred blocks only).
+//
+// - a phi. The same analysis as in the case of the input constraint applies.
+//
+void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
+  LifetimePosition free_until_pos_buff[RegisterConfiguration::kMaxFPRegisters];
+  Vector<LifetimePosition> free_until_pos(
+      free_until_pos_buff, RegisterConfiguration::kMaxFPRegisters);
+  FindFreeRegistersForRange(current, free_until_pos);
+  if (!TryAllocatePreferredReg(current, free_until_pos)) {
+    if (current->TopLevel()->IsSplinter()) {
+      if (TrySplitAndSpillSplinter(current)) return;
+    }
+    if (!TryAllocateFreeReg(current, free_until_pos)) {
+      AllocateBlockedReg(current);
+    }
+  }
+  if (current->HasRegisterAssigned()) {
+    AddToActive(current);
+  }
+}
+
+bool LinearScanAllocator::TryAllocatePreferredReg(
+    LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
   int hint_register;
   if (current->FirstHintPosition(&hint_register) != nullptr) {
     TRACE(
@@ -2803,6 +2891,14 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
       return true;
     }
   }
+  return false;
+}
+
+bool LinearScanAllocator::TryAllocateFreeReg(
+    LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
+  int num_codes = num_allocatable_registers();
+  const int* codes = allocatable_register_codes();
+  DCHECK_GE(free_until_pos.length(), num_codes);
 
   // Find the register which stays free for the longest time.
   int reg = codes[0];
@@ -2837,7 +2933,6 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
   return true;
 }
 
-
 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
   UsePosition* register_use = current->NextRegisterPosition(current->Start());
   if (register_use == nullptr) {
......
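
To make the move-count argument in the summary comment above concrete (the
register names here are purely illustrative):

    // Suppose a vreg sits in rax on the hot path, and its live range is
    // splintered across a deferred block in which rax is unavailable:
    //
    //  - spill the splinter: at most 1 store (0 if the top-level range is
    //    already spilled); the cold code reads from the slot;
    //  - allocate some other register rbx: at least 1 move (rax -> rbx) on
    //    entry, and possibly another (rbx -> rax) on the way out.
    //
    // Spilling never needs more moves, and since deferred code is rarely
    // executed, the slot accesses there are cheap in practice.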
@@ -275,6 +275,7 @@ class UsePosition final : public ZoneObject {
   }
   bool HasHint() const;
   bool HintRegister(int* register_code) const;
+  void SetHint(UsePosition* use_pos);
   void ResolveHint(UsePosition* use_pos);
   bool IsResolved() const {
     return hint_type() != UsePositionHintType::kUnresolved;
@@ -368,8 +369,12 @@ class LiveRange : public ZoneObject {
   // live range to the result live range.
   // The current range will terminate at position, while result will start from
   // position.
+  enum HintConnectionOption : bool {
+    DoNotConnectHints = false,
+    ConnectHints = true
+  };
   UsePosition* DetachAt(LifetimePosition position, LiveRange* result,
-                        Zone* zone);
+                        Zone* zone, HintConnectionOption connect_hints);
 
   // Detaches at position, and then links the resulting ranges. Returns the
   // child, which starts at position.
@@ -1047,8 +1052,15 @@ class LinearScanAllocator final : public RegisterAllocator {
   // Helper methods for allocating registers.
   bool TryReuseSpillForPhi(TopLevelLiveRange* range);
-  bool TryAllocateFreeReg(LiveRange* range);
+  bool TryAllocateFreeReg(LiveRange* range,
+                          const Vector<LifetimePosition>& free_until_pos);
+  bool TryAllocatePreferredReg(LiveRange* range,
+                               const Vector<LifetimePosition>& free_until_pos);
+  void FindFreeRegistersForRange(LiveRange* range,
+                                 Vector<LifetimePosition> free_until_pos);
+  void ProcessCurrentRange(LiveRange* current);
   void AllocateBlockedReg(LiveRange* range);
+  bool TrySplitAndSpillSplinter(LiveRange* range);
 
   // Spill the given life range after position pos.
   void SpillAfter(LiveRange* range, LifetimePosition pos);
......
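
For the split case in TrySplitAndSpillSplinter above, the shape is as follows
(an illustration, assuming a splinter whose first register-requiring use is at
position P, with P.PrevStart() after the range start):

    //  range->Start()            P.PrevStart()             range->End()
    //        |---- head: spilled ----|---- tail: re-queued ----|
    //
    // The head is spilled outright; the tail goes back on the unhandled
    // list and is allocated normally, so a register materializes only
    // right before the use that actually needs one.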