Commit d024b9a1 authored by Stephan Herhut, committed by Commit Bot

[regalloc] Introduce LiveRangeBundles

The idea behind this change is to restore some information about
pre-SSA values to aid register allocation in sharing spill slots and
reusing registers for connected live ranges.

By itself, this change does not improve much but it allows upcoming
changes to freely spill and reload ranges without worrying about
keeping the assignment stable.

Change-Id: I9320522592546655cc8fd0236d45fe075276a49e
Reviewed-on: https://chromium-review.googlesource.com/c/1375665
Commit-Queue: Stephan Herhut <herhut@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58688}
parent ba56d282
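
To illustrate the idea (example mine, not part of the CL): the "pre-SSA values" mentioned above are source-level variables that SSA construction splits into several virtual registers joined by phis.

// Hypothetical source pattern: the single variable `x` becomes three SSA
// values, hence three live ranges the allocator otherwise treats
// independently: v1 = 1 (then-branch), v2 = 2 (else-branch),
// v3 = phi(v1, v2). Since v1, v2 and v3 never overlap, one LiveRangeBundle
// can hold all three, letting them share a register or spill slot and
// avoiding fixup moves on the control-flow edges.
int example(bool cond) {
  int x;
  if (cond) {
    x = 1;   // defines v1
  } else {
    x = 2;   // defines v2
  }
  return x;  // uses v3 = phi(v1, v2)
}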
@@ -552,6 +552,7 @@ void LiveRange::AdvanceLastProcessedMarker(
LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
int new_id = TopLevel()->GetNextChildId();
LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
child->set_bundle(bundle_);
// If we split, we do so because we're about to switch registers or move
// to/from a slot, so there's no value in connecting hints.
DetachAt(position, child, zone, DoNotConnectHints);
@@ -789,6 +790,17 @@ void LiveRange::Print(bool with_children) const {
Print(RegisterConfiguration::Default(), with_children);
}
bool LiveRange::RegisterFromBundle(int* hint) const {
if (bundle_ == nullptr || bundle_->reg() == kUnassignedRegister) return false;
*hint = bundle_->reg();
return true;
}
void LiveRange::UpdateBundleRegister(int reg) const {
if (bundle_ == nullptr || bundle_->reg() != kUnassignedRegister) return;
bundle_->set_reg(reg);
}
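
These two helpers form the hinting handshake between a bundle and the linear-scan loop. A minimal standalone model (simplified; the names and types below are stand-ins, not V8 API):

#include <cassert>

constexpr int kUnassignedRegister = -1;
struct Bundle { int reg = kUnassignedRegister; };
struct Range { Bundle* bundle = nullptr; };

// Mirrors UpdateBundleRegister: the first range of a bundle to receive a
// register records it on the bundle.
void OnRegisterAssigned(Range* r, int reg) {
  if (r->bundle == nullptr || r->bundle->reg != kUnassignedRegister) return;
  r->bundle->reg = reg;
}

// Mirrors RegisterFromBundle: later ranges of the bundle pick the recorded
// register up as an allocation hint.
bool HintFromBundle(const Range* r, int* hint) {
  if (r->bundle == nullptr || r->bundle->reg == kUnassignedRegister)
    return false;
  *hint = r->bundle->reg;
  return true;
}

int main() {
  Bundle b;
  Range r1{&b}, r2{&b};
  OnRegisterAssigned(&r1, 3);  // r1 is assigned register 3; b remembers it
  int hint = kUnassignedRegister;
  assert(HintFromBundle(&r2, &hint) && hint == 3);  // r2 is hinted to 3
  return 0;
}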
struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
SpillMoveInsertionList* next)
@@ -2605,6 +2617,100 @@ bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
return block->rpo_number() < next_block->rpo_number();
}
void BundleBuilder::BuildBundles() {
TRACE("Build bundles\n");
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
InstructionBlock* block =
code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
TRACE("Block B%d\n", block_id);
for (auto phi : block->phis()) {
LiveRange* out_range =
data()->GetOrCreateLiveRangeFor(phi->virtual_register());
LiveRangeBundle* out = out_range->get_bundle();
if (out == nullptr) {
out = new (data()->allocation_zone())
LiveRangeBundle(data()->allocation_zone());
out->TryAddRange(out_range);
}
TRACE("Processing phi for v%d with %d:%d\n", phi->virtual_register(),
out_range->TopLevel()->vreg(), out_range->relative_id());
for (auto input : phi->operands()) {
LiveRange* input_range = data()->GetOrCreateLiveRangeFor(input);
TRACE("Input value v%d with range %d:%d\n", input,
input_range->TopLevel()->vreg(), input_range->relative_id());
LiveRangeBundle* input_bundle = input_range->get_bundle();
if (input_bundle != nullptr) {
TRACE("Merge\n");
if (out->TryMerge(input_bundle))
TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
out->id());
} else {
TRACE("Add\n");
if (out->TryAddRange(input_range))
TRACE("Added %d and %d to %d\n", phi->virtual_register(), input,
out->id());
}
}
}
TRACE("Done block B%d\n", block_id);
}
}
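
Setting the overlap checks aside, the pass computes a partition of virtual registers in which each phi tries to place its output and all of its inputs into one bundle. A toy model of that partition (mine; real bundles refuse a merge when use intervals overlap, so the real partition can be finer):

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

// Union-find root lookup with path halving; the root stands in for the
// bundle id.
int Find(std::map<int, int>& parent, int v) {
  while (parent[v] != v) v = parent[v] = parent[parent[v]];
  return v;
}

int main() {
  // Two phis: v3 = phi(v1, v2) and v6 = phi(v3, v4).
  std::vector<std::pair<int, std::vector<int>>> phis = {{3, {1, 2}},
                                                        {6, {3, 4}}};
  std::map<int, int> parent;
  for (int v : {1, 2, 3, 4, 6}) parent[v] = v;
  // Union each phi input with the phi output, as BuildBundles attempts.
  for (auto& [out, inputs] : phis)
    for (int in : inputs) parent[Find(parent, in)] = Find(parent, out);
  for (int v : {1, 2, 3, 4, 6})
    std::printf("v%d is in the bundle rooted at v%d\n", v, Find(parent, v));
  return 0;
}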
bool LiveRangeBundle::TryAddRange(LiveRange* range) {
DCHECK_NULL(range->get_bundle());
// We may only add a new live range if its use intervals do not
// overlap with existing intervals in the bundle.
if (UsesOverlap(range->first_interval())) return false;
ranges_.insert(range);
range->set_bundle(this);
InsertUses(range->first_interval());
return true;
}
int LiveRangeBundle::bundle_id = 0;
bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) {
if (other == this) return true;
auto iter1 = uses_.begin();
auto iter2 = other->uses_.begin();
while (iter1 != uses_.end() && iter2 != other->uses_.end()) {
if (iter1->start > iter2->end) {
++iter2;
} else if (iter2->start > iter1->end) {
++iter1;
} else {
TRACE("No merge %d:%d %d:%d\n", iter1->start, iter1->end, iter2->start,
iter2->end);
return false;
}
}
// Uses are disjoint, merging is possible.
for (auto it = other->ranges_.begin(); it != other->ranges_.end(); ++it) {
(*it)->set_bundle(this);
InsertUses((*it)->first_interval());
}
ranges_.insert(other->ranges_.begin(), other->ranges_.end());
other->ranges_.clear();
return true;
}
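
The loop above is a two-pointer sweep over two start-sorted sets of disjoint intervals: pairs that are strictly ordered are skipped, and any remaining pair must intersect, so the merge is refused. A self-contained sketch of the same test (assumes half-open [start, end) intervals; the comparison in the CL is slightly more conservative and also rejects merely touching intervals):

#include <cstdio>
#include <set>

struct Range {
  int start, end;  // interval [start, end)
  bool operator<(const Range& other) const { return start < other.start; }
};

// Returns true when no interval of `a` intersects an interval of `b`.
bool Disjoint(const std::set<Range>& a, const std::set<Range>& b) {
  auto it1 = a.begin(), it2 = b.begin();
  while (it1 != a.end() && it2 != b.end()) {
    if (it1->start >= it2->end) {
      ++it2;  // b's interval lies entirely before a's
    } else if (it2->start >= it1->end) {
      ++it1;  // a's interval lies entirely before b's
    } else {
      return false;  // the two intervals intersect
    }
  }
  return true;
}

int main() {
  std::set<Range> a = {{0, 4}, {10, 12}};
  std::set<Range> b = {{4, 10}};
  std::printf("%s\n", Disjoint(a, b) ? "disjoint" : "overlap");  // disjoint
  return 0;
}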
void LiveRangeBundle::MergeSpillRanges() {
SpillRange* target = nullptr;
for (auto range : ranges_) {
if (range->TopLevel()->HasSpillRange()) {
SpillRange* current = range->TopLevel()->GetSpillRange();
if (target == nullptr) {
target = current;
} else if (target != current) {
target->TryMerge(current);
}
}
}
}
RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
RegisterKind kind)
: data_(data),
@@ -2897,14 +3003,15 @@ void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
data()->MarkAllocated(range->representation(), reg);
range->set_assigned_register(reg);
range->SetUseHints(reg);
range->UpdateBundleRegister(reg);
if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
}
}
void LinearScanAllocator::AddToActive(LiveRange* range) {
TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
range->relative_id());
TRACE("Add live range %d:%d in %s to active\n", range->TopLevel()->vreg(),
range->relative_id(), RegisterName(range->assigned_register()));
active_live_ranges().push_back(range);
next_active_ranges_change_ =
std::min(next_active_ranges_change_, range->NextEndAfter(range->Start()));
@@ -3037,8 +3144,10 @@ void LinearScanAllocator::FindFreeRegistersForRange(
int cur_reg = cur_active->assigned_register();
if (kSimpleFPAliasing || !check_fp_aliasing()) {
positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
LifetimePosition::GapFromInstructionIndex(0).value());
TRACE("Register %s is free until pos %d (1) due to %d\n",
RegisterName(cur_reg),
LifetimePosition::GapFromInstructionIndex(0).value(),
cur_active->TopLevel()->vreg());
} else {
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
@@ -3136,7 +3245,8 @@ void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
bool LinearScanAllocator::TryAllocatePreferredReg(
LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
int hint_register;
- if (current->FirstHintPosition(&hint_register) != nullptr) {
if (current->FirstHintPosition(&hint_register) != nullptr ||
current->RegisterFromBundle(&hint_register)) {
TRACE(
"Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
RegisterName(hint_register), free_until_pos[hint_register].value(),
@@ -3178,7 +3288,8 @@ bool LinearScanAllocator::TryAllocateFreeReg(
// as their available time is shorter.
int hint_reg = kUnassignedRegister;
int reg = codes[0];
- if (current->FirstHintPosition(&hint_reg) != nullptr) {
if (current->FirstHintPosition(&hint_reg) != nullptr ||
current->RegisterFromBundle(&hint_reg)) {
reg = hint_reg;
}
for (int i = 0; i < num_codes; ++i) {
@@ -3326,7 +3437,8 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
}
int reg = codes[0];
- for (int i = 1; i < num_codes; ++i) {
register_use->HintRegister(&reg) || current->RegisterFromBundle(&reg);
for (int i = 0; i < num_codes; ++i) {
int code = codes[i];
if (use_pos[code] > use_pos[reg]) {
reg = code;
@@ -3448,13 +3560,14 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
if (!range->is_phi()) return false;
DCHECK(!range->HasSpillOperand());
// Check how many operands belong to the same bundle as the output.
LiveRangeBundle* out_bundle = range->get_bundle();
RegisterAllocationData::PhiMapValue* phi_map_value =
data()->GetPhiMapValueFor(range);
const PhiInstruction* phi = phi_map_value->phi();
const InstructionBlock* block = phi_map_value->block();
// Count the number of spilled operands.
size_t spilled_count = 0;
- LiveRange* first_op = nullptr;
for (size_t i = 0; i < phi->operands().size(); i++) {
int op = phi->operands()[i];
LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
@@ -3467,63 +3580,27 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
while (op_range != nullptr && !op_range->CanCover(pred_end)) {
op_range = op_range->next();
}
- if (op_range != nullptr && op_range->spilled()) {
if (op_range != nullptr && op_range->spilled() &&
op_range->get_bundle() == out_bundle) {
spilled_count++;
- if (first_op == nullptr) {
- first_op = op_range->TopLevel();
- }
}
}
- // Only continue if more than half of the operands are spilled.
// Only continue if more than half of the operands are spilled to the same
// slot (because part of same bundle).
if (spilled_count * 2 <= phi->operands().size()) {
return false;
}
- // Try to merge the spilled operands and count the number of merged spilled
- // operands.
- DCHECK_NOT_NULL(first_op);
- SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
- size_t num_merged = 1;
- for (size_t i = 1; i < phi->operands().size(); i++) {
- int op = phi->operands()[i];
- TopLevelLiveRange* op_range = data()->live_ranges()[op];
- if (!op_range->HasSpillRange()) continue;
- SpillRange* op_spill = op_range->GetSpillRange();
- if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
- num_merged++;
- }
- }
- // Only continue if enough operands could be merged to the
- // same spill slot.
- if (num_merged * 2 <= phi->operands().size() ||
- AreUseIntervalsIntersecting(first_op_spill->interval(),
- range->first_interval())) {
- return false;
- }
// If the range does not need register soon, spill it to the merged
// spill range.
LifetimePosition next_pos = range->Start();
if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
if (pos == nullptr) {
- SpillRange* spill_range =
- range->TopLevel()->HasSpillRange()
- ? range->TopLevel()->GetSpillRange()
- : data()->AssignSpillRangeToLiveRange(range->TopLevel());
- bool merged = first_op_spill->TryMerge(spill_range);
- if (!merged) return false;
Spill(range);
return true;
} else if (pos->pos() > range->Start().NextStart()) {
- SpillRange* spill_range =
- range->TopLevel()->HasSpillRange()
- ? range->TopLevel()->GetSpillRange()
- : data()->AssignSpillRangeToLiveRange(range->TopLevel());
- bool merged = first_op_spill->TryMerge(spill_range);
- if (!merged) return false;
SpillBetween(range, range->Start(), pos->pos());
return true;
}
@@ -3595,6 +3672,11 @@ void SpillSlotLocator::LocateSpillSlots() {
OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
void OperandAssigner::AssignSpillSlots() {
for (auto range : data()->live_ranges()) {
if (range != nullptr && range->get_bundle() != nullptr) {
range->get_bundle()->MergeSpillRanges();
}
}
ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
// Merge disjoint spill ranges
for (size_t i = 0; i < spill_ranges.size(); ++i) {
......
@@ -300,6 +300,7 @@ class V8_EXPORT_PRIVATE UsePosition final
class SpillRange;
class RegisterAllocationData;
class TopLevelLiveRange;
class LiveRangeBundle;
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
@@ -425,6 +426,11 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
void Print(const RegisterConfiguration* config, bool with_children) const;
void Print(bool with_children) const;
void set_bundle(LiveRangeBundle* bundle) { bundle_ = bundle; }
LiveRangeBundle* get_bundle() const { return bundle_; }
bool RegisterFromBundle(int* hint) const;
void UpdateBundleRegister(int reg) const;
private:
friend class TopLevelLiveRange;
explicit LiveRange(int relative_id, MachineRepresentation rep,
@@ -461,10 +467,79 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
mutable UsePosition* current_hint_position_;
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
LiveRangeBundle* bundle_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
struct LiveRangeOrdering {
bool operator()(const LiveRange* left, const LiveRange* right) const {
return left->Start() < right->Start();
}
};
class LiveRangeBundle : public ZoneObject {
public:
void MergeSpillRanges();
int id() { return id_; }
int reg() { return reg_; }
void set_reg(int reg) {
DCHECK_EQ(reg_, kUnassignedRegister);
reg_ = reg;
}
private:
friend class BundleBuilder;
static int bundle_id;
class Range {
public:
Range(int s, int e) : start(s), end(e) {}
Range(LifetimePosition s, LifetimePosition e)
: start(s.value()), end(e.value()) {}
int start;
int end;
};
struct RangeOrdering {
bool operator()(const Range left, const Range right) const {
return left.start < right.start;
}
};
bool UsesOverlap(UseInterval* interval) {
auto use = uses_.begin();
while (interval != nullptr && use != uses_.end()) {
if (use->end <= interval->start().value()) {
++use;
} else if (interval->end().value() <= use->start) {
interval = interval->next();
} else {
return true;
}
}
return false;
}
void InsertUses(UseInterval* interval) {
while (interval != nullptr) {
auto done = uses_.insert({interval->start(), interval->end()});
USE(done);
DCHECK_EQ(done.second, 1);
interval = interval->next();
}
}
explicit LiveRangeBundle(Zone* zone) : ranges_(zone), uses_(zone) {}
bool TryAddRange(LiveRange* range);
bool TryMerge(LiveRangeBundle* other);
ZoneSet<LiveRange*, LiveRangeOrdering> ranges_;
ZoneSet<Range, RangeOrdering> uses_;
int id_ = bundle_id++;
int reg_ = kUnassignedRegister;
};
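
One subtlety: both ZoneSets order elements by start position alone, so two keys with the same start compare "equal" and the second insertion is silently dropped. That is sound here only because a bundle's intervals are kept pairwise disjoint, which is also why InsertUses can DCHECK that every insertion succeeds. A small demonstration of the pitfall, with std::set standing in for ZoneSet (assumption mine):

#include <cstdio>
#include <set>

struct Range { int start, end; };
struct ByStart {
  bool operator()(const Range& a, const Range& b) const {
    return a.start < b.start;
  }
};

int main() {
  std::set<Range, ByStart> uses;
  uses.insert({0, 4});
  auto result = uses.insert({0, 8});  // same start: comparator says "equal"
  std::printf("inserted=%d size=%zu\n", result.second ? 1 : 0, uses.size());
  // Prints "inserted=0 size=1": an overlapping interval would be lost, so
  // disjointness must be established before inserting.
  return 0;
}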
class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
public:
explicit TopLevelLiveRange(int vreg, MachineRepresentation rep);
@@ -948,6 +1023,18 @@ class LiveRangeBuilder final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(LiveRangeBuilder);
};
class BundleBuilder final : public ZoneObject {
public:
explicit BundleBuilder(RegisterAllocationData* data) : data_(data) {}
void BuildBundles();
private:
RegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data_->code(); }
RegisterAllocationData* data_;
};
class RegisterAllocator : public ZoneObject {
public:
RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
......
@@ -776,7 +776,11 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
os_ << " " << parent->vreg() << ":" << parent->relative_id();
// TODO(herhut) Find something useful to print for the hint field
os_ << " unknown";
if (range->get_bundle() != nullptr) {
os_ << " B" << range->get_bundle()->id();
} else {
os_ << " unknown";
}
for (const UseInterval* interval = range->first_interval();
interval != nullptr; interval = interval->next()) {
......
@@ -1668,6 +1668,14 @@ struct BuildLiveRangesPhase {
}
};
struct BuildBundlesPhase {
static const char* phase_name() { return "build live range bundles"; }
void Run(PipelineData* data, Zone* temp_zone) {
BundleBuilder builder(data->register_allocation_data());
builder.BuildBundles();
}
};
struct SplinterLiveRangesPhase {
static const char* phase_name() { return "splinter live ranges"; }
@@ -2816,6 +2824,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<MeetRegisterConstraintsPhase>();
Run<ResolvePhisPhase>();
Run<BuildLiveRangesPhase>();
Run<BuildBundlesPhase>();
TraceSequence(info(), data, "before register allocation");
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
......