Commit 0ee4b473 authored by mtrofin's avatar mtrofin Committed by Commit bot

[turbofan] Separate LiveRange and TopLevelLiveRange concepts

A TopLevelLiveRange is the live range of a virtual register. Through
register allocation, it may end up being split in a succession of child
live ranges, where data flow is handled through moves from
predecessor to successor child.

Today, the concepts of "top level" and "child" live ranges are conflated
under the LiveRange class. However, quite a few APIs pertain solely
to TopLevelLiveRanges. Until now, this was communicated through comments or
DCHECKs - which makes for poor code comprehensibility and maintainability.

For example, the worklist of the register allocator (live_ranges()) needs
to only contain TopLevelLiveRanges; spill range concerns are associated
only with the top range; phi-ness; certain phases in the allocation pipeline;
APIs on LiveRange used for initial construction - before splitting;
splintering - these are all responsibilities associated to TopLevelLiveRanges,
and not child live ranges.

This change separates the concepts.

An effect of this change is that child live range allocation need not involve
RegisterAllocationData. That's "a good thing" (lower coupling), but it has
the side-effect of not having a good way to construct unique identifiers for
child live ranges, relative to a given InstructionSequence.

LiveRange ids are used primarily for tracing, output, and debugging.

I propose a 2-component identifier: a virtual register (vreg) number,
uniquely identifying TopLevelLiveRanges; and a relative identifier, which
uniquely identifies children of a given TopLevelLiveRange. "0" is reserved
for the TopLevel range. The relative identifier does not necessarily
indicate order in the child chain, which is no worse than the current state
of affairs.

I believe this change should make it easier to understand a trace output
(because the virtual register number is readily available). I plan to formalize
with a small structure the notion of live range id, and consolidate tracing
around that, as part of a separate CL. (there are seemingly disparate ways
to trace - printf or stream-based APIs - so this seems like an opportune
change to consolidate that)

Review URL: https://codereview.chromium.org/1311983002

Cr-Commit-Position: refs/heads/master@{#30370}
parent 268420af
......@@ -420,7 +420,9 @@ class GraphC1Visualizer {
void PrintInputs(InputIterator* i, int count, const char* prefix);
void PrintType(Node* node);
void PrintLiveRange(LiveRange* range, const char* type);
void PrintLiveRange(LiveRange* range, const char* type, int vreg);
void PrintLiveRangeChain(TopLevelLiveRange* range, const char* type);
class Tag final BASE_EMBEDDED {
public:
Tag(GraphC1Visualizer* visualizer, const char* name) {
......@@ -694,23 +696,33 @@ void GraphC1Visualizer::PrintLiveRanges(const char* phase,
PrintStringProperty("name", phase);
for (auto range : data->fixed_double_live_ranges()) {
PrintLiveRange(range, "fixed");
PrintLiveRangeChain(range, "fixed");
}
for (auto range : data->fixed_live_ranges()) {
PrintLiveRange(range, "fixed");
PrintLiveRangeChain(range, "fixed");
}
for (auto range : data->live_ranges()) {
PrintLiveRange(range, "object");
PrintLiveRangeChain(range, "object");
}
}
void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
                                            const char* type) {
  // Emit the top-level range and then every split child in chain order,
  // tagging each line with the top level's virtual register number so the
  // whole chain is attributable to one vreg in the trace output.
  const int virtual_register = range->vreg();
  LiveRange* current = range;
  while (current != nullptr) {
    PrintLiveRange(current, type, virtual_register);
    current = current->next();
  }
}
void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
int vreg) {
if (range != NULL && !range->IsEmpty()) {
PrintIndent();
os_ << range->id() << " " << type;
os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
int assigned_reg = op.index();
......@@ -739,13 +751,8 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
}
}
}
int parent_index = -1;
if (range->IsChild()) {
parent_index = range->parent()->id();
} else {
parent_index = range->id();
}
os_ << " " << parent_index;
os_ << " " << vreg;
for (auto interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
os_ << " [" << interval->start().value() << ", "
......
......@@ -22,11 +22,11 @@ const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
namespace {
void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
void UpdateOperands(TopLevelLiveRange* range, RegisterAllocationData* data) {
int reg_id = range->assigned_register();
range->SetUseHints(reg_id);
if (range->is_phi()) {
data->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
data->GetPhiMapValueFor(range)->set_assigned_register(reg_id);
}
}
......@@ -38,8 +38,7 @@ LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
(data->code()
->GetInstructionBlock(pos.ToInstructionIndex())
->last_instruction_index() != pos.ToInstructionIndex()));
LiveRange* result = data->NewChildRangeFor(range);
range->SplitAt(pos, result, data->allocation_zone());
LiveRange* result = range->SplitAt(pos, data->allocation_zone());
return result;
}
......@@ -117,7 +116,8 @@ AllocationCandidate AllocationScheduler::GetNext() {
// Queue `range` for (re)allocation. The trace id is the two-component
// vreg:relative_id identifier introduced by this change.
// NOTE(review): the rendered diff left the pre-change single-id TRACE line
// interleaved here; this is the post-change version only.
void AllocationScheduler::Schedule(LiveRange* range) {
  TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
        range->relative_id());
  queue_.push(AllocationCandidate(range));
}
......@@ -130,14 +130,15 @@ GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
// Record the allocation of `reg_id` to `range`: update the allocator's
// register bookkeeping and stamp the assignment on the range itself.
// NOTE(review): the rendered diff left the pre-change TRACE lines interleaved
// here; this is the post-change version only. Also fixed the second format
// string, which read "%d%d" (missing the ':' separator) and was inconsistent
// with every other vreg:relative_id trace in this change.
void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
  TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
        range->TopLevel()->vreg(), range->relative_id());

  DCHECK(!range->HasRegisterAssigned());

  AllocateRegisterToRange(reg_id, range);

  TRACE("Assigning %s to range %d:%d.\n", RegisterName(reg_id),
        range->TopLevel()->vreg(), range->relative_id());
  range->set_assigned_register(reg_id);
}
......@@ -151,7 +152,7 @@ void GreedyAllocator::PreallocateFixedRanges() {
for (LiveRange* fixed_range : GetFixedRegisters()) {
if (fixed_range != nullptr) {
DCHECK_EQ(mode(), fixed_range->kind());
DCHECK(fixed_range->IsFixed());
DCHECK(fixed_range->TopLevel()->IsFixed());
int reg_nr = fixed_range->assigned_register();
EnsureValidRangeWeight(fixed_range);
......@@ -180,7 +181,8 @@ void GreedyAllocator::TryAllocateCandidate(
void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// TODO(mtrofin): once we introduce groups, we'll want to first try and
// allocate at the preferred register.
TRACE("Attempting to allocate live range %d\n", range->id());
TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
range->relative_id());
int free_reg = -1;
int evictable_reg = -1;
EnsureValidRangeWeight(range);
......@@ -206,8 +208,9 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// We have a free register, so we use it.
if (free_reg >= 0) {
TRACE("Found free register %s for live range %d\n", RegisterName(free_reg),
range->id());
TRACE("Found free register %s for live range %d:%d.\n",
RegisterName(free_reg), range->TopLevel()->vreg(),
range->relative_id());
AssignRangeToRegister(free_reg, range);
return;
}
......@@ -215,8 +218,9 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// We found a register to perform evictions, so we evict and allocate our
// candidate.
if (evictable_reg >= 0) {
TRACE("Found evictable register %s for live range %d\n",
RegisterName(free_reg), range->id());
TRACE("Found evictable register %s for live range %d:%d.\n",
RegisterName(free_reg), range->TopLevel()->vreg(),
range->relative_id());
EvictAndRescheduleConflicts(evictable_reg, range);
AssignRangeToRegister(evictable_reg, range);
return;
......@@ -233,11 +237,12 @@ void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.RemoveCurrentAndGetNext()) {
DCHECK(conflict->HasRegisterAssigned());
CHECK(!conflict->IsFixed());
CHECK(!conflict->TopLevel()->IsFixed());
conflict->UnsetAssignedRegister();
UpdateWeightAtEviction(conflict);
scheduler().Schedule(conflict);
TRACE("Evicted range %d.\n", conflict->id());
TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(),
conflict->relative_id());
}
}
......@@ -250,7 +255,8 @@ void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
if (range->HasNoSpillType()) continue;
LifetimePosition start = range->Start();
TRACE("Live range %d is defined by a spill operand.\n", range->id());
TRACE("Live range %d:%d is defined by a spill operand.\n",
range->TopLevel()->vreg(), range->relative_id());
auto next_pos = start;
if (next_pos.IsGapPosition()) {
next_pos = next_pos.NextStart();
......@@ -335,7 +341,7 @@ void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
// unallocated.
if (range->weight() != LiveRange::kInvalidWeight) return;
if (range->IsFixed()) {
if (range->TopLevel()->IsFixed()) {
range->set_weight(LiveRange::kMaxWeight);
return;
}
......
......@@ -66,9 +66,8 @@ bool IsIntervalAlreadyExcluded(const LiveRange *range, LifetimePosition start,
}
void CreateSplinter(LiveRange *range, RegisterAllocationData *data,
void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
LifetimePosition first_cut, LifetimePosition last_cut) {
DCHECK(!range->IsChild());
DCHECK(!range->IsSplinter());
// We can ignore ranges that live solely in deferred blocks.
// If a range ends right at the end of a deferred block, it is marked by
......@@ -94,7 +93,10 @@ void CreateSplinter(LiveRange *range, RegisterAllocationData *data,
if (range->MayRequireSpillRange()) {
data->CreateSpillRangeForLiveRange(range);
}
LiveRange *result = data->NewChildRangeFor(range);
TopLevelLiveRange *result = data->NextLiveRange(range->machine_type());
DCHECK_NULL(data->live_ranges()[result->vreg()]);
data->live_ranges()[result->vreg()] = result;
Zone *zone = data->allocation_zone();
range->Splinter(start, end, result, zone);
}
......@@ -138,7 +140,7 @@ void SplinterRangesInDeferredBlocks(RegisterAllocationData *data) {
int range_id = iterator.Current();
iterator.Advance();
LiveRange *range = data->live_ranges()[range_id];
TopLevelLiveRange *range = data->live_ranges()[range_id];
CreateSplinter(range, data, first_cut, last_cut);
}
}
......@@ -155,12 +157,11 @@ void LiveRangeSeparator::Splinter() {
void LiveRangeMerger::Merge() {
int live_range_count = static_cast<int>(data()->live_ranges().size());
for (int i = 0; i < live_range_count; ++i) {
LiveRange *range = data()->live_ranges()[i];
if (range == nullptr || range->IsEmpty() || range->IsChild() ||
!range->IsSplinter()) {
TopLevelLiveRange *range = data()->live_ranges()[i];
if (range == nullptr || range->IsEmpty() || !range->IsSplinter()) {
continue;
}
LiveRange *splinter_parent = range->splintered_from();
TopLevelLiveRange *splinter_parent = range->splintered_from();
splinter_parent->Merge(range, data());
}
......
This diff is collapsed.
This diff is collapsed.
......@@ -29,7 +29,8 @@ class TestRangeBuilder {
LiveRange* Build(int start, int end) { return Add(start, end).Build(); }
LiveRange* Build() {
LiveRange* range = new (zone_) LiveRange(id_, MachineType::kRepTagged);
TopLevelLiveRange* range =
new (zone_) TopLevelLiveRange(id_, MachineType::kRepTagged);
// Traverse the provided interval specifications backwards, because that is
// what LiveRange expects.
for (int i = static_cast<int>(pairs_.size()) - 1; i >= 0; --i) {
......@@ -100,8 +101,9 @@ void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
LiveRangeIDs seen(zone());
for (auto c = conflicts.Current(); c != nullptr;
c = conflicts.RemoveCurrentAndGetNext()) {
EXPECT_FALSE(seen.count(c->id()) > 0);
seen.insert(c->id());
int id = c->TopLevel()->vreg();
EXPECT_FALSE(seen.count(id) > 0);
seen.insert(c->TopLevel()->vreg());
}
}
......@@ -118,7 +120,7 @@ bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
auto conflicts = ranges().GetConflicts(range);
for (auto conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.GetNext()) {
found_ids.insert(conflict->id());
found_ids.insert(conflict->TopLevel()->vreg());
}
return found_ids == ids;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment