Commit 8043a53a authored by mtrofin, committed by Commit bot

[turbofan] Greedy: live range grouping.

Group live ranges that would benefit from being allocated to the
same register. Currently, that means phi outputs and inputs.
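
For intuition (illustrative example, not part of this CL): given a phi

    B1: v1 = ...        B2: v2 = ...
    B3: v3 = phi(v1, v2)

putting v1, v2, and v3 in the same register removes the gap moves that
would otherwise copy each input into the phi's register at the ends of
the predecessor blocks.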

Review URL: https://codereview.chromium.org/1312473018

Cr-Commit-Position: refs/heads/master@{#30690}
parent 6127d37d
@@ -127,12 +127,18 @@ void AllocationScheduler::Schedule(LiveRange* range) {
queue_.push(AllocationCandidate(range));
}
void AllocationScheduler::Schedule(LiveRangeGroup* group) {
queue_.push(AllocationCandidate(group));
}
GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
RegisterKind kind, Zone* local_zone)
: RegisterAllocator(data, kind),
local_zone_(local_zone),
allocations_(local_zone),
scheduler_(local_zone) {}
scheduler_(local_zone),
groups_(local_zone) {}
void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
@@ -169,11 +175,99 @@ void GreedyAllocator::PreallocateFixedRanges() {
}
void GreedyAllocator::GroupLiveRanges() {
CoalescedLiveRanges grouper(local_zone());
for (TopLevelLiveRange* range : data()->live_ranges()) {
// Skip splinters, because we do not want to optimize for them, and moves
// due to assigning them to different registers occur in deferred blocks.
if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
continue;
}
// A phi can't be a memory operand, so it couldn't have been split.
DCHECK(!range->spilled());
// Maybe this phi range is itself an input to another phi which was already
// processed.
LiveRangeGroup* latest_grp = range->group() != nullptr
? range->group()
: new (local_zone())
LiveRangeGroup(local_zone());
// Populate the grouper.
if (range->group() == nullptr) {
grouper.clear();
grouper.AllocateRange(range);
} else {
for (LiveRange* member : range->group()->ranges()) {
grouper.AllocateRange(member);
}
}
for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
// Skip the output when it is also an input, which may happen for loops.
if (j == range->vreg()) continue;
TopLevelLiveRange* other_top = data()->live_ranges()[j];
if (other_top->IsSplinter()) continue;
// If the other was a memory operand, it might have been split; if so,
// take the part after the split, which is the one needing a register.
LiveRange* other =
other_top->next() == nullptr ? other_top : other_top->next();
if (other->spilled()) continue;
LiveRangeGroup* other_group = other->group();
if (other_group != nullptr) {
bool can_merge = true;
for (LiveRange* member : other_group->ranges()) {
if (grouper.GetConflicts(member).Current() != nullptr) {
can_merge = false;
break;
}
}
// If no member conflicts with the current group, then since the members
// don't conflict with each other either, we can merge the groups.
if (can_merge) {
latest_grp->ranges().insert(latest_grp->ranges().end(),
other_group->ranges().begin(),
other_group->ranges().end());
for (LiveRange* member : other_group->ranges()) {
grouper.AllocateRange(member);
member->set_group(latest_grp);
}
// Clear the other group, so we avoid scheduling it again.
other_group->ranges().clear();
}
} else if (grouper.GetConflicts(other).Current() == nullptr) {
grouper.AllocateRange(other);
latest_grp->ranges().push_back(other);
other->set_group(latest_grp);
}
}
if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
latest_grp->ranges().push_back(range);
DCHECK(latest_grp->ranges().size() > 1);
groups().push_back(latest_grp);
range->set_group(latest_grp);
}
}
}
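In outline (editorial summary): the pass seeds a conflict set with the phi's current group, then folds in each input range, or each input's whole group, whenever that introduces no interval overlap. A minimal standalone sketch of the merge test, with hypothetical Range/Group stand-ins rather than the V8 types:

#include <vector>

// Hypothetical stand-ins; a range here is a single [start, end) interval,
// whereas V8 ranges hold interval lists.
struct Range { int start, end; };
struct Group { std::vector<Range*> ranges; };

// Half-open intervals overlap iff each starts before the other ends.
static bool Overlaps(const Range* a, const Range* b) {
  return a->start < b->end && b->start < a->end;
}

// Mirrors the can_merge check above: merging is allowed only if no member
// of one group conflicts with any member of the other.
static bool CanMerge(const Group& a, const Group& b) {
  for (Range* ra : a.ranges) {
    for (Range* rb : b.ranges) {
      if (Overlaps(ra, rb)) return false;
    }
  }
  return true;
}

// Mirrors the merge step above: splice the donor's members over and clear
// it, so the emptied group is never scheduled on its own.
static void Merge(Group* into, Group* from) {
  into->ranges.insert(into->ranges.end(), from->ranges.begin(),
                      from->ranges.end());
  from->ranges.clear();
}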
void GreedyAllocator::ScheduleAllocationCandidates() {
for (auto range : data()->live_ranges()) {
for (LiveRangeGroup* group : groups()) {
if (group->ranges().size() > 0) {
// We shouldn't have added single-range groups.
DCHECK(group->ranges().size() != 1);
scheduler().Schedule(group);
}
}
for (LiveRange* range : data()->live_ranges()) {
if (CanProcessRange(range)) {
for (LiveRange* child = range; child != nullptr; child = child->next()) {
if (!child->spilled()) {
if (!child->spilled() && child->group() == nullptr) {
scheduler().Schedule(child);
}
}
@@ -184,8 +278,53 @@ void GreedyAllocator::ScheduleAllocationCandidates() {
void GreedyAllocator::TryAllocateCandidate(
const AllocationCandidate& candidate) {
// At this point, this is just a live range. TODO: groups.
TryAllocateLiveRange(candidate.live_range());
if (candidate.is_group()) {
TryAllocateGroup(candidate.group());
} else {
TryAllocateLiveRange(candidate.live_range());
}
}
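// Editorial summary: groups are allocated atomically. The group's weight is
// the maximum member weight. Prefer a register where no member conflicts;
// otherwise pick the register whose conflicts are cheapest to evict, provided
// they all weigh less than the group; failing both, fall back to scheduling
// the members individually.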
void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
float group_weight = 0.0;
for (LiveRange* member : group->ranges()) {
EnsureValidRangeWeight(member);
group_weight = Max(group_weight, member->weight());
}
float eviction_weight = group_weight;
int eviction_reg = -1;
int free_reg = -1;
for (int reg = 0; reg < num_registers(); ++reg) {
float weight = GetMaximumConflictingWeight(reg, group, group_weight);
if (weight == LiveRange::kInvalidWeight) {
free_reg = reg;
break;
}
if (weight < eviction_weight) {
eviction_weight = weight;
eviction_reg = reg;
}
}
if (eviction_reg < 0 && free_reg < 0) {
for (LiveRange* member : group->ranges()) {
scheduler().Schedule(member);
}
return;
}
if (free_reg < 0) {
DCHECK(eviction_reg >= 0);
for (LiveRange* member : group->ranges()) {
EvictAndRescheduleConflicts(eviction_reg, member);
}
free_reg = eviction_reg;
}
DCHECK(free_reg >= 0);
for (LiveRange* member : group->ranges()) {
AssignRangeToRegister(free_reg, member);
}
}
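// Worked example (illustrative): say group_weight is 4.0 and
// GetMaximumConflictingWeight yields {kMaxWeight, 3.0, kMaxWeight} for
// registers 0..2. No register is free, but register 1's conflicts (3.0)
// weigh less than the group, so they are evicted and rescheduled, and
// every member is assigned register 1.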
@@ -280,7 +419,7 @@ void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
size_t initial_range_count = data()->live_ranges().size();
for (size_t i = 0; i < initial_range_count; ++i) {
auto range = data()->live_ranges()[i];
TopLevelLiveRange* range = data()->live_ranges()[i];
if (!CanProcessRange(range)) continue;
if (!range->HasSpillOperand()) continue;
@@ -322,9 +461,9 @@ void GreedyAllocator::AllocateRegisters() {
data()->debug_name());
SplitAndSpillRangesDefinedByMemoryOperand();
PreallocateFixedRanges();
GroupLiveRanges();
ScheduleAllocationCandidates();
PreallocateFixedRanges();
while (!scheduler().empty()) {
AllocationCandidate candidate = scheduler().GetNext();
TryAllocateCandidate(candidate);
@@ -358,6 +497,24 @@ float GreedyAllocator::GetMaximumConflictingWeight(
}
float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
const LiveRangeGroup* group,
float group_weight) const {
float ret = LiveRange::kInvalidWeight;
for (LiveRange* member : group->ranges()) {
float member_conflict_weight = GetMaximumConflictingWeight(reg_id, member);
if (member_conflict_weight == LiveRange::kMaxWeight) {
return LiveRange::kMaxWeight;
}
if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
ret = Max(member_conflict_weight, ret);
}
return ret;
}
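// Note (added for clarity): the kMaxWeight early-outs mark the register as
// non-evictable for this group, either because a conflict is itself
// kMaxWeight or because a conflict outweighs the whole group; the caller's
// weight < eviction_weight test then rejects it.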
void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
// The live range weight will be invalidated when ranges are created or split.
// Otherwise, it is consistently updated when the range is allocated or
@@ -18,21 +18,45 @@ namespace compiler {
// we may extend this to groups of LiveRanges. It has to be comparable.
class AllocationCandidate {
public:
explicit AllocationCandidate(LiveRange* range) : range_(range) {}
explicit AllocationCandidate(LiveRange* range)
: is_group_(false), size_(range->GetSize()) {
candidate_.range_ = range;
}
explicit AllocationCandidate(LiveRangeGroup* ranges)
: is_group_(true), size_(CalculateGroupSize(ranges)) {
candidate_.group_ = ranges;
}
// Strict ordering operators
bool operator<(const AllocationCandidate& other) const {
return range_->GetSize() < other.range_->GetSize();
return size() < other.size();
}
bool operator>(const AllocationCandidate& other) const {
return range_->GetSize() > other.range_->GetSize();
return size() > other.size();
}
LiveRange* live_range() const { return range_; }
bool is_group() const { return is_group_; }
LiveRange* live_range() const { return candidate_.range_; }
LiveRangeGroup* group() const { return candidate_.group_; }
private:
LiveRange* range_;
unsigned CalculateGroupSize(LiveRangeGroup* group) {
unsigned ret = 0;
for (LiveRange* range : group->ranges()) {
ret += range->GetSize();
}
return ret;
}
unsigned size() const { return size_; }
bool is_group_;
unsigned size_;
union {
LiveRange* range_;
LiveRangeGroup* group_;
} candidate_;
};
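The size-based comparison above drives the scheduler's priority queue. A minimal sketch of that ordering with plain STL and hypothetical names (the real code uses zone containers):

#include <queue>
#include <vector>

// Larger candidates compare greater, so a max-heap pops them first,
// assuming the scheduler's queue behaves like std::priority_queue.
struct Candidate {
  unsigned size;
  bool operator<(const Candidate& other) const { return size < other.size; }
};

int main() {
  std::priority_queue<Candidate> queue;
  queue.push({7u});
  queue.push({3u});
  queue.push({5u});
  // Pops 7, then 5, then 3: bigger candidates pick registers first.
  while (!queue.empty()) queue.pop();
  return 0;
}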
@@ -41,6 +65,7 @@ class AllocationScheduler final : ZoneObject {
public:
explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
void Schedule(LiveRange* range);
void Schedule(LiveRangeGroup* group);
AllocationCandidate GetNext();
bool empty() const { return queue_.empty(); }
@@ -85,12 +110,15 @@ class GreedyAllocator final : public RegisterAllocator {
}
Zone* local_zone() const { return local_zone_; }
ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }
// Insert fixed ranges.
void PreallocateFixedRanges();
void GroupLiveRanges();
// Schedule unassigned live ranges for allocation.
// TODO(mtrofin): groups.
void ScheduleAllocationCandidates();
void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
@@ -106,6 +134,7 @@ class GreedyAllocator final : public RegisterAllocator {
void TryAllocateCandidate(const AllocationCandidate& candidate);
void TryAllocateLiveRange(LiveRange* range);
void TryAllocateGroup(LiveRangeGroup* group);
bool CanProcessRange(LiveRange* range) const {
return range != nullptr && !range->IsEmpty() && range->kind() == mode();
@@ -122,6 +151,12 @@ class GreedyAllocator final : public RegisterAllocator {
float GetMaximumConflictingWeight(unsigned reg_id,
const LiveRange* range) const;
// Returns kInvalidWeight if there are no conflicts, kMaxWeight if any
// member's conflict is itself kMaxWeight or outweighs group_weight, and
// otherwise the largest weight conflicting with the group's members at the
// given register.
float GetMaximumConflictingWeight(unsigned reg_id,
const LiveRangeGroup* group,
float group_weight) const;
// This is the extension point for splitting heuristics.
void SplitOrSpillBlockedRange(LiveRange* range);
@@ -152,6 +187,8 @@ class GreedyAllocator final : public RegisterAllocator {
Zone* local_zone_;
ZoneVector<CoalescedLiveRanges*> allocations_;
AllocationScheduler scheduler_;
ZoneVector<LiveRangeGroup*> groups_;
DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
};
} // namespace compiler
@@ -247,7 +247,8 @@ LiveRange::LiveRange(int relative_id, MachineType machine_type,
last_processed_use_(nullptr),
current_hint_position_(nullptr),
size_(kInvalidSize),
weight_(kInvalidWeight) {
weight_(kInvalidWeight),
group_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
MachineTypeField::encode(machine_type);
@@ -275,6 +275,7 @@ class UsePosition final : public ZoneObject {
class SpillRange;
class RegisterAllocationData;
class TopLevelLiveRange;
class LiveRangeGroup;
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
@@ -384,6 +385,8 @@ class LiveRange : public ZoneObject {
unsigned GetSize();
float weight() const { return weight_; }
void set_weight(float weight) { weight_ = weight; }
LiveRangeGroup* group() const { return group_; }
void set_group(LiveRangeGroup* group) { group_ = group; }
static const int kInvalidSize = -1;
static const float kInvalidWeight;
@@ -431,10 +434,30 @@
// register and ranges that intersect them and need a register.
float weight_;
// greedy: grouping
LiveRangeGroup* group_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
class LiveRangeGroup final : public ZoneObject {
public:
explicit LiveRangeGroup(Zone* zone) : ranges_(zone) {}
ZoneVector<LiveRange*>& ranges() { return ranges_; }
const ZoneVector<LiveRange*>& ranges() const { return ranges_; }
// TODO(mtrofin): populate assigned register and use in weight calculation.
int assigned_register() const { return assigned_register_; }
void set_assigned_register(int reg) { assigned_register_ = reg; }
private:
ZoneVector<LiveRange*> ranges_;
int assigned_register_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeGroup);
};
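Illustrative use (hypothetical snippet, mirroring the pattern in GroupLiveRanges above):

  LiveRangeGroup* group = new (zone) LiveRangeGroup(zone);
  group->ranges().push_back(range);
  range->set_group(group);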
class TopLevelLiveRange final : public LiveRange {
public:
explicit TopLevelLiveRange(int vreg, MachineType machine_type);