Commit 3e3608cd authored by mtrofin, committed by Commit bot

Unit tests for the live range conflict detection mechanism (CoalescedLiveRanges) in the Greedy Allocator.

Consolidated conflict detection and traversal logic in CoalescedLiveRanges, avoiding duplication in both the code and its tests. In addition, this change separates CoalescedLiveRanges more cleanly from the other register allocator components, improving testability and maintainability.
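In outline, the new iteration API reads as follows (a condensed sketch of the two GreedyAllocator loops that appear later in this diff; reg_id, range, and the scheduler() accessor stand in for the allocator's real state):

    // Read-only scan (mirrors GetMaximumConflictingWeight below): the same
    // range may be seen more than once, though never back to back.
    float max_weight = LiveRange::kInvalidWeight;
    auto conflicts = current_allocations(reg_id)->GetConflicts(range);
    for (LiveRange* c = conflicts.Current(); c != nullptr;
         c = conflicts.GetNext()) {
      max_weight = Max(max_weight, c->weight());
    }

    // Mutating scan (mirrors EvictAndRescheduleConflicts below): each
    // conflicting range is observed exactly once, evicted, and rescheduled.
    auto evictable = current_allocations(reg_id)->GetConflicts(range);
    for (LiveRange* c = evictable.Current(); c != nullptr;
         c = evictable.RemoveCurrentAndGetNext()) {
      c->UnsetAssignedRegister();
      scheduler().Schedule(c);
    }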

BUG=

Review URL: https://codereview.chromium.org/1219063017

Cr-Commit-Position: refs/heads/master@{#29783}
parent 9ec20f9c
[file: src/compiler/coalesced-live-ranges.cc]
@@ -10,136 +10,131 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-#define TRACE(...)                             \
-  do {                                         \
-    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
-  } while (false)
-
-const float CoalescedLiveRanges::kAllocatedRangeMultiplier = 10.0;
-
-void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
-  UpdateWeightAtAllocation(range);
-  for (auto interval = range->first_interval(); interval != nullptr;
-       interval = interval->next()) {
-    storage().insert({interval->start(), interval->end(), range});
-  }
-}
-
-void CoalescedLiveRanges::Remove(LiveRange* range) {
-  for (auto interval = range->first_interval(); interval != nullptr;
-       interval = interval->next()) {
-    storage().erase({interval->start(), interval->end(), nullptr});
-  }
-  range->UnsetAssignedRegister();
-}
-
-float CoalescedLiveRanges::GetMaximumConflictingWeight(
-    const LiveRange* range) const {
-  float ret = LiveRange::kInvalidWeight;
-  auto end = storage().end();
-  for (auto query = range->first_interval(); query != nullptr;
-       query = query->next()) {
-    auto conflict = GetFirstConflict(query);
-    if (conflict == end) continue;
-    for (; QueryIntersectsAllocatedInterval(query, conflict); ++conflict) {
-      // It is possible we'll visit the same range multiple times, because
-      // successive (not necessarily consecutive) intervals belong to the same
-      // range, or because different intervals of the query range have the same
-      // range as conflict.
-      DCHECK_NE(conflict->range->weight(), LiveRange::kInvalidWeight);
-      ret = Max(ret, conflict->range->weight());
-      if (ret == LiveRange::kMaxWeight) break;
-    }
-  }
-  return ret;
-}
-
-void CoalescedLiveRanges::EvictAndRescheduleConflicts(
-    LiveRange* range, AllocationScheduler* scheduler) {
-  auto end = storage().end();
-  for (auto query = range->first_interval(); query != nullptr;
-       query = query->next()) {
-    auto conflict = GetFirstConflict(query);
-    if (conflict == end) continue;
-    while (QueryIntersectsAllocatedInterval(query, conflict)) {
-      LiveRange* range_to_evict = conflict->range;
-      // Bypass successive intervals belonging to the same range, because we're
-      // about to remove this range, and we don't want the storage iterator to
-      // become invalid.
-      while (conflict != end && conflict->range == range_to_evict) {
-        ++conflict;
-      }
-      DCHECK(range_to_evict->HasRegisterAssigned());
-      CHECK(!range_to_evict->IsFixed());
-      Remove(range_to_evict);
-      UpdateWeightAtEviction(range_to_evict);
-      TRACE("Evicted range %d.\n", range_to_evict->id());
-      scheduler->Schedule(range_to_evict);
-    }
-  }
-}
-
-bool CoalescedLiveRanges::VerifyAllocationsAreValid() const {
-  LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
-  for (auto i : storage_) {
-    if (i.start < last_end) {
-      return false;
-    }
-    last_end = i.end;
-  }
-  return true;
-}
-
-void CoalescedLiveRanges::UpdateWeightAtAllocation(LiveRange* range) {
-  DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
-  range->set_weight(range->weight() * kAllocatedRangeMultiplier);
-}
-
-void CoalescedLiveRanges::UpdateWeightAtEviction(LiveRange* range) {
-  DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
-  range->set_weight(range->weight() / kAllocatedRangeMultiplier);
-}
-
-CoalescedLiveRanges::interval_iterator CoalescedLiveRanges::GetFirstConflict(
-    const UseInterval* query) const {
-  DCHECK(query != nullptr);
-  auto end = storage().end();
-  LifetimePosition q_start = query->start();
-  LifetimePosition q_end = query->end();
-  if (storage().empty() || storage().rbegin()->end <= q_start ||
-      storage().begin()->start >= q_end) {
-    return end;
-  }
-  auto ret = storage().upper_bound(AsAllocatedInterval(q_start));
-  // ret is either at the end (no start strictly greater than q_start) or
-  // at some position with the aforementioned property. In either case, the
-  // allocated interval before this one may intersect our query:
-  // either because, although it starts before this query's start, it ends
-  // after; or because it starts exactly at the query start. So unless we're
-  // right at the beginning of the storage - meaning the first allocated
-  // interval is also starting after this query's start - see what's behind.
-  if (ret != storage().begin()) {
-    --ret;
-    if (!QueryIntersectsAllocatedInterval(query, ret)) {
-      // The interval behind wasn't intersecting, so move back.
-      ++ret;
-    }
-  }
-  if (ret != end && QueryIntersectsAllocatedInterval(query, ret)) return ret;
-  return end;
-}
+LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range,
+                                                     IntervalStore* storage)
+    : query_(range->first_interval()),
+      pos_(storage->end()),
+      intervals_(storage) {
+  MovePosAndQueryToFirstConflict();
+}
+
+LiveRange* LiveRangeConflictIterator::Current() const {
+  if (IsFinished()) return nullptr;
+  return pos_->range_;
+}
+
+void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
+  DCHECK(query_ != nullptr);
+  auto end = intervals_->end();
+  LifetimePosition q_start = query_->start();
+  LifetimePosition q_end = query_->end();
+  if (intervals_->empty() || intervals_->rbegin()->end_ <= q_start ||
+      intervals_->begin()->start_ >= q_end) {
+    pos_ = end;
+    return;
+  }
+  pos_ = intervals_->upper_bound(AsAllocatedInterval(q_start));
+  // pos is either at the end (no start strictly greater than q_start) or
+  // at some position with the aforementioned property. In either case, the
+  // allocated interval before this one may intersect our query:
+  // either because, although it starts before this query's start, it ends
+  // after; or because it starts exactly at the query start. So unless we're
+  // right at the beginning of the storage - meaning the first allocated
+  // interval is also starting after this query's start - see what's behind.
+  if (pos_ != intervals_->begin()) {
+    --pos_;
+    if (!QueryIntersectsAllocatedInterval()) {
+      // The interval behind wasn't intersecting, so move back.
+      ++pos_;
+    }
+  }
+  if (pos_ == end || !QueryIntersectsAllocatedInterval()) {
+    pos_ = end;
+  }
+}
+
+void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() {
+  auto end = intervals_->end();
+  for (; query_ != nullptr; query_ = query_->next()) {
+    MovePosToFirstConflictForQuery();
+    if (pos_ != end) {
+      DCHECK(QueryIntersectsAllocatedInterval());
+      return;
+    }
+  }
+  Invalidate();
+}
+
+void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() {
+  auto end = intervals_->end();
+  DCHECK(pos_ != end);
+  LiveRange* current_conflict = Current();
+  while (pos_ != end && pos_->range_ == current_conflict) {
+    ++pos_;
+  }
+}
+
+LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) {
+  if (IsFinished()) return nullptr;
+  LiveRange* to_clear = Current();
+  IncrementPosAndSkipOverRepetitions();
+  // At this point, pos_ is either at the end, or on an interval that doesn't
+  // correspond to the same range as to_clear. This interval may not even be
+  // a conflict.
+  if (clean_behind) {
+    // Since we parked pos_ on an iterator that won't be affected by removal,
+    // we can safely delete to_clear's intervals.
+    for (auto interval = to_clear->first_interval(); interval != nullptr;
+         interval = interval->next()) {
+      AllocatedInterval erase_key(interval->start(), interval->end(), nullptr);
+      intervals_->erase(erase_key);
+    }
+  }
+  // We may have parked pos_ at the end, or on a non-conflict. In that case,
+  // move to the next query and reinitialize pos and query. This may invalidate
+  // the iterator, if no more conflicts are available.
+  if (!QueryIntersectsAllocatedInterval()) {
+    query_ = query_->next();
+    MovePosAndQueryToFirstConflict();
+  }
+  return Current();
+}
+
+LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts(
+    const LiveRange* range) {
+  return LiveRangeConflictIterator(range, &intervals());
+}
+
+void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
+  for (auto interval = range->first_interval(); interval != nullptr;
+       interval = interval->next()) {
+    AllocatedInterval to_insert(interval->start(), interval->end(), range);
+    intervals().insert(to_insert);
+  }
+}
+
+bool CoalescedLiveRanges::VerifyAllocationsAreValidForTesting() const {
+  LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
+  for (auto i : intervals_) {
+    if (i.start_ < last_end) {
+      return false;
+    }
+    last_end = i.end_;
+  }
+  return true;
+}
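The first-conflict search above (MovePosToFirstConflictForQuery, and GetFirstConflict before it) relies on upper_bound over a set ordered by interval start, followed by a one-step look-behind. A minimal standalone sketch of the same pattern, using a plain std::set and int positions rather than the allocator's types:

    #include <cassert>
    #include <iterator>
    #include <set>

    struct Interval {
      int start, end;  // half-open: [start, end)
      bool operator<(const Interval& other) const { return start < other.start; }
    };

    // Find the first stored interval intersecting [q_start, q_end): jump past
    // q_start with upper_bound, then peek one interval behind.
    std::set<Interval>::const_iterator FirstConflict(const std::set<Interval>& s,
                                                     int q_start, int q_end) {
      auto end = s.end();
      if (s.empty() || s.rbegin()->end <= q_start || s.begin()->start >= q_end)
        return end;
      // First interval whose start is strictly greater than q_start...
      auto pos = s.upper_bound({q_start, q_start});
      // ...but the interval before it may still overlap the query: it starts
      // at or before q_start and might end after it.
      if (pos != s.begin()) {
        auto prev = std::prev(pos);
        if (prev->end > q_start) return prev;
      }
      if (pos != end && pos->start < q_end) return pos;
      return end;
    }

    int main() {
      std::set<Interval> s{{0, 5}, {10, 20}, {25, 30}};
      assert(FirstConflict(s, 12, 14)->start == 10);  // overlap from behind
      assert(FirstConflict(s, 5, 10) == s.end());     // gap: no conflict
      assert(FirstConflict(s, 19, 26)->start == 10);  // first of two conflicts
    }

The look-behind is needed because upper_bound skips any interval that starts at or before the query's start yet ends inside it.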
[file: src/compiler/coalesced-live-ranges.h]
@@ -13,8 +13,96 @@ namespace internal {
 namespace compiler {
 
 class AllocationScheduler;
 
+// Implementation detail for CoalescedLiveRanges.
+struct AllocatedInterval {
+  AllocatedInterval(LifetimePosition start, LifetimePosition end,
+                    LiveRange* range)
+      : start_(start), end_(end), range_(range) {}
+  LifetimePosition start_;
+  LifetimePosition end_;
+  LiveRange* range_;
+
+  bool operator<(const AllocatedInterval& other) const {
+    return start_ < other.start_;
+  }
+  bool operator>(const AllocatedInterval& other) const {
+    return start_ > other.start_;
+  }
+};
+typedef ZoneSet<AllocatedInterval> IntervalStore;
+
+// An iterator over conflicts of a live range, obtained from
+// CoalescedLiveRanges. The design supports two main scenarios (see
+// GreedyAllocator):
+// (1) observing each conflicting range, without mutating the allocations, and
+// (2) observing each conflicting range, and then moving to the next, after
+//     removing the current conflict.
+class LiveRangeConflictIterator {
+ public:
+  // Current conflict. nullptr if no conflicts, or if we reached the end of
+  // conflicts.
+  LiveRange* Current() const;
+
+  // Get the next conflict. Caller should handle non-consecutive repetitions of
+  // the same range.
+  LiveRange* GetNext() { return InternalGetNext(false); }
+
+  // Get the next conflict, after evicting the current one. Caller may expect
+  // to never observe the same live range more than once.
+  LiveRange* RemoveCurrentAndGetNext() { return InternalGetNext(true); }
+
+ private:
+  friend class CoalescedLiveRanges;
+
+  typedef IntervalStore::const_iterator interval_iterator;
+
+  LiveRangeConflictIterator(const LiveRange* range, IntervalStore* store);
+
+  // Move the store iterator to the first interval intersecting the query.
+  // Since the intervals are sorted, subsequent intervals intersecting the
+  // query follow. May leave the store iterator at "end", meaning that the
+  // current query does not have an intersection.
+  void MovePosToFirstConflictForQuery();
+
+  // Move both the query and the store iterator to the first intersection, if
+  // any. If there is none, invalidate the iterator (IsFinished() == true).
+  void MovePosAndQueryToFirstConflict();
+
+  // Increment pos and skip over intervals belonging to the same range we
+  // started with (i.e. Current() before the call). It is possible that the
+  // range will be seen again, but not consecutively.
+  void IncrementPosAndSkipOverRepetitions();
+
+  // Common implementation used by both GetNext and RemoveCurrentAndGetNext.
+  LiveRange* InternalGetNext(bool clean_behind);
+
+  bool IsFinished() const { return query_ == nullptr; }
+
+  static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
+    return AllocatedInterval(pos, LifetimePosition::Invalid(), nullptr);
+  }
+
+  // Intersection utilities.
+  static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
+                         LifetimePosition b_start, LifetimePosition b_end) {
+    return a_start < b_end && b_start < a_end;
+  }
+
+  bool QueryIntersectsAllocatedInterval() const {
+    DCHECK(query_ != nullptr);
+    return pos_ != intervals_->end() &&
+           Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
+  }
+
+  void Invalidate() {
+    query_ = nullptr;
+    pos_ = intervals_->end();
+  }
+
+  const UseInterval* query_;
+  interval_iterator pos_;
+  IntervalStore* intervals_;
+};
+
 // Collection of live ranges allocated to the same register.
 // It supports efficiently finding all conflicts for a given, non-allocated
@@ -30,45 +118,27 @@ class AllocationScheduler;
 // traversal of conflicts.
 class CoalescedLiveRanges : public ZoneObject {
  public:
-  explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
-  void clear() { storage_.clear(); }
+  explicit CoalescedLiveRanges(Zone* zone) : intervals_(zone) {}
+  void clear() { intervals_.clear(); }
 
-  bool empty() const { return storage_.empty(); }
+  bool empty() const { return intervals_.empty(); }
 
-  // Returns kInvalidWeight if there are no conflicts, or the largest weight of
-  // a range conflicting with the given range.
-  float GetMaximumConflictingWeight(const LiveRange* range) const;
-
-  // Evicts all conflicts of the given range, and reschedules them with the
-  // provided scheduler.
-  void EvictAndRescheduleConflicts(LiveRange* range,
-                                   AllocationScheduler* scheduler);
+  // Iterate over each live range conflicting with the provided one.
+  // The same live range may be observed multiple times, but not consecutively.
+  LiveRangeConflictIterator GetConflicts(const LiveRange* range);
 
   // Allocates a range with a pre-calculated candidate weight.
   void AllocateRange(LiveRange* range);
 
-  // TODO(mtrofin): remove this in favor of comprehensive unit tests.
-  bool VerifyAllocationsAreValid() const;
+  // Unit testing API, verifying that allocated intervals do not overlap.
+  bool VerifyAllocationsAreValidForTesting() const;
 
  private:
-  static const float kAllocatedRangeMultiplier;
-
-  // Storage detail for CoalescedLiveRanges.
-  struct AllocatedInterval {
-    LifetimePosition start;
-    LifetimePosition end;
-    LiveRange* range;
-    bool operator<(const AllocatedInterval& other) const {
-      return start < other.start;
-    }
-    bool operator>(const AllocatedInterval& other) const {
-      return start > other.start;
-    }
-  };
-  typedef ZoneSet<AllocatedInterval> IntervalStore;
-  typedef IntervalStore::const_iterator interval_iterator;
-
-  IntervalStore& storage() { return storage_; }
-  const IntervalStore& storage() const { return storage_; }
+  IntervalStore& intervals() { return intervals_; }
+  const IntervalStore& intervals() const { return intervals_; }
 
-  // Augment the weight of a range that is about to be allocated.
-  static void UpdateWeightAtAllocation(LiveRange* range);
@@ -76,29 +146,8 @@ class CoalescedLiveRanges : public ZoneObject {
-  // Reduce the weight of a range that has lost allocation.
-  static void UpdateWeightAtEviction(LiveRange* range);
-
-  // Intersection utilities.
-  static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
-                         LifetimePosition b_start, LifetimePosition b_end) {
-    return a_start < b_end && b_start < a_end;
-  }
-  static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
-    return {pos, LifetimePosition::Invalid(), nullptr};
-  }
-
-  bool QueryIntersectsAllocatedInterval(const UseInterval* query,
-                                        interval_iterator& pos) const {
-    DCHECK(query != nullptr);
-    return pos != storage().end() &&
-           Intersects(query->start(), query->end(), pos->start, pos->end);
-  }
-
-  void Remove(LiveRange* range);
-
-  // Get the first interval intersecting query. Since the intervals are sorted,
-  // subsequent intervals intersecting query follow.
-  interval_iterator GetFirstConflict(const UseInterval* query) const;
-
-  IntervalStore storage_;
+  IntervalStore intervals_;
 
   DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
 };
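Note that AllocatedInterval orders only by start_, so the backing set treats any two intervals with the same start_ as equivalent; lookups and erasures need only a start position, which is exactly what AsAllocatedInterval exploits. A standalone sketch of that property (plain std::set in place of ZoneSet, int positions; illustrative only):

    #include <cassert>
    #include <set>

    struct AllocatedInterval {
      int start_, end_;
      const void* range_;
      bool operator<(const AllocatedInterval& other) const {
        return start_ < other.start_;  // set equivalence is by start_ alone
      }
    };

    int main() {
      int dummy_range;
      std::set<AllocatedInterval> intervals;
      intervals.insert({3, 7, &dummy_range});
      // Because the comparator ignores end_ and range_, a key with a matching
      // start_ finds (and can erase) the stored interval; no payload needed.
      assert(intervals.find({3, 0, nullptr}) != intervals.end());
      intervals.erase({3, 0, nullptr});
      assert(intervals.empty());
    }

This also means the set can never hold two intervals with the same start, which is safe here because intervals allocated to one register never overlap (the invariant VerifyAllocationsAreValidForTesting checks).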
[file: src/compiler/greedy-allocator.cc]
@@ -9,12 +9,16 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
 #define TRACE(...)                             \
   do {                                         \
     if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
   } while (false)
 
+const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
+
 namespace {
@@ -131,12 +135,10 @@ void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
   DCHECK(!range->HasRegisterAssigned());
 
-  current_allocations(reg_id)->AllocateRange(range);
+  AllocateRegisterToRange(reg_id, range);
 
   TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
   range->set_assigned_register(reg_id);
-  DCHECK(current_allocations(reg_id)->VerifyAllocationsAreValid());
 }
@@ -153,7 +155,7 @@ void GreedyAllocator::PreallocateFixedRanges() {
       int reg_nr = fixed_range->assigned_register();
       EnsureValidRangeWeight(fixed_range);
-      current_allocations(reg_nr)->AllocateRange(fixed_range);
+      AllocateRegisterToRange(reg_nr, fixed_range);
     }
   }
 }
@@ -190,8 +192,7 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
   // where the maximum conflict is lower than the candidate's weight, the one
   // with the smallest such weight.
   for (int i = 0; i < num_registers(); i++) {
-    float max_conflict_weight =
-        current_allocations(i)->GetMaximumConflictingWeight(range);
+    float max_conflict_weight = GetMaximumConflictingWeight(i, range);
     if (max_conflict_weight == LiveRange::kInvalidWeight) {
       free_reg = i;
       break;
@@ -216,8 +217,7 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
   if (evictable_reg >= 0) {
     TRACE("Found evictable register %s for live range %d\n",
           RegisterName(evictable_reg), range->id());
-    current_allocations(evictable_reg)
-        ->EvictAndRescheduleConflicts(range, &scheduler());
+    EvictAndRescheduleConflicts(evictable_reg, range);
     AssignRangeToRegister(evictable_reg, range);
     return;
   }
@@ -227,6 +227,21 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
 }
 
+void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
+                                                  const LiveRange* range) {
+  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
+  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
+       conflict = conflicts.RemoveCurrentAndGetNext()) {
+    DCHECK(conflict->HasRegisterAssigned());
+    CHECK(!conflict->IsFixed());
+    conflict->UnsetAssignedRegister();
+    UpdateWeightAtEviction(conflict);
+    scheduler().Schedule(conflict);
+    TRACE("Evicted range %d.\n", conflict->id());
+  }
+}
+
 void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
   size_t initial_range_count = data()->live_ranges().size();
   for (size_t i = 0; i < initial_range_count; ++i) {
@@ -298,6 +313,22 @@ void GreedyAllocator::AllocateRegisters() {
 }
 
+float GreedyAllocator::GetMaximumConflictingWeight(
+    unsigned reg_id, const LiveRange* range) const {
+  float ret = LiveRange::kInvalidWeight;
+
+  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
+  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
+       conflict = conflicts.GetNext()) {
+    DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
+    ret = Max(ret, conflict->weight());
+    if (ret == LiveRange::kMaxWeight) return ret;
+  }
+
+  return ret;
+}
+
 void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
   // The live range weight will be invalidated when ranges are created or split.
   // Otherwise, it is consistently updated when the range is allocated or
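The weight helpers that move into GreedyAllocator below bracket a range's tenure in a register: allocation multiplies the weight by kAllocatedRangeMultiplier and eviction divides it back out, so (per the comment in TryAllocateLiveRange) a candidate evicts only holders whose boosted weight it exceeds. A standalone numeric sketch of that bookkeeping, with free functions standing in for the GreedyAllocator statics:

    #include <cassert>

    // Restatement of the weight arithmetic in this diff; the real methods
    // live on GreedyAllocator and operate on LiveRange::weight().
    const float kAllocatedRangeMultiplier = 10.0;

    float AtAllocation(float w) { return w * kAllocatedRangeMultiplier; }
    float AtEviction(float w) { return w / kAllocatedRangeMultiplier; }

    int main() {
      float holder = AtAllocation(3.0f);   // 3.0 becomes 30.0 once allocated
      assert(!(25.0f > holder));           // a candidate of weight 25 cannot evict it
      assert(40.0f > holder);              // but one of weight 40 can
      assert(AtEviction(holder) == 3.0f);  // eviction restores the original weight
    }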
[file: src/compiler/greedy-allocator.h]
@@ -62,10 +62,28 @@ class GreedyAllocator final : public RegisterAllocator {
   void AllocateRegisters();
 
  private:
+  static const float kAllocatedRangeMultiplier;
+
+  static void UpdateWeightAtAllocation(LiveRange* range) {
+    DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+    range->set_weight(range->weight() * kAllocatedRangeMultiplier);
+  }
+
+  static void UpdateWeightAtEviction(LiveRange* range) {
+    DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+    range->set_weight(range->weight() / kAllocatedRangeMultiplier);
+  }
+
   AllocationScheduler& scheduler() { return scheduler_; }
   CoalescedLiveRanges* current_allocations(unsigned i) {
     return allocations_[i];
   }
+  CoalescedLiveRanges* current_allocations(unsigned i) const {
+    return allocations_[i];
+  }
   Zone* local_zone() const { return local_zone_; }
@@ -75,6 +93,13 @@ class GreedyAllocator final : public RegisterAllocator {
   // TODO(mtrofin): groups.
   void ScheduleAllocationCandidates();
 
+  void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
+    UpdateWeightAtAllocation(range);
+    current_allocations(reg_id)->AllocateRange(range);
+  }
+  // Evict and reschedule conflicts of a given range, at a given register.
+  void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
+
   // Find the optimal split for ranges defined by a memory operand, e.g.
   // constants or function parameters passed on the stack.
   void SplitAndSpillRangesDefinedByMemoryOperand();
@@ -92,6 +117,11 @@ class GreedyAllocator final : public RegisterAllocator {
   // Calculate the new weight of a range that is about to be allocated.
   float GetAllocatedRangeWeight(float candidate_weight);
 
+  // Returns kInvalidWeight if there are no conflicts, or the largest weight of
+  // a range conflicting with the given range, at the given register.
+  float GetMaximumConflictingWeight(unsigned reg_id,
+                                    const LiveRange* range) const;
+
   // This is the extension point for splitting heuristics.
   void SplitOrSpillBlockedRange(LiveRange* range);
[file: src/compiler/register-allocator.h]
@@ -140,6 +140,10 @@ class LifetimePosition final {
     return LifetimePosition(kMaxInt);
   }
 
+  static inline LifetimePosition FromInt(int value) {
+    return LifetimePosition(value);
+  }
+
  private:
   static const int kHalfStep = 2;
   static const int kStep = 2 * kHalfStep;
[file: test/unittests/compiler/coalesced-live-ranges-unittest.cc (new file; this diff is collapsed)]
[file: test/unittests/unittests.gyp]
@@ -43,6 +43,7 @@
         'base/utils/random-number-generator-unittest.cc',
         'char-predicates-unittest.cc',
         'compiler/change-lowering-unittest.cc',
+        'compiler/coalesced-live-ranges-unittest.cc',
        'compiler/common-operator-reducer-unittest.cc',
        'compiler/common-operator-unittest.cc',
        'compiler/compiler-test-utils.h',
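The new test file itself is collapsed in this view. Purely as an illustration of what the testing API enables, and not the actual contents of coalesced-live-ranges-unittest.cc (CoalescedLiveRangesTest, BuildRange, and ranges() are invented harness names), a gtest-style case might read:

    // Hypothetical sketch only; assumes a zone-backed test fixture and a
    // BuildRange helper that the real unittest would have to provide.
    TEST_F(CoalescedLiveRangesTest, NonConflictingRangesDoNotIntersect) {
      LiveRange* allocated = BuildRange(
          {{LifetimePosition::FromInt(0), LifetimePosition::FromInt(10)}});
      LiveRange* query = BuildRange(
          {{LifetimePosition::FromInt(10), LifetimePosition::FromInt(20)}});
      ranges()->AllocateRange(allocated);
      EXPECT_TRUE(ranges()->VerifyAllocationsAreValidForTesting());
      // Half-open intervals: [0, 10) and [10, 20) touch but do not conflict.
      EXPECT_EQ(nullptr, ranges()->GetConflicts(query).Current());
    }

The LifetimePosition::FromInt accessor added above plausibly exists precisely so such tests can construct positions directly.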