Commit 8e1ccba3 authored by mtrofin, committed by Commit bot

[turbofan] Retiring Greedy Allocator

We were able to achieve our goals for register allocation independent of
the allocation algorithm. Performance data so far is inconclusive regarding
the value of the Greedy algorithm compared to the particular Linear Scan
implementation we're currently using, and the performance measurement
techniques we currently use are too imprecise to settle the question.

Retiring the algorithm to lower maintenance and evolution cost (e.g. lower
cost of adding aliasing support). Once we improve benchmarking stability
and establish a suite sensitive enough for codegen improvement studies,
we may revive the algorithm, should the need arise.

BUG=

Review-Url: https://codereview.chromium.org/2060673002
Cr-Commit-Position: refs/heads/master@{#36912}
parent 8c1ba59a
......@@ -858,8 +858,6 @@ v8_source_set("v8_base") {
"src/compiler/c-linkage.cc",
"src/compiler/checkpoint-elimination.cc",
"src/compiler/checkpoint-elimination.h",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-assembler.cc",
"src/compiler/code-assembler.h",
"src/compiler/code-generator-impl.h",
......@@ -904,8 +902,6 @@ v8_source_set("v8_base") {
"src/compiler/graph-visualizer.h",
"src/compiler/graph.cc",
"src/compiler/graph.h",
"src/compiler/greedy-allocator.cc",
"src/compiler/greedy-allocator.h",
"src/compiler/instruction-codes.h",
"src/compiler/instruction-scheduler.cc",
"src/compiler/instruction-scheduler.h",
......
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/coalesced-live-ranges.h"
#include "src/compiler/greedy-allocator.h"
#include "src/compiler/register-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range,
IntervalStore* storage)
: query_(range->first_interval()),
pos_(storage->end()),
intervals_(storage) {
MovePosAndQueryToFirstConflict();
}
LiveRange* LiveRangeConflictIterator::Current() const {
if (IsFinished()) return nullptr;
return pos_->range_;
}
void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
DCHECK_NOT_NULL(query_);
auto end = intervals_->end();
LifetimePosition q_start = query_->start();
LifetimePosition q_end = query_->end();
if (intervals_->empty() || intervals_->rbegin()->end_ <= q_start ||
intervals_->begin()->start_ >= q_end) {
pos_ = end;
return;
}
pos_ = intervals_->upper_bound(AsAllocatedInterval(q_start));
// pos_ is now either at the end (no interval starts strictly after q_start),
// or at the first interval whose start is strictly greater than q_start. In
// either case, the allocated interval just before it may still intersect our
// query: either because, although it starts before the query's start, it ends
// after it; or because it starts exactly at the query's start. So unless we're
// right at the beginning of the storage - meaning the first allocated
// interval also starts after this query's start - look at what's behind.
if (pos_ != intervals_->begin()) {
--pos_;
if (!QueryIntersectsAllocatedInterval()) {
// The interval behind wasn't intersecting, so undo the step back.
++pos_;
}
}
if (pos_ == end || !QueryIntersectsAllocatedInterval()) {
pos_ = end;
}
}
void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() {
auto end = intervals_->end();
for (; query_ != nullptr; query_ = query_->next()) {
MovePosToFirstConflictForQuery();
if (pos_ != end) {
DCHECK(QueryIntersectsAllocatedInterval());
return;
}
}
Invalidate();
}
void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() {
auto end = intervals_->end();
DCHECK(pos_ != end);
LiveRange* current_conflict = Current();
while (pos_ != end && pos_->range_ == current_conflict) {
++pos_;
}
}
LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) {
if (IsFinished()) return nullptr;
LiveRange* to_clear = Current();
IncrementPosAndSkipOverRepetitions();
// At this point, pos_ is either at the end, or on an interval that doesn't
// correspond to the same range as to_clear. This interval may not even be
// a conflict.
if (clean_behind) {
// Since we parked pos_ on an iterator that won't be affected by removal,
// we can safely delete to_clear's intervals.
for (auto interval = to_clear->first_interval(); interval != nullptr;
interval = interval->next()) {
AllocatedInterval erase_key(interval->start(), interval->end(), nullptr);
intervals_->erase(erase_key);
}
}
// We may have parked pos_ at the end, or on a non-conflict. In that case,
// move to the next query and reinitialize pos and query. This may invalidate
// the iterator, if no more conflicts are available.
if (!QueryIntersectsAllocatedInterval()) {
query_ = query_->next();
MovePosAndQueryToFirstConflict();
}
return Current();
}
LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts(
const LiveRange* range) {
return LiveRangeConflictIterator(range, &intervals());
}
void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
for (auto interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
AllocatedInterval to_insert(interval->start(), interval->end(), range);
intervals().insert(to_insert);
}
}
bool CoalescedLiveRanges::VerifyAllocationsAreValidForTesting() const {
LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
for (auto i : intervals_) {
if (i.start_ < last_end) {
return false;
}
last_end = i.end_;
}
return true;
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COALESCED_LIVE_RANGES_H_
#define V8_COALESCED_LIVE_RANGES_H_
#include "src/compiler/register-allocator.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
// Implementation detail for CoalescedLiveRanges.
struct AllocatedInterval {
AllocatedInterval(LifetimePosition start, LifetimePosition end,
LiveRange* range)
: start_(start), end_(end), range_(range) {}
LifetimePosition start_;
LifetimePosition end_;
LiveRange* range_;
bool operator<(const AllocatedInterval& other) const {
return start_ < other.start_;
}
bool operator>(const AllocatedInterval& other) const {
return start_ > other.start_;
}
};
typedef ZoneSet<AllocatedInterval> IntervalStore;
// An iterator over conflicts of a live range, obtained from CoalescedLiveRanges.
// The design supports two main scenarios (see GreedyAllocator):
// (1) observing each conflicting range, without mutating the allocations, and
// (2) observing each conflicting range, and then moving to the next, after
// removing the current conflict.
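// A minimal usage sketch, mirroring the call sites in GreedyAllocator (here
// `coalesced` stands for some CoalescedLiveRanges*):
//   LiveRangeConflictIterator it = coalesced->GetConflicts(range);
//   for (LiveRange* c = it.Current(); c != nullptr; c = it.GetNext()) {
//     // Observe c; the same range may reappear, though not consecutively.
//   }
// For scenario (2), use RemoveCurrentAndGetNext() instead of GetNext().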
class LiveRangeConflictIterator {
public:
// Current conflict. nullptr if no conflicts, or if we reached the end of
// conflicts.
LiveRange* Current() const;
// Get the next conflict. Caller should handle non-consecutive repetitions of
// the same range.
LiveRange* GetNext() { return InternalGetNext(false); }
// Get the next conflict, after evicting the current one. Caller may expect
// to never observe the same live range more than once.
LiveRange* RemoveCurrentAndGetNext() { return InternalGetNext(true); }
private:
friend class CoalescedLiveRanges;
typedef IntervalStore::const_iterator interval_iterator;
LiveRangeConflictIterator(const LiveRange* range, IntervalStore* store);
// Move the store iterator to first interval intersecting query. Since the
// intervals are sorted, subsequent intervals intersecting query follow. May
// leave the store iterator at "end", meaning that the current query does not
// have an intersection.
void MovePosToFirstConflictForQuery();
// Move both query and store iterator to the first intersection, if any. If
// none, then it invalidates the iterator (IsFinished() == true)
void MovePosAndQueryToFirstConflict();
// Increment pos and skip over intervals belonging to the same range we
// started with (i.e. Current() before the call). It is possible that range
// will be seen again, but not consecutively.
void IncrementPosAndSkipOverRepetitions();
// Common implementation used by both GetNext and RemoveCurrentAndGetNext.
LiveRange* InternalGetNext(bool clean_behind);
bool IsFinished() const { return query_ == nullptr; }
static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
return AllocatedInterval(pos, LifetimePosition::Invalid(), nullptr);
}
// Intersection utilities.
static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
LifetimePosition b_start, LifetimePosition b_end) {
return a_start < b_end && b_start < a_end;
}
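// Note that intervals are treated as half-open [start, end): an interval that
// ends exactly where another starts ("touches" it) does not conflict with it,
// which matches the invariant documented on CoalescedLiveRanges below.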
bool QueryIntersectsAllocatedInterval() const {
DCHECK_NOT_NULL(query_);
return pos_ != intervals_->end() &&
Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
}
void Invalidate() {
query_ = nullptr;
pos_ = intervals_->end();
}
const UseInterval* query_;
interval_iterator pos_;
IntervalStore* intervals_;
};
// Collection of live ranges allocated to the same register.
// It supports efficiently finding all conflicts for a given, non-allocated
// range. See AllocatedInterval.
// Allocated live ranges do not intersect. At most, individual use intervals
// touch. We store, for a live range, an AllocatedInterval corresponding to each
// of that range's UseIntervals. We keep the list of AllocatedIntervals sorted
// by starts. Then, given the non-intersecting property, we know that for any
// two consecutive AllocatedIntervals, the earlier one's end is less than or
// equal to the later one's start.
// This allows for quick (logarithmic complexity) identification of the first
// AllocatedInterval to conflict with a given LiveRange, and then for efficient
// traversal of conflicts.
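// A small worked example (positions made up for illustration): with allocated
// intervals [1,3), [5,8) and [10,12), a query interval [4,11) is located via
// upper_bound on its start, landing on [5,8); stepping back to [1,3) finds no
// overlap, so the conflict walk resumes at [5,8) and then also reports the
// range owning [10,12).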
class CoalescedLiveRanges : public ZoneObject {
public:
explicit CoalescedLiveRanges(Zone* zone) : intervals_(zone) {}
void clear() { intervals_.clear(); }
bool empty() const { return intervals_.empty(); }
// Iterate over each live range conflicting with the provided one.
// The same live range may be observed multiple, but non-consecutive times.
LiveRangeConflictIterator GetConflicts(const LiveRange* range);
// Adds the given range's use intervals to this register's allocations.
void AllocateRange(LiveRange* range);
// Unit testing API, verifying that allocated intervals do not overlap.
bool VerifyAllocationsAreValidForTesting() const;
private:
static const float kAllocatedRangeMultiplier;
IntervalStore& intervals() { return intervals_; }
const IntervalStore& intervals() const { return intervals_; }
// Augment the weight of a range that is about to be allocated.
static void UpdateWeightAtAllocation(LiveRange* range);
// Reduce the weight of a range that has lost allocation.
static void UpdateWeightAtEviction(LiveRange* range);
IntervalStore intervals_;
DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COALESCED_LIVE_RANGES_H_
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/greedy-allocator.h"
#include "src/compiler/register-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
#define TRACE(...) \
do { \
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
namespace {
void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
int reg_id = range->assigned_register();
range->SetUseHints(reg_id);
if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id);
}
}
void UnsetOperands(LiveRange* range, RegisterAllocationData* data) {
range->UnsetUseHints();
if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister();
}
}
LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
LifetimePosition pos) {
DCHECK(range->Start() < pos && pos < range->End());
DCHECK(pos.IsStart() || pos.IsGapPosition() ||
(data->code()
->GetInstructionBlock(pos.ToInstructionIndex())
->last_instruction_index() != pos.ToInstructionIndex()));
LiveRange* result = range->SplitAt(pos, data->allocation_zone());
return result;
}
} // namespace
AllocationCandidate AllocationScheduler::GetNext() {
DCHECK(!queue_.empty());
AllocationCandidate ret = queue_.top();
queue_.pop();
return ret;
}
void AllocationScheduler::Schedule(LiveRange* range) {
TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
range->relative_id());
queue_.push(AllocationCandidate(range));
}
void AllocationScheduler::Schedule(LiveRangeGroup* group) {
queue_.push(AllocationCandidate(group));
}
GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
RegisterKind kind, Zone* local_zone)
: RegisterAllocator(data, kind),
local_zone_(local_zone),
allocations_(local_zone),
scheduler_(local_zone),
groups_(local_zone) {}
void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
range->TopLevel()->vreg(), range->relative_id());
DCHECK(!range->HasRegisterAssigned());
AllocateRegisterToRange(reg_id, range);
TRACE("Assigning %s to range %d%d.\n", RegisterName(reg_id),
range->TopLevel()->vreg(), range->relative_id());
range->set_assigned_register(reg_id);
UpdateOperands(range, data());
}
void GreedyAllocator::PreallocateFixedRanges() {
allocations_.resize(num_registers());
for (int i = 0; i < num_registers(); i++) {
allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
}
for (LiveRange* fixed_range : GetFixedRegisters()) {
if (fixed_range != nullptr) {
DCHECK_EQ(mode(), fixed_range->kind());
DCHECK(fixed_range->TopLevel()->IsFixed());
int reg_nr = fixed_range->assigned_register();
EnsureValidRangeWeight(fixed_range);
AllocateRegisterToRange(reg_nr, fixed_range);
}
}
}
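// Group each phi live range together with its non-conflicting input ranges, so
// that the whole group can later be allocated to a single register (see
// TryAllocateGroup); presumably this avoids the moves that would otherwise be
// needed to resolve the phi.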
void GreedyAllocator::GroupLiveRanges() {
CoalescedLiveRanges grouper(local_zone());
for (TopLevelLiveRange* range : data()->live_ranges()) {
grouper.clear();
// Skip splinters, because we do not want to optimize for them, and moves
// due to assigning them to different registers occur in deferred blocks.
if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
continue;
}
// A phi can't be a memory operand, so it couldn't have been split.
DCHECK(!range->spilled());
// Maybe this phi range is itself an input to another phi which was already
// processed.
LiveRangeGroup* latest_grp = range->group() != nullptr
? range->group()
: new (local_zone())
LiveRangeGroup(local_zone());
// Populate the grouper.
if (range->group() == nullptr) {
grouper.AllocateRange(range);
} else {
for (LiveRange* member : range->group()->ranges()) {
grouper.AllocateRange(member);
}
}
for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
// Skip the output when it also appears among the inputs, which may happen for loops.
if (j == range->vreg()) continue;
TopLevelLiveRange* other_top = data()->live_ranges()[j];
if (other_top->IsSplinter()) continue;
// If the other was a memory operand, it might have been split.
// So get the unsplit part.
LiveRange* other =
other_top->next() == nullptr ? other_top : other_top->next();
if (other->spilled()) continue;
LiveRangeGroup* other_group = other->group();
if (other_group != nullptr) {
bool can_merge = true;
for (LiveRange* member : other_group->ranges()) {
if (grouper.GetConflicts(member).Current() != nullptr) {
can_merge = false;
break;
}
}
// If each member doesn't conflict with the current group, then since
// the members don't conflict with each other either, we can merge them.
if (can_merge) {
latest_grp->ranges().insert(latest_grp->ranges().end(),
other_group->ranges().begin(),
other_group->ranges().end());
for (LiveRange* member : other_group->ranges()) {
grouper.AllocateRange(member);
member->set_group(latest_grp);
}
// Clear the other group, so we avoid scheduling it.
other_group->ranges().clear();
}
} else if (grouper.GetConflicts(other).Current() == nullptr) {
grouper.AllocateRange(other);
latest_grp->ranges().push_back(other);
other->set_group(latest_grp);
}
}
if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
latest_grp->ranges().push_back(range);
DCHECK(latest_grp->ranges().size() > 1);
groups().push_back(latest_grp);
range->set_group(latest_grp);
}
}
}
void GreedyAllocator::ScheduleAllocationCandidates() {
for (LiveRangeGroup* group : groups()) {
if (group->ranges().size() > 0) {
// We shouldn't have added single-range groups.
DCHECK(group->ranges().size() != 1);
scheduler().Schedule(group);
}
}
for (LiveRange* range : data()->live_ranges()) {
if (CanProcessRange(range)) {
for (LiveRange* child = range; child != nullptr; child = child->next()) {
if (!child->spilled() && child->group() == nullptr) {
scheduler().Schedule(child);
}
}
}
}
}
void GreedyAllocator::TryAllocateCandidate(
const AllocationCandidate& candidate) {
if (candidate.is_group()) {
TryAllocateGroup(candidate.group());
} else {
TryAllocateLiveRange(candidate.live_range());
}
}
void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
float group_weight = 0.0;
for (LiveRange* member : group->ranges()) {
EnsureValidRangeWeight(member);
group_weight = Max(group_weight, member->weight());
}
float eviction_weight = group_weight;
int eviction_reg = -1;
int free_reg = -1;
for (int i = 0; i < num_allocatable_registers(); ++i) {
int reg = allocatable_register_code(i);
float weight = GetMaximumConflictingWeight(reg, group, group_weight);
if (weight == LiveRange::kInvalidWeight) {
free_reg = reg;
break;
}
if (weight < eviction_weight) {
eviction_weight = weight;
eviction_reg = reg;
}
}
if (eviction_reg < 0 && free_reg < 0) {
for (LiveRange* member : group->ranges()) {
scheduler().Schedule(member);
}
return;
}
if (free_reg < 0) {
DCHECK(eviction_reg >= 0);
for (LiveRange* member : group->ranges()) {
EvictAndRescheduleConflicts(eviction_reg, member);
}
free_reg = eviction_reg;
}
DCHECK(free_reg >= 0);
for (LiveRange* member : group->ranges()) {
AssignRangeToRegister(free_reg, member);
}
}
void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// TODO(mtrofin): once we introduce groups, we'll want to first try and
// allocate at the preferred register.
TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
range->relative_id());
int free_reg = -1;
int evictable_reg = -1;
int hinted_reg = -1;
EnsureValidRangeWeight(range);
float competing_weight = range->weight();
DCHECK(competing_weight != LiveRange::kInvalidWeight);
// Can we allocate at the hinted register?
if (range->FirstHintPosition(&hinted_reg) != nullptr) {
DCHECK(hinted_reg >= 0);
float max_conflict_weight =
GetMaximumConflictingWeight(hinted_reg, range, competing_weight);
if (max_conflict_weight == LiveRange::kInvalidWeight) {
free_reg = hinted_reg;
} else if (max_conflict_weight < range->weight()) {
evictable_reg = hinted_reg;
}
}
if (free_reg < 0 && evictable_reg < 0) {
// There was no hinted reg, or we cannot allocate there.
float smallest_weight = LiveRange::kMaxWeight;
// Seek either the first free register, or, from the set of registers
// where the maximum conflict is lower than the candidate's weight, the one
// with the smallest such weight.
for (int i = 0; i < num_allocatable_registers(); i++) {
int reg = allocatable_register_code(i);
// Skip unnecessarily re-visiting the hinted register, if any.
if (reg == hinted_reg) continue;
float max_conflict_weight =
GetMaximumConflictingWeight(reg, range, competing_weight);
if (max_conflict_weight == LiveRange::kInvalidWeight) {
free_reg = reg;
break;
}
if (max_conflict_weight < range->weight() &&
max_conflict_weight < smallest_weight) {
smallest_weight = max_conflict_weight;
evictable_reg = reg;
}
}
}
// We have a free register, so we use it.
if (free_reg >= 0) {
TRACE("Found free register %s for live range %d:%d.\n",
RegisterName(free_reg), range->TopLevel()->vreg(),
range->relative_id());
AssignRangeToRegister(free_reg, range);
return;
}
// We found a register to perform evictions, so we evict and allocate our
// candidate.
if (evictable_reg >= 0) {
TRACE("Found evictable register %s for live range %d:%d.\n",
RegisterName(evictable_reg), range->TopLevel()->vreg(),
range->relative_id());
EvictAndRescheduleConflicts(evictable_reg, range);
AssignRangeToRegister(evictable_reg, range);
return;
}
// The range needs to be split or spilled.
SplitOrSpillBlockedRange(range);
}
void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
const LiveRange* range) {
auto conflicts = current_allocations(reg_id)->GetConflicts(range);
for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.RemoveCurrentAndGetNext()) {
DCHECK(conflict->HasRegisterAssigned());
CHECK(!conflict->TopLevel()->IsFixed());
conflict->UnsetAssignedRegister();
UnsetOperands(conflict, data());
UpdateWeightAtEviction(conflict);
scheduler().Schedule(conflict);
TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(),
conflict->relative_id());
}
}
void GreedyAllocator::AllocateRegisters() {
CHECK(scheduler().empty());
CHECK(allocations_.empty());
TRACE("Begin allocating function %s with the Greedy Allocator\n",
data()->debug_name());
SplitAndSpillRangesDefinedByMemoryOperand(true);
GroupLiveRanges();
ScheduleAllocationCandidates();
PreallocateFixedRanges();
while (!scheduler().empty()) {
AllocationCandidate candidate = scheduler().GetNext();
TryAllocateCandidate(candidate);
}
for (size_t i = 0; i < allocations_.size(); ++i) {
if (!allocations_[i]->empty()) {
data()->MarkAllocated(mode(), static_cast<int>(i));
}
}
allocations_.clear();
TryReuseSpillRangesForGroups();
TRACE("End allocating function %s with the Greedy Allocator\n",
data()->debug_name());
}
void GreedyAllocator::TryReuseSpillRangesForGroups() {
for (TopLevelLiveRange* top : data()->live_ranges()) {
if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
continue;
}
SpillRange* spill_range = nullptr;
for (LiveRange* member : top->group()->ranges()) {
if (!member->TopLevel()->HasSpillRange()) continue;
SpillRange* member_range = member->TopLevel()->GetSpillRange();
if (spill_range == nullptr) {
spill_range = member_range;
} else {
// This may not always succeed, because we group non-conflicting ranges
// that may have been splintered, and the splinters may cause conflicts
// in the spill ranges.
// TODO(mtrofin): should the splinters own their own spill ranges?
spill_range->TryMerge(member_range);
}
}
}
}
float GreedyAllocator::GetMaximumConflictingWeight(
unsigned reg_id, const LiveRange* range, float competing_weight) const {
float ret = LiveRange::kInvalidWeight;
auto conflicts = current_allocations(reg_id)->GetConflicts(range);
for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.GetNext()) {
DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
if (competing_weight <= conflict->weight()) return LiveRange::kMaxWeight;
ret = Max(ret, conflict->weight());
DCHECK(ret < LiveRange::kMaxWeight);
}
return ret;
}
float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
const LiveRangeGroup* group,
float group_weight) const {
float ret = LiveRange::kInvalidWeight;
for (LiveRange* member : group->ranges()) {
float member_conflict_weight =
GetMaximumConflictingWeight(reg_id, member, group_weight);
if (member_conflict_weight == LiveRange::kMaxWeight) {
return LiveRange::kMaxWeight;
}
if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
ret = Max(member_conflict_weight, ret);
}
return ret;
}
void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
// The live range weight will be invalidated when ranges are created or split.
// Otherwise, it is consistently updated when the range is allocated or
// unallocated.
if (range->weight() != LiveRange::kInvalidWeight) return;
if (range->TopLevel()->IsFixed()) {
range->set_weight(LiveRange::kMaxWeight);
return;
}
if (!IsProgressPossible(range)) {
range->set_weight(LiveRange::kMaxWeight);
return;
}
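// The weight below is a use-density metric: the number of use positions
// divided by the range's size in LifetimePositions (e.g. 4 uses spread over a
// 20-position range yield a weight of 0.2).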
float use_count = 0.0;
for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
++use_count;
}
range->set_weight(use_count / static_cast<float>(range->GetSize()));
}
void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
LifetimePosition start = range->Start();
CHECK(range->CanBeSpilled(start));
DCHECK(range->NextRegisterPosition(start) == nullptr);
Spill(range);
}
LiveRange* GreedyAllocator::GetRemainderAfterSplittingAroundFirstCall(
LiveRange* range) {
LiveRange* ret = range;
for (UseInterval* interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
LifetimePosition start = interval->start();
LifetimePosition end = interval->end();
// If the interval starts at instruction end, then the first instruction
// in the interval is the next one.
int first_full_instruction = (start.IsGapPosition() || start.IsStart())
? start.ToInstructionIndex()
: start.ToInstructionIndex() + 1;
// If the interval ends in a gap or at instruction start, then the last
// instruction is the previous one.
int last_full_instruction = (end.IsGapPosition() || end.IsStart())
? end.ToInstructionIndex() - 1
: end.ToInstructionIndex();
for (int instruction_index = first_full_instruction;
instruction_index <= last_full_instruction; ++instruction_index) {
if (!code()->InstructionAt(instruction_index)->IsCall()) continue;
LifetimePosition before =
GetSplitPositionForInstruction(range, instruction_index);
LiveRange* second_part =
before.IsValid() ? Split(range, data(), before) : range;
if (range != second_part) scheduler().Schedule(range);
LifetimePosition after =
FindSplitPositionAfterCall(second_part, instruction_index);
if (after.IsValid()) {
ret = Split(second_part, data(), after);
} else {
ret = nullptr;
}
Spill(second_part);
return ret;
}
}
return ret;
}
bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
bool modified = false;
while (range != nullptr) {
LiveRange* remainder = GetRemainderAfterSplittingAroundFirstCall(range);
// If we performed no modification, we're done.
if (remainder == range) {
break;
}
// We performed a modification.
modified = true;
range = remainder;
}
// If we have a remainder and we made modifications, it means the remainder
// has no calls and we should schedule it for further processing. If we made
// no modifications, we will just return false, because we want the algorithm
// to make progress by trying some other heuristic.
if (modified && range != nullptr) {
DCHECK(!range->spilled());
DCHECK(!range->HasRegisterAssigned());
scheduler().Schedule(range);
}
return modified;
}
LifetimePosition GreedyAllocator::FindSplitPositionAfterCall(
const LiveRange* range, int call_index) {
LifetimePosition after_call =
Max(range->Start(),
LifetimePosition::GapFromInstructionIndex(call_index + 1));
UsePosition* next_use = range->NextRegisterPosition(after_call);
if (!next_use) return LifetimePosition::Invalid();
LifetimePosition split_pos = FindOptimalSplitPos(after_call, next_use->pos());
split_pos =
GetSplitPositionForInstruction(range, split_pos.ToInstructionIndex());
return split_pos;
}
LifetimePosition GreedyAllocator::FindSplitPositionBeforeLoops(
LiveRange* range) {
LifetimePosition end = range->End();
if (end.ToInstructionIndex() >= code()->LastInstructionIndex()) {
end =
LifetimePosition::GapFromInstructionIndex(end.ToInstructionIndex() - 1);
}
LifetimePosition pos = FindOptimalSplitPos(range->Start(), end);
pos = GetSplitPositionForInstruction(range, pos.ToInstructionIndex());
return pos;
}
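// Fallback chain for a range that could not be allocated: first try splitting
// around the calls it spans; failing that, split before loops, or at a last
// resort position; only when no valid split position exists is the range
// spilled outright.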
void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
if (TrySplitAroundCalls(range)) return;
LifetimePosition pos = FindSplitPositionBeforeLoops(range);
if (!pos.IsValid()) pos = GetLastResortSplitPosition(range);
if (pos.IsValid()) {
LiveRange* tail = Split(range, data(), pos);
DCHECK(tail != range);
scheduler().Schedule(tail);
scheduler().Schedule(range);
return;
}
SpillRangeAsLastResort(range);
}
// Basic heuristic for advancing the algorithm, if any other splitting heuristic
// failed.
LifetimePosition GreedyAllocator::GetLastResortSplitPosition(
const LiveRange* range) {
LifetimePosition previous = range->Start();
for (UsePosition *pos = range->NextRegisterPosition(previous); pos != nullptr;
previous = previous.NextFullStart(),
pos = range->NextRegisterPosition(previous)) {
LifetimePosition optimal = FindOptimalSplitPos(previous, pos->pos());
LifetimePosition before =
GetSplitPositionForInstruction(range, optimal.ToInstructionIndex());
if (before.IsValid()) return before;
LifetimePosition after = GetSplitPositionForInstruction(
range, pos->pos().ToInstructionIndex() + 1);
if (after.IsValid()) return after;
}
return LifetimePosition::Invalid();
}
bool GreedyAllocator::IsProgressPossible(const LiveRange* range) {
return range->CanBeSpilled(range->Start()) ||
GetLastResortSplitPosition(range).IsValid();
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_GREEDY_ALLOCATOR_H_
#define V8_GREEDY_ALLOCATOR_H_
#include "src/compiler/coalesced-live-ranges.h"
#include "src/compiler/register-allocator.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
// The unit of allocation scheduling: at minimum a LiveRange, but possibly a
// group of LiveRanges (LiveRangeGroup). It has to be comparable.
class AllocationCandidate {
public:
explicit AllocationCandidate(LiveRange* range)
: is_group_(false), size_(range->GetSize()) {
candidate_.range_ = range;
}
explicit AllocationCandidate(LiveRangeGroup* ranges)
: is_group_(true), size_(CalculateGroupSize(ranges)) {
candidate_.group_ = ranges;
}
// Strict ordering operators
bool operator<(const AllocationCandidate& other) const {
return size() < other.size();
}
bool operator>(const AllocationCandidate& other) const {
return size() > other.size();
}
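// Candidates compare by size (the number of LifetimePositions covered). The
// scheduler's priority queue is a max-heap over this ordering, so larger
// candidates are handed out (and thus allocated) first.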
bool is_group() const { return is_group_; }
LiveRange* live_range() const { return candidate_.range_; }
LiveRangeGroup* group() const { return candidate_.group_; }
private:
unsigned CalculateGroupSize(LiveRangeGroup* group) {
unsigned ret = 0;
for (LiveRange* range : group->ranges()) {
ret += range->GetSize();
}
return ret;
}
unsigned size() const { return size_; }
bool is_group_;
unsigned size_;
union {
LiveRange* range_;
LiveRangeGroup* group_;
} candidate_;
};
// Schedule processing (allocating) of AllocationCandidates.
class AllocationScheduler final : ZoneObject {
public:
explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
void Schedule(LiveRange* range);
void Schedule(LiveRangeGroup* group);
AllocationCandidate GetNext();
bool empty() const { return queue_.empty(); }
private:
typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
ScheduleQueue queue_;
DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
};
// A variant of the LLVM Greedy Register Allocator. See
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
class GreedyAllocator final : public RegisterAllocator {
public:
explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
Zone* local_zone);
void AllocateRegisters();
private:
static const float kAllocatedRangeMultiplier;
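// Once a range is allocated, its weight is boosted by kAllocatedRangeMultiplier
// (10x, see the .cc file), so later candidates must be proportionally heavier
// to evict it; eviction divides the boost back out.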
static void UpdateWeightAtAllocation(LiveRange* range) {
DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
range->set_weight(range->weight() * kAllocatedRangeMultiplier);
}
static void UpdateWeightAtEviction(LiveRange* range) {
DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
range->set_weight(range->weight() / kAllocatedRangeMultiplier);
}
AllocationScheduler& scheduler() { return scheduler_; }
CoalescedLiveRanges* current_allocations(unsigned i) {
return allocations_[i];
}
CoalescedLiveRanges* current_allocations(unsigned i) const {
return allocations_[i];
}
Zone* local_zone() const { return local_zone_; }
ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }
// Insert fixed ranges.
void PreallocateFixedRanges();
void GroupLiveRanges();
// Schedule unassigned live ranges for allocation.
void ScheduleAllocationCandidates();
void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
UpdateWeightAtAllocation(range);
current_allocations(reg_id)->AllocateRange(range);
}
// Evict and reschedule conflicts of a given range, at a given register.
void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
void TryAllocateCandidate(const AllocationCandidate& candidate);
void TryAllocateLiveRange(LiveRange* range);
void TryAllocateGroup(LiveRangeGroup* group);
// Calculate the weight of a candidate for allocation.
void EnsureValidRangeWeight(LiveRange* range);
// Calculate the new weight of a range that is about to be allocated.
float GetAllocatedRangeWeight(float candidate_weight);
// Returns kInvalidWeight if there are no conflicts, or the largest weight of
// a range conflicting with the given range, at the given register.
float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
float competing_weight) const;
// Returns kInvalidWeight if there are no conflicts, or the largest weight of
// a range conflicting with the given range, at the given register.
float GetMaximumConflictingWeight(unsigned reg_id,
const LiveRangeGroup* group,
float group_weight) const;
// This is the extension point for splitting heuristics.
void SplitOrSpillBlockedRange(LiveRange* range);
// Find a good position after the call at which to split, i.e. where a range
// spilled across the call will need to be filled (reloaded).
LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
int call_index);
// Split a range around all calls it passes over. Returns true if any changes
// were made, or false if no calls were found.
bool TrySplitAroundCalls(LiveRange* range);
// Find a split position at the outermost loop.
LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);
// Finds the first call instruction in the path of this range. Splits before
// and requeues that segment (if any), spills the section over the call, and
// returns the section after the call. The return is:
// - same range, if no call was found
// - nullptr, if the range finished at the call and there's no "after the
// call" portion.
// - the portion after the call.
LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
// While we attempt to merge spill ranges later on in the allocation pipeline,
// we want to ensure group elements get merged. Waiting until later may hinder
// merge-ability, since the pipeline merger (being naive) may create conflicts
// between spill ranges of group members.
void TryReuseSpillRangesForGroups();
LifetimePosition GetLastResortSplitPosition(const LiveRange* range);
bool IsProgressPossible(const LiveRange* range);
// Necessary heuristic: spill when all else failed.
void SpillRangeAsLastResort(LiveRange* range);
void AssignRangeToRegister(int reg_id, LiveRange* range);
Zone* local_zone_;
ZoneVector<CoalescedLiveRanges*> allocations_;
AllocationScheduler scheduler_;
ZoneVector<LiveRangeGroup*> groups_;
DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_GREEDY_ALLOCATOR_H_
......@@ -26,7 +26,6 @@
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/greedy-allocator.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/instruction.h"
#include "src/compiler/js-builtin-reducer.h"
......@@ -1766,13 +1765,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<SplinterLiveRangesPhase>();
}
if (FLAG_turbo_greedy_regalloc) {
Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
Run<AllocateFPRegistersPhase<GreedyAllocator>>();
} else {
Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
}
Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
if (FLAG_turbo_preprocess_ranges) {
Run<MergeSplintersPhase>();
......
......@@ -413,11 +413,6 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
return os;
}
const float LiveRange::kInvalidWeight = -1;
const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
TopLevelLiveRange* top_level)
: relative_id_(relative_id),
......@@ -430,10 +425,7 @@ LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
current_interval_(nullptr),
last_processed_use_(nullptr),
current_hint_position_(nullptr),
splitting_pointer_(nullptr),
size_(kInvalidSize),
weight_(kInvalidWeight),
group_(nullptr) {
splitting_pointer_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
RepresentationField::encode(rep);
......@@ -699,10 +691,6 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
last_processed_use_ = nullptr;
current_interval_ = nullptr;
// Invalidate size and weight of this range. The child range has them
// invalid at construction.
size_ = kInvalidSize;
weight_ = kInvalidWeight;
#ifdef DEBUG
VerifyChildStructure();
result->VerifyChildStructure();
......@@ -818,20 +806,6 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
return LifetimePosition::Invalid();
}
unsigned LiveRange::GetSize() {
if (size_ == kInvalidSize) {
size_ = 0;
for (const UseInterval* interval = first_interval(); interval != nullptr;
interval = interval->next()) {
size_ += (interval->end().value() - interval->start().value());
}
}
return static_cast<unsigned>(size_);
}
void LiveRange::Print(const RegisterConfiguration* config,
bool with_children) const {
OFStream os(stdout);
......
......@@ -412,19 +412,9 @@ class LiveRange : public ZoneObject {
void SetUseHints(int register_index);
void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
// Used solely by the Greedy Allocator:
unsigned GetSize();
float weight() const { return weight_; }
void set_weight(float weight) { weight_ = weight; }
LiveRangeGroup* group() const { return group_; }
void set_group(LiveRangeGroup* group) { group_ = group; }
void Print(const RegisterConfiguration* config, bool with_children) const;
void Print(bool with_children) const;
static const int kInvalidSize = -1;
static const float kInvalidWeight;
static const float kMaxWeight;
private:
friend class TopLevelLiveRange;
explicit LiveRange(int relative_id, MachineRepresentation rep,
......@@ -461,17 +451,6 @@ class LiveRange : public ZoneObject {
mutable UsePosition* current_hint_position_;
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
// greedy: the number of LifetimePositions covered by this range. Used to
// prioritize selecting live ranges for register assignment, as well as
// in weight calculations.
int size_;
// greedy: a metric for resolving conflicts between ranges with an assigned
// register and ranges that intersect them and need a register.
float weight_;
// greedy: grouping
LiveRangeGroup* group_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
......@@ -483,7 +462,6 @@ class LiveRangeGroup final : public ZoneObject {
ZoneVector<LiveRange*>& ranges() { return ranges_; }
const ZoneVector<LiveRange*>& ranges() const { return ranges_; }
// TODO(mtrofin): populate assigned register and use in weight calculation.
int assigned_register() const { return assigned_register_; }
void set_assigned_register(int reg) { assigned_register_ = reg; }
......
......@@ -428,7 +428,6 @@ DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
DEFINE_BOOL(turbo_from_bytecode, false, "enable building graphs from bytecode")
DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_preprocess_ranges, true,
......
......@@ -507,8 +507,6 @@
'compiler/c-linkage.cc',
'compiler/checkpoint-elimination.cc',
'compiler/checkpoint-elimination.h',
'compiler/coalesced-live-ranges.cc',
'compiler/coalesced-live-ranges.h',
'compiler/code-generator-impl.h',
'compiler/code-generator.cc',
'compiler/code-generator.h',
......@@ -553,8 +551,6 @@
'compiler/graph-visualizer.h',
'compiler/graph.cc',
'compiler/graph.h',
'compiler/greedy-allocator.cc',
'compiler/greedy-allocator.h',
'compiler/instruction-codes.h',
'compiler/instruction-selector-impl.h',
'compiler/instruction-selector.cc',
......
......@@ -29,7 +29,6 @@ executable("unittests") {
"char-predicates-unittest.cc",
"compiler/branch-elimination-unittest.cc",
"compiler/checkpoint-elimination-unittest.cc",
"compiler/coalesced-live-ranges-unittest.cc",
"compiler/common-operator-reducer-unittest.cc",
"compiler/common-operator-unittest.cc",
"compiler/compiler-test-utils.h",
......@@ -174,7 +173,10 @@ executable("unittests") {
# Suppress warnings about importing locally defined symbols.
if (is_component_build) {
ldflags = [ "/ignore:4049", "/ignore:4217" ]
ldflags = [
"/ignore:4049",
"/ignore:4217",
]
}
}
}
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/coalesced-live-ranges.h"
#include "test/unittests/compiler/live-range-builder.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace compiler {
class CoalescedLiveRangesTest : public TestWithZone {
public:
CoalescedLiveRangesTest() : TestWithZone(), ranges_(zone()) {}
bool HasNoConflicts(const LiveRange* range);
bool ConflictsPreciselyWith(const LiveRange* range, int id);
bool ConflictsPreciselyWith(const LiveRange* range, int id1, int id2);
CoalescedLiveRanges& ranges() { return ranges_; }
const CoalescedLiveRanges& ranges() const { return ranges_; }
bool AllocationsAreValid() const;
void RemoveConflicts(LiveRange* range);
private:
typedef ZoneSet<int> LiveRangeIDs;
bool IsRangeConflictingWith(const LiveRange* range, const LiveRangeIDs& ids);
CoalescedLiveRanges ranges_;
};
bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
int id) {
LiveRangeIDs set(zone());
set.insert(id);
return IsRangeConflictingWith(range, set);
}
bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
int id1, int id2) {
LiveRangeIDs set(zone());
set.insert(id1);
set.insert(id2);
return IsRangeConflictingWith(range, set);
}
bool CoalescedLiveRangesTest::HasNoConflicts(const LiveRange* range) {
LiveRangeIDs set(zone());
return IsRangeConflictingWith(range, set);
}
void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
auto conflicts = ranges().GetConflicts(range);
LiveRangeIDs seen(zone());
for (auto c = conflicts.Current(); c != nullptr;
c = conflicts.RemoveCurrentAndGetNext()) {
int id = c->TopLevel()->vreg();
EXPECT_FALSE(seen.count(id) > 0);
seen.insert(c->TopLevel()->vreg());
}
}
bool CoalescedLiveRangesTest::AllocationsAreValid() const {
return ranges().VerifyAllocationsAreValidForTesting();
}
bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
const LiveRangeIDs& ids) {
LiveRangeIDs found_ids(zone());
auto conflicts = ranges().GetConflicts(range);
for (auto conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.GetNext()) {
found_ids.insert(conflict->TopLevel()->vreg());
}
return found_ids == ids;
}
TEST_F(CoalescedLiveRangesTest, VisitEmptyAllocations) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ASSERT_TRUE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
ASSERT_TRUE(HasNoConflicts(range));
}
TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterAllocations) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(5, 6);
ranges().AllocateRange(range);
ASSERT_FALSE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 2);
ASSERT_TRUE(HasNoConflicts(query));
query = TestRangeBuilder(zone()).Id(3).Build(1, 5);
ASSERT_TRUE(HasNoConflicts(query));
}
TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterManyAllocations) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(5, 7).Add(10, 12).Build();
ranges().AllocateRange(range);
ASSERT_FALSE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
LiveRange* query =
TestRangeBuilder(zone()).Id(2).Add(1, 2).Add(13, 15).Build();
ASSERT_TRUE(HasNoConflicts(query));
query = TestRangeBuilder(zone()).Id(3).Add(1, 5).Add(12, 15).Build();
ASSERT_TRUE(HasNoConflicts(query));
}
TEST_F(CoalescedLiveRangesTest, SelfConflictsPreciselyWithSelf) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ranges().AllocateRange(range);
ASSERT_FALSE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
ASSERT_TRUE(ConflictsPreciselyWith(range, 1));
range = TestRangeBuilder(zone()).Id(2).Build(8, 10);
ranges().AllocateRange(range);
ASSERT_TRUE(ConflictsPreciselyWith(range, 2));
}
TEST_F(CoalescedLiveRangesTest, QueryStartsBeforeConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 3);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
ranges().AllocateRange(range);
query = TestRangeBuilder(zone()).Id(4).Build(6, 9);
ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
}
TEST_F(CoalescedLiveRangesTest, QueryStartsInConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(3, 6);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
ranges().AllocateRange(range);
query = TestRangeBuilder(zone()).Id(4).Build(9, 11);
ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
}
TEST_F(CoalescedLiveRangesTest, QueryContainedInConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 3);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
TEST_F(CoalescedLiveRangesTest, QueryContainsConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 3);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 5);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsSameRange) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(7, 9).Add(20, 25).Build();
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 8);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsDifferentRanges) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(20, 25).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(7, 10);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(2, 22);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1, 2));
}
TEST_F(CoalescedLiveRangesTest, QueryFitsInGaps) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 15).Add(20, 25).Build();
ranges().AllocateRange(range);
LiveRange* query =
TestRangeBuilder(zone()).Id(3).Add(5, 10).Add(16, 19).Add(27, 30).Build();
ASSERT_TRUE(HasNoConflicts(query));
}
TEST_F(CoalescedLiveRangesTest, DeleteConflictBefore) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Add(1, 4).Add(5, 6).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(3, 7);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
TEST_F(CoalescedLiveRangesTest, DeleteConflictAfter) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Add(40, 50).Add(60, 70).Build();
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(45, 60);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
TEST_F(CoalescedLiveRangesTest, DeleteConflictStraddle) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 20).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
TEST_F(CoalescedLiveRangesTest, DeleteConflictManyOverlapsBefore) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(10, 20).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
TEST_F(CoalescedLiveRangesTest, DeleteWhenConflictRepeatsAfterNonConflict) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(20, 30).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(12, 15);
ranges().AllocateRange(range);
LiveRange* query =
TestRangeBuilder(zone()).Id(3).Add(1, 8).Add(22, 25).Build();
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -678,8 +678,7 @@ TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
Allocate();
// TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
// so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2.
// Expand the test once greedy is back online with this facility.
// so only var3 is spilled in deferred blocks.
const int var3_reg = 2;
const int var3_slot = 2;
......
......@@ -44,7 +44,6 @@
'char-predicates-unittest.cc',
'compiler/branch-elimination-unittest.cc',
'compiler/checkpoint-elimination-unittest.cc',
'compiler/coalesced-live-ranges-unittest.cc',
'compiler/common-operator-reducer-unittest.cc',
'compiler/common-operator-unittest.cc',
'compiler/compiler-test-utils.h',
......