Commit 8e1ccba3 authored by mtrofin, committed by Commit bot

[turbofan] Retiring Greedy Allocator

We were able to achieve our goals for register allocation independent of
the allocation algorithm. Performance data so far is inconclusive regarding
the value of the Greedy algorithm compared to the particular Linear Scan
implementation we're currently using, and the performance measurement
techniques we currently use are too imprecise to settle the question.

Retiring the algorithm to lower maintenance and evolution cost (e.g. lower
cost of adding aliasing support). Once we improve benchmarking stability
and establish a suite sensitive enough for codegen improvement studies,
we may revive the algorithm, should the need arise.

BUG=

Review-Url: https://codereview.chromium.org/2060673002
Cr-Commit-Position: refs/heads/master@{#36912}
parent 8c1ba59a
@@ -858,8 +858,6 @@ v8_source_set("v8_base") {
     "src/compiler/c-linkage.cc",
     "src/compiler/checkpoint-elimination.cc",
     "src/compiler/checkpoint-elimination.h",
-    "src/compiler/coalesced-live-ranges.cc",
-    "src/compiler/coalesced-live-ranges.h",
     "src/compiler/code-assembler.cc",
     "src/compiler/code-assembler.h",
     "src/compiler/code-generator-impl.h",
@@ -904,8 +902,6 @@ v8_source_set("v8_base") {
     "src/compiler/graph-visualizer.h",
     "src/compiler/graph.cc",
     "src/compiler/graph.h",
-    "src/compiler/greedy-allocator.cc",
-    "src/compiler/greedy-allocator.h",
     "src/compiler/instruction-codes.h",
     "src/compiler/instruction-scheduler.cc",
     "src/compiler/instruction-scheduler.h",
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/coalesced-live-ranges.h"
#include "src/compiler/greedy-allocator.h"
#include "src/compiler/register-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range,
IntervalStore* storage)
: query_(range->first_interval()),
pos_(storage->end()),
intervals_(storage) {
MovePosAndQueryToFirstConflict();
}
LiveRange* LiveRangeConflictIterator::Current() const {
if (IsFinished()) return nullptr;
return pos_->range_;
}
void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
DCHECK_NOT_NULL(query_);
auto end = intervals_->end();
LifetimePosition q_start = query_->start();
LifetimePosition q_end = query_->end();
if (intervals_->empty() || intervals_->rbegin()->end_ <= q_start ||
intervals_->begin()->start_ >= q_end) {
pos_ = end;
return;
}
pos_ = intervals_->upper_bound(AsAllocatedInterval(q_start));
// pos is either at the end (no start strictly greater than q_start) or
// at some position with the aforementioned property. In either case, the
// allocated interval before this one may intersect our query:
// either because, although it starts before this query's start, it ends
// after; or because it starts exactly at the query start. So unless we're
// right at the beginning of the storage - meaning the first allocated
// interval also starts after this query's start - we look at the one behind.
if (pos_ != intervals_->begin()) {
--pos_;
if (!QueryIntersectsAllocatedInterval()) {
// The interval behind wasn't intersecting, so move forward again.
++pos_;
}
}
if (pos_ == end || !QueryIntersectsAllocatedInterval()) {
pos_ = end;
}
}
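// [Editorial sketch] The backtracking above is the subtle step of the lookup:
// upper_bound lands on the first interval whose start is strictly greater
// than the query's start, so the only other candidate for the first conflict
// is the single interval just before it. Below is a minimal, self-contained
// illustration of the same pattern, written as its own translation unit with
// plain ints for positions; Interval and FirstConflict are hypothetical
// names, not part of the V8 sources.
#include <cassert>
#include <iterator>
#include <set>

// Half-open [start, end) intervals ordered by start; stored intervals are
// assumed not to overlap, mirroring AllocatedInterval's invariant.
struct Interval {
  int start;
  int end;
  bool operator<(const Interval& other) const { return start < other.start; }
};

// Returns the first stored interval intersecting [q_start, q_end), or end().
std::set<Interval>::const_iterator FirstConflict(const std::set<Interval>& s,
                                                 int q_start, int q_end) {
  // First interval whose start is strictly greater than q_start.
  auto pos = s.upper_bound(Interval{q_start, 0});
  // The interval just before pos starts at or before q_start; it conflicts
  // exactly when it extends past q_start, so check it first.
  if (pos != s.begin()) {
    auto prev = std::prev(pos);
    if (prev->end > q_start) return prev;
  }
  // Otherwise pos itself conflicts iff it starts before the query ends.
  if (pos != s.end() && pos->start < q_end) return pos;
  return s.end();
}

int main() {
  std::set<Interval> s = {{1, 5}, {10, 15}, {20, 25}};
  assert(FirstConflict(s, 3, 4)->start == 1);   // query inside [1, 5)
  assert(FirstConflict(s, 5, 10) == s.end());   // fits the gap exactly
  assert(FirstConflict(s, 4, 12)->start == 1);  // straddles [1, 5)
  return 0;
}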
void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() {
auto end = intervals_->end();
for (; query_ != nullptr; query_ = query_->next()) {
MovePosToFirstConflictForQuery();
if (pos_ != end) {
DCHECK(QueryIntersectsAllocatedInterval());
return;
}
}
Invalidate();
}
void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() {
auto end = intervals_->end();
DCHECK(pos_ != end);
LiveRange* current_conflict = Current();
while (pos_ != end && pos_->range_ == current_conflict) {
++pos_;
}
}
LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) {
if (IsFinished()) return nullptr;
LiveRange* to_clear = Current();
IncrementPosAndSkipOverRepetitions();
// At this point, pos_ is either at the end, or on an interval that doesn't
// correspond to the same range as to_clear. This interval may not even be
// a conflict.
if (clean_behind) {
// Since we parked pos_ on an iterator that won't be affected by removal,
// we can safely delete to_clear's intervals.
for (auto interval = to_clear->first_interval(); interval != nullptr;
interval = interval->next()) {
AllocatedInterval erase_key(interval->start(), interval->end(), nullptr);
intervals_->erase(erase_key);
}
}
// We may have parked pos_ at the end, or on a non-conflict. In that case,
// move to the next query and reinitialize pos and query. This may invalidate
// the iterator, if no more conflicts are available.
if (!QueryIntersectsAllocatedInterval()) {
query_ = query_->next();
MovePosAndQueryToFirstConflict();
}
return Current();
}
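// [Editorial note] The erase-by-key in InternalGetNext is worth a remark:
// erase_key carries a nullptr range_, yet it removes the stored element,
// because the set's comparator (AllocatedInterval::operator<) orders by
// start_ alone, so any key matching on start_ is equivalent to the stored
// element. A tiny self-contained illustration, as its own translation unit
// (Keyed is a hypothetical stand-in type):
#include <cassert>
#include <set>

struct Keyed {
  int start;
  const char* payload;  // ignored by the comparator
  bool operator<(const Keyed& other) const { return start < other.start; }
};

int main() {
  std::set<Keyed> s;
  s.insert(Keyed{5, "real payload"});
  // A key with a dummy payload still erases: equivalence is start-only.
  assert(s.erase(Keyed{5, nullptr}) == 1);
  assert(s.empty());
  return 0;
}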
LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts(
const LiveRange* range) {
return LiveRangeConflictIterator(range, &intervals());
}
void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
for (auto interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
AllocatedInterval to_insert(interval->start(), interval->end(), range);
intervals().insert(to_insert);
}
}
bool CoalescedLiveRanges::VerifyAllocationsAreValidForTesting() const {
LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
for (auto i : intervals_) {
if (i.start_ < last_end) {
return false;
}
last_end = i.end_;
}
return true;
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COALESCED_LIVE_RANGES_H_
#define V8_COALESCED_LIVE_RANGES_H_
#include "src/compiler/register-allocator.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
// Implementation detail for CoalescedLiveRanges.
struct AllocatedInterval {
AllocatedInterval(LifetimePosition start, LifetimePosition end,
LiveRange* range)
: start_(start), end_(end), range_(range) {}
LifetimePosition start_;
LifetimePosition end_;
LiveRange* range_;
bool operator<(const AllocatedInterval& other) const {
return start_ < other.start_;
}
bool operator>(const AllocatedInterval& other) const {
return start_ > other.start_;
}
};
typedef ZoneSet<AllocatedInterval> IntervalStore;
// An iterator over conflicts of a live range, obtained from
// CoalescedLiveRanges.
// The design supports two main scenarios (see GreedyAllocator):
// (1) observing each conflicting range, without mutating the allocations, and
// (2) observing each conflicting range, and then moving to the next, after
// removing the current conflict.
class LiveRangeConflictIterator {
public:
// Current conflict. nullptr if no conflicts, or if we reached the end of
// conflicts.
LiveRange* Current() const;
// Get the next conflict. Caller should handle non-consecutive repetitions of
// the same range.
LiveRange* GetNext() { return InternalGetNext(false); }
// Get the next conflict, after evicting the current one. Caller may expect
// to never observe the same live range more than once.
LiveRange* RemoveCurrentAndGetNext() { return InternalGetNext(true); }
private:
friend class CoalescedLiveRanges;
typedef IntervalStore::const_iterator interval_iterator;
LiveRangeConflictIterator(const LiveRange* range, IntervalStore* store);
// Move the store iterator to first interval intersecting query. Since the
// intervals are sorted, subsequent intervals intersecting query follow. May
// leave the store iterator at "end", meaning that the current query does not
// have an intersection.
void MovePosToFirstConflictForQuery();
// Move both query and store iterator to the first intersection, if any. If
// none, then it invalidates the iterator (IsFinished() == true).
void MovePosAndQueryToFirstConflict();
// Increment pos and skip over intervals belonging to the same range we
// started with (i.e. Current() before the call). It is possible that range
// will be seen again, but not consecutively.
void IncrementPosAndSkipOverRepetitions();
// Common implementation used by both GetNext and
// RemoveCurrentAndGetNext.
LiveRange* InternalGetNext(bool clean_behind);
bool IsFinished() const { return query_ == nullptr; }
static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
return AllocatedInterval(pos, LifetimePosition::Invalid(), nullptr);
}
// Intersection utilities.
static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
LifetimePosition b_start, LifetimePosition b_end) {
return a_start < b_end && b_start < a_end;
}
bool QueryIntersectsAllocatedInterval() const {
DCHECK_NOT_NULL(query_);
return pos_ != intervals_->end() &&
Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
}
void Invalidate() {
query_ = nullptr;
pos_ = intervals_->end();
}
const UseInterval* query_;
interval_iterator pos_;
IntervalStore* intervals_;
};
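// [Editorial sketch] For orientation, the two scenarios described above look
// roughly like this at a call site; the setup of `allocations` and `range`
// is assumed, and only the iterator API declared above is used:
//
//   // (1) Observe conflicts without mutating the allocations. The same
//   // range may recur non-consecutively, so deduplicate if needed.
//   LiveRangeConflictIterator it = allocations->GetConflicts(range);
//   for (LiveRange* conflict = it.Current(); conflict != nullptr;
//        conflict = it.GetNext()) {
//     // ... inspect conflict ...
//   }
//
//   // (2) Evict each conflict while walking; each conflicting range is then
//   // observed exactly once.
//   LiveRangeConflictIterator it = allocations->GetConflicts(range);
//   for (LiveRange* conflict = it.Current(); conflict != nullptr;
//        conflict = it.RemoveCurrentAndGetNext()) {
//     // ... reschedule conflict on another register ...
//   }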
// Collection of live ranges allocated to the same register.
// It supports efficiently finding all conflicts for a given, non-allocated
// range. See AllocatedInterval.
// Allocated live ranges do not intersect. At most, individual use intervals
// touch. We store, for a live range, an AllocatedInterval corresponding to each
// of that range's UseIntervals. We keep the list of AllocatedIntervals sorted
// by start. Then, given the non-intersecting property, we know that for any
// two consecutive AllocatedIntervals, the earlier one's end is less than or
// equal to the later one's start.
// This allows for quick (logarithmic complexity) identification of the first
// AllocatedInterval to conflict with a given LiveRange, and then for efficient
// traversal of conflicts.
class CoalescedLiveRanges : public ZoneObject {
public:
explicit CoalescedLiveRanges(Zone* zone) : intervals_(zone) {}
void clear() { intervals_.clear(); }
bool empty() const { return intervals_.empty(); }
// Iterate over each live range conflicting with the provided one.
// The same live range may be observed multiple times, but never
// consecutively.
LiveRangeConflictIterator GetConflicts(const LiveRange* range);
// Allocates a range; its candidate weight is expected to have been
// pre-calculated.
void AllocateRange(LiveRange* range);
// Unit testing API, verifying that allocated intervals do not overlap.
bool VerifyAllocationsAreValidForTesting() const;
private:
static const float kAllocatedRangeMultiplier;
IntervalStore& intervals() { return intervals_; }
const IntervalStore& intervals() const { return intervals_; }
// Augment the weight of a range that is about to be allocated.
static void UpdateWeightAtAllocation(LiveRange* range);
// Reduce the weight of a range that has lost allocation.
static void UpdateWeightAtEviction(LiveRange* range);
IntervalStore intervals_;
DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COALESCED_LIVE_RANGES_H_
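An aside on the half-open interval convention used throughout this header (the Intersects helper, and the "at most touch" remark in the CoalescedLiveRanges comment): touching endpoints do not count as a conflict. A minimal self-contained sketch, with the same predicate shape on plain ints:

#include <cassert>

// Same shape as LiveRangeConflictIterator::Intersects, on half-open ranges.
bool Intersects(int a_start, int a_end, int b_start, int b_end) {
  return a_start < b_end && b_start < a_end;
}

int main() {
  assert(Intersects(1, 5, 4, 8));   // genuine overlap
  assert(!Intersects(1, 5, 5, 8));  // touching endpoints: no conflict, so
                                    // allocated intervals may "at most touch"
  assert(!Intersects(1, 5, 6, 8));  // fully disjoint
  return 0;
}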
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_GREEDY_ALLOCATOR_H_
#define V8_GREEDY_ALLOCATOR_H_
#include "src/compiler/coalesced-live-ranges.h"
#include "src/compiler/register-allocator.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
// The unit of allocation scheduling. At minimum, this is a LiveRange, but it
// may also be a group of LiveRanges. It has to be comparable.
class AllocationCandidate {
public:
explicit AllocationCandidate(LiveRange* range)
: is_group_(false), size_(range->GetSize()) {
candidate_.range_ = range;
}
explicit AllocationCandidate(LiveRangeGroup* ranges)
: is_group_(true), size_(CalculateGroupSize(ranges)) {
candidate_.group_ = ranges;
}
// Strict ordering operators.
bool operator<(const AllocationCandidate& other) const {
return size() < other.size();
}
bool operator>(const AllocationCandidate& other) const {
return size() > other.size();
}
bool is_group() const { return is_group_; }
LiveRange* live_range() const { return candidate_.range_; }
LiveRangeGroup* group() const { return candidate_.group_; }
private:
unsigned CalculateGroupSize(LiveRangeGroup* group) {
unsigned ret = 0;
for (LiveRange* range : group->ranges()) {
ret += range->GetSize();
}
return ret;
}
unsigned size() const { return size_; }
bool is_group_;
unsigned size_;
union {
LiveRange* range_;
LiveRangeGroup* group_;
} candidate_;
};
// Schedule processing (allocating) of AllocationCandidates.
class AllocationScheduler final : ZoneObject {
public:
explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
void Schedule(LiveRange* range);
void Schedule(LiveRangeGroup* group);
AllocationCandidate GetNext();
bool empty() const { return queue_.empty(); }
private:
typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
ScheduleQueue queue_;
DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
};
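// [Editorial sketch] ZonePriorityQueue wraps std::priority_queue and is thus
// a max-heap under operator<, so GetNext() should yield the largest candidate
// first; AllocationCandidate orders by size, i.e. by the number of
// LifetimePositions covered. A self-contained illustration as its own
// translation unit (Candidate is a hypothetical stand-in for
// AllocationCandidate):
#include <cassert>
#include <queue>
#include <vector>

struct Candidate {
  unsigned size;
  bool operator<(const Candidate& other) const { return size < other.size; }
};

int main() {
  std::priority_queue<Candidate> queue;  // max-heap under operator<
  queue.push(Candidate{3});
  queue.push(Candidate{10});
  queue.push(Candidate{7});
  assert(queue.top().size == 10);  // biggest candidate is processed first
  queue.pop();
  assert(queue.top().size == 7);
  return 0;
}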
// A variant of the LLVM Greedy Register Allocator. See
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
class GreedyAllocator final : public RegisterAllocator {
public:
explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
Zone* local_zone);
void AllocateRegisters();
private:
static const float kAllocatedRangeMultiplier;
static void UpdateWeightAtAllocation(LiveRange* range) {
DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
range->set_weight(range->weight() * kAllocatedRangeMultiplier);
}
static void UpdateWeightAtEviction(LiveRange* range) {
DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
range->set_weight(range->weight() / kAllocatedRangeMultiplier);
}
AllocationScheduler& scheduler() { return scheduler_; }
CoalescedLiveRanges* current_allocations(unsigned i) {
return allocations_[i];
}
CoalescedLiveRanges* current_allocations(unsigned i) const {
return allocations_[i];
}
Zone* local_zone() const { return local_zone_; }
ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }
// Insert fixed ranges.
void PreallocateFixedRanges();
void GroupLiveRanges();
// Schedule unassigned live ranges for allocation.
void ScheduleAllocationCandidates();
void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
UpdateWeightAtAllocation(range);
current_allocations(reg_id)->AllocateRange(range);
}
// Evict and reschedule conflicts of a given range, at a given register.
void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
void TryAllocateCandidate(const AllocationCandidate& candidate);
void TryAllocateLiveRange(LiveRange* range);
void TryAllocateGroup(LiveRangeGroup* group);
// Calculate the weight of a candidate for allocation.
void EnsureValidRangeWeight(LiveRange* range);
// Calculate the new weight of a range that is about to be allocated.
float GetAllocatedRangeWeight(float candidate_weight);
// Returns kInvalidWeight if there are no conflicts, or the largest weight of
// a range conflicting with the given range, at the given register.
float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
float competing_weight) const;
// Returns kInvalidWeight if there are no conflicts, or the largest weight of
// a range conflicting with any member of the given group, at the given
// register.
float GetMaximumConflictingWeight(unsigned reg_id,
const LiveRangeGroup* group,
float group_weight) const;
// This is the extension point for splitting heuristics.
void SplitOrSpillBlockedRange(LiveRange* range);
// Find a good position at which to fill, after a range was spilled around a
// call.
LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
int call_index);
// Split a range around all calls it passes over. Returns true if any changes
// were made, or false if no calls were found.
bool TrySplitAroundCalls(LiveRange* range);
// Find a split position at the outermost loop.
LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);
// Finds the first call instruction in the path of this range. Splits before
// and requeues that segment (if any), spills the section over the call, and
// returns the section after the call. The return value is:
// - the same range, if no call was found;
// - nullptr, if the range ended at the call and there is no "after the
//   call" portion;
// - otherwise, the portion after the call.
LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
// While we attempt to merge spill ranges later on in the allocation pipeline,
// we want to ensure group elements get merged. Waiting until later may hinder
// merge-ability, since the pipeline merger (being naive) may create conflicts
// between spill ranges of group members.
void TryReuseSpillRangesForGroups();
LifetimePosition GetLastResortSplitPosition(const LiveRange* range);
bool IsProgressPossible(const LiveRange* range);
// Necessary heuristic: spill when all else failed.
void SpillRangeAsLastResort(LiveRange* range);
void AssignRangeToRegister(int reg_id, LiveRange* range);
Zone* local_zone_;
ZoneVector<CoalescedLiveRanges*> allocations_;
AllocationScheduler scheduler_;
ZoneVector<LiveRangeGroup*> groups_;
DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_GREEDY_ALLOCATOR_H_
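A note on the weight scheme declared above: UpdateWeightAtAllocation boosts a range's weight when it receives a register, and UpdateWeightAtEviction undoes the boost, so a conflicting candidate must outweigh the boosted value to evict a holder. The sketch below illustrates that gating as its own translation unit; the real kAllocatedRangeMultiplier is defined in greedy-allocator.cc (not shown in this diff), so the value and the CanEvict helper here are purely illustrative assumptions.

#include <cassert>

// Illustrative stand-in; the real constant lives in greedy-allocator.cc.
static const float kAllocatedRangeMultiplier = 10.0f;

float WeightAtAllocation(float raw_weight) {
  return raw_weight * kAllocatedRangeMultiplier;  // boost on assignment
}

float WeightAtEviction(float boosted_weight) {
  return boosted_weight / kAllocatedRangeMultiplier;  // undo on eviction
}

// Hypothetical eviction gate: a challenger must beat the holder's boosted
// weight, in the spirit of the competing_weight comparison above.
bool CanEvict(float challenger_weight, float holder_raw_weight) {
  return challenger_weight > WeightAtAllocation(holder_raw_weight);
}

int main() {
  assert(!CanEvict(15.0f, 2.0f));  // 15 < 2 * 10: the holder stays put
  assert(CanEvict(25.0f, 2.0f));   // 25 > 2 * 10: the challenger evicts
  // The boost round-trips exactly (20 / 10 is exact in binary floating point).
  assert(WeightAtEviction(WeightAtAllocation(2.0f)) == 2.0f);
  return 0;
}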
@@ -26,7 +26,6 @@
 #include "src/compiler/graph-replay.h"
 #include "src/compiler/graph-trimmer.h"
 #include "src/compiler/graph-visualizer.h"
-#include "src/compiler/greedy-allocator.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/js-builtin-reducer.h"
@@ -1766,13 +1765,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
     Run<SplinterLiveRangesPhase>();
   }
 
-  if (FLAG_turbo_greedy_regalloc) {
-    Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
-    Run<AllocateFPRegistersPhase<GreedyAllocator>>();
-  } else {
-    Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
-    Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
-  }
+  Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
+  Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
 
   if (FLAG_turbo_preprocess_ranges) {
     Run<MergeSplintersPhase>();
@@ -413,11 +413,6 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
   return os;
 }
 
-const float LiveRange::kInvalidWeight = -1;
-const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
-
 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
                      TopLevelLiveRange* top_level)
     : relative_id_(relative_id),
@@ -430,10 +425,7 @@ LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
       current_interval_(nullptr),
       last_processed_use_(nullptr),
       current_hint_position_(nullptr),
-      splitting_pointer_(nullptr),
-      size_(kInvalidSize),
-      weight_(kInvalidWeight),
-      group_(nullptr) {
+      splitting_pointer_(nullptr) {
   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
           RepresentationField::encode(rep);
@@ -699,10 +691,6 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
   last_processed_use_ = nullptr;
   current_interval_ = nullptr;
 
-  // Invalidate size and weight of this range. The child range has them
-  // invalid at construction.
-  size_ = kInvalidSize;
-  weight_ = kInvalidWeight;
-
 #ifdef DEBUG
   VerifyChildStructure();
   result->VerifyChildStructure();
@@ -818,20 +806,6 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
   return LifetimePosition::Invalid();
 }
 
-unsigned LiveRange::GetSize() {
-  if (size_ == kInvalidSize) {
-    size_ = 0;
-    for (const UseInterval* interval = first_interval(); interval != nullptr;
-         interval = interval->next()) {
-      size_ += (interval->end().value() - interval->start().value());
-    }
-  }
-  return static_cast<unsigned>(size_);
-}
-
 void LiveRange::Print(const RegisterConfiguration* config,
                       bool with_children) const {
   OFStream os(stdout);
@@ -412,19 +412,9 @@ class LiveRange : public ZoneObject {
   void SetUseHints(int register_index);
   void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
 
-  // Used solely by the Greedy Allocator:
-  unsigned GetSize();
-  float weight() const { return weight_; }
-  void set_weight(float weight) { weight_ = weight; }
-  LiveRangeGroup* group() const { return group_; }
-  void set_group(LiveRangeGroup* group) { group_ = group; }
-
   void Print(const RegisterConfiguration* config, bool with_children) const;
   void Print(bool with_children) const;
 
-  static const int kInvalidSize = -1;
-  static const float kInvalidWeight;
-  static const float kMaxWeight;
-
  private:
   friend class TopLevelLiveRange;
   explicit LiveRange(int relative_id, MachineRepresentation rep,
@@ -461,17 +451,6 @@ class LiveRange : public ZoneObject {
   mutable UsePosition* current_hint_position_;
   // Cache the last position splintering stopped at.
   mutable UsePosition* splitting_pointer_;
-  // greedy: the number of LifetimePositions covered by this range. Used to
-  // prioritize selecting live ranges for register assignment, as well as
-  // in weight calculations.
-  int size_;
-  // greedy: a metric for resolving conflicts between ranges with an assigned
-  // register and ranges that intersect them and need a register.
-  float weight_;
-  // greedy: grouping.
-  LiveRangeGroup* group_;
 
   DISALLOW_COPY_AND_ASSIGN(LiveRange);
 };
@@ -483,7 +462,6 @@ class LiveRangeGroup final : public ZoneObject {
   ZoneVector<LiveRange*>& ranges() { return ranges_; }
   const ZoneVector<LiveRange*>& ranges() const { return ranges_; }
 
-  // TODO(mtrofin): populate assigned register and use in weight calculation.
   int assigned_register() const { return assigned_register_; }
   void set_assigned_register(int reg) { assigned_register_ = reg; }
@@ -428,7 +428,6 @@ DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
 DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
 DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
 DEFINE_BOOL(turbo_from_bytecode, false, "enable building graphs from bytecode")
-DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
 DEFINE_BOOL(turbo_sp_frame_access, false,
             "use stack pointer-relative access to frame wherever possible")
 DEFINE_BOOL(turbo_preprocess_ranges, true,
@@ -507,8 +507,6 @@
         'compiler/c-linkage.cc',
         'compiler/checkpoint-elimination.cc',
         'compiler/checkpoint-elimination.h',
-        'compiler/coalesced-live-ranges.cc',
-        'compiler/coalesced-live-ranges.h',
         'compiler/code-generator-impl.h',
         'compiler/code-generator.cc',
         'compiler/code-generator.h',
@@ -553,8 +551,6 @@
         'compiler/graph-visualizer.h',
         'compiler/graph.cc',
         'compiler/graph.h',
-        'compiler/greedy-allocator.cc',
-        'compiler/greedy-allocator.h',
         'compiler/instruction-codes.h',
         'compiler/instruction-selector-impl.h',
         'compiler/instruction-selector.cc',
@@ -29,7 +29,6 @@ executable("unittests") {
     "char-predicates-unittest.cc",
     "compiler/branch-elimination-unittest.cc",
     "compiler/checkpoint-elimination-unittest.cc",
-    "compiler/coalesced-live-ranges-unittest.cc",
     "compiler/common-operator-reducer-unittest.cc",
     "compiler/common-operator-unittest.cc",
     "compiler/compiler-test-utils.h",
@@ -174,7 +173,10 @@ executable("unittests") {
     # Suppress warnings about importing locally defined symbols.
     if (is_component_build) {
-      ldflags = [ "/ignore:4049", "/ignore:4217" ]
+      ldflags = [
+        "/ignore:4049",
+        "/ignore:4217",
+      ]
     }
   }
 }
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/coalesced-live-ranges.h"
#include "test/unittests/compiler/live-range-builder.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace compiler {
class CoalescedLiveRangesTest : public TestWithZone {
public:
CoalescedLiveRangesTest() : TestWithZone(), ranges_(zone()) {}
bool HasNoConflicts(const LiveRange* range);
bool ConflictsPreciselyWith(const LiveRange* range, int id);
bool ConflictsPreciselyWith(const LiveRange* range, int id1, int id2);
CoalescedLiveRanges& ranges() { return ranges_; }
const CoalescedLiveRanges& ranges() const { return ranges_; }
bool AllocationsAreValid() const;
void RemoveConflicts(LiveRange* range);
private:
typedef ZoneSet<int> LiveRangeIDs;
bool IsRangeConflictingWith(const LiveRange* range, const LiveRangeIDs& ids);
CoalescedLiveRanges ranges_;
};
bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
int id) {
LiveRangeIDs set(zone());
set.insert(id);
return IsRangeConflictingWith(range, set);
}
bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
int id1, int id2) {
LiveRangeIDs set(zone());
set.insert(id1);
set.insert(id2);
return IsRangeConflictingWith(range, set);
}
bool CoalescedLiveRangesTest::HasNoConflicts(const LiveRange* range) {
LiveRangeIDs set(zone());
return IsRangeConflictingWith(range, set);
}
void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
auto conflicts = ranges().GetConflicts(range);
LiveRangeIDs seen(zone());
for (auto c = conflicts.Current(); c != nullptr;
c = conflicts.RemoveCurrentAndGetNext()) {
int id = c->TopLevel()->vreg();
EXPECT_FALSE(seen.count(id) > 0);
seen.insert(c->TopLevel()->vreg());
}
}
bool CoalescedLiveRangesTest::AllocationsAreValid() const {
return ranges().VerifyAllocationsAreValidForTesting();
}
bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
const LiveRangeIDs& ids) {
LiveRangeIDs found_ids(zone());
auto conflicts = ranges().GetConflicts(range);
for (auto conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.GetNext()) {
found_ids.insert(conflict->TopLevel()->vreg());
}
return found_ids == ids;
}
TEST_F(CoalescedLiveRangesTest, VisitEmptyAllocations) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ASSERT_TRUE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
ASSERT_TRUE(HasNoConflicts(range));
}
TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterAllocations) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(5, 6);
ranges().AllocateRange(range);
ASSERT_FALSE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 2);
ASSERT_TRUE(HasNoConflicts(query));
query = TestRangeBuilder(zone()).Id(3).Build(1, 5);
ASSERT_TRUE(HasNoConflicts(query));
}
TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterManyAllocations) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(5, 7).Add(10, 12).Build();
ranges().AllocateRange(range);
ASSERT_FALSE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
LiveRange* query =
TestRangeBuilder(zone()).Id(2).Add(1, 2).Add(13, 15).Build();
ASSERT_TRUE(HasNoConflicts(query));
query = TestRangeBuilder(zone()).Id(3).Add(1, 5).Add(12, 15).Build();
ASSERT_TRUE(HasNoConflicts(query));
}
TEST_F(CoalescedLiveRangesTest, SelfConflictsPreciselyWithSelf) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ranges().AllocateRange(range);
ASSERT_FALSE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
ASSERT_TRUE(ConflictsPreciselyWith(range, 1));
range = TestRangeBuilder(zone()).Id(2).Build(8, 10);
ranges().AllocateRange(range);
ASSERT_TRUE(ConflictsPreciselyWith(range, 2));
}
TEST_F(CoalescedLiveRangesTest, QueryStartsBeforeConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 3);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
ranges().AllocateRange(range);
query = TestRangeBuilder(zone()).Id(4).Build(6, 9);
ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
}
TEST_F(CoalescedLiveRangesTest, QueryStartsInConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(3, 6);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
ranges().AllocateRange(range);
query = TestRangeBuilder(zone()).Id(4).Build(9, 11);
ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
}
TEST_F(CoalescedLiveRangesTest, QueryContainedInConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 3);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
TEST_F(CoalescedLiveRangesTest, QueryContainsConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 3);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 5);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsSameRange) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(7, 9).Add(20, 25).Build();
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 8);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsDifferentRanges) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(20, 25).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(7, 10);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(2, 22);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1, 2));
}
TEST_F(CoalescedLiveRangesTest, QueryFitsInGaps) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 15).Add(20, 25).Build();
ranges().AllocateRange(range);
LiveRange* query =
TestRangeBuilder(zone()).Id(3).Add(5, 10).Add(16, 19).Add(27, 30).Build();
ASSERT_TRUE(HasNoConflicts(query));
}
TEST_F(CoalescedLiveRangesTest, DeleteConflictBefore) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Add(1, 4).Add(5, 6).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(3, 7);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
TEST_F(CoalescedLiveRangesTest, DeleteConflictAfter) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Add(40, 50).Add(60, 70).Build();
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(45, 60);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
TEST_F(CoalescedLiveRangesTest, DeleteConflictStraddle) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 20).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
TEST_F(CoalescedLiveRangesTest, DeleteConflictManyOverlapsBefore) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(10, 20).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
TEST_F(CoalescedLiveRangesTest, DeleteWhenConflictRepeatsAfterNonConflict) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(20, 30).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(12, 15);
ranges().AllocateRange(range);
LiveRange* query =
TestRangeBuilder(zone()).Id(3).Add(1, 8).Add(22, 25).Build();
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
} // namespace compiler
} // namespace internal
} // namespace v8
@@ -678,8 +678,7 @@ TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
   Allocate();
 
   // TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
-  // so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2.
-  // Expand the test once greedy is back online with this facility.
+  // so only var3 is spilled in deferred blocks.
   const int var3_reg = 2;
   const int var3_slot = 2;
@@ -44,7 +44,6 @@
       'char-predicates-unittest.cc',
       'compiler/branch-elimination-unittest.cc',
       'compiler/checkpoint-elimination-unittest.cc',
-      'compiler/coalesced-live-ranges-unittest.cc',
       'compiler/common-operator-reducer-unittest.cc',
       'compiler/common-operator-unittest.cc',
       'compiler/compiler-test-utils.h',