Commit 5d954d65 authored by mtrofin, committed by Commit bot

[turbofan] Deferred blocks splintering.

This change encompasses what is necessary to enable stack checks in loops without suffering large regressions.

Primarily, it consists of a new mechanism for dealing with deferred blocks: "splintering" live ranges inside them, rather than splitting them.

My initial change split ranges along deferred block boundaries, but the regression introduced by stack checks wasn't conclusively resolved. After investigation, it appears that just splitting ranges along cold-block boundaries opens up more opportunities for moves on the hot path, hence the suboptimal outcome.

The alternative "splinters" ranges rather than splitting them. Whereas splitting creates two ranges and links them in a parent-child relation, splintering creates a new, independent range with no parent-child relation to the original; the original range then appears to have a liveness hole where the splinter was carved out. All ranges thus obtained are then register allocated with no change to the register allocator.
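
To make the contrast concrete, here is a minimal, self-contained sketch (hypothetical types and function names, not V8's actual LiveRange API), with a range modelled as a sorted list of half-open intervals:

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical stand-in for a live range: disjoint, sorted [start, end)
// intervals over instruction positions, plus an optional parent link.
struct Range {
  std::vector<std::pair<int, int>> intervals;
  Range* parent = nullptr;  // set only by SplitAt
};

// Splitting at `pos` yields a tail range linked to the original as its child;
// the allocator must later connect the two with moves.
Range SplitAt(Range& r, int pos) {
  Range child;
  child.parent = &r;
  std::vector<std::pair<int, int>> head;
  for (auto [s, e] : r.intervals) {
    if (e <= pos) head.emplace_back(s, e);
    else if (s >= pos) child.intervals.emplace_back(s, e);
    else { head.emplace_back(s, pos); child.intervals.emplace_back(pos, e); }
  }
  r.intervals = std::move(head);
  return child;
}

// Splintering [start, end) carves that span out into an independent range
// (no parent link); the original is left with a liveness hole in its place.
Range Splinter(Range& r, int start, int end) {
  Range splinter;  // deliberately not linked to r
  std::vector<std::pair<int, int>> kept;
  for (auto [s, e] : r.intervals) {
    if (e <= start || s >= end) { kept.emplace_back(s, e); continue; }
    if (s < start) kept.emplace_back(s, start);
    if (e > end) kept.emplace_back(end, e);
    splinter.intervals.emplace_back(std::max(s, start), std::min(e, end));
  }
  r.intervals = std::move(kept);
  return splinter;
}

int main() {
  Range r{{{0, 10}}};
  Range cold = Splinter(r, 4, 7);  // r: [0,4) [7,10); cold: [4,7)
  for (auto [s, e] : r.intervals) std::printf("hot [%d,%d) ", s, e);
  for (auto [s, e] : cold.intervals) std::printf("cold [%d,%d)", s, e);
  std::printf("\n");
}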

By construction, the splinters (covering the cold blocks) do not conflict with the hot-path ranges, and the hot-path ranges come under less pressure to split because a source of conflicts has been removed. After allocation, we merge the splinters back into their original ranges and continue the pipeline, leveraging the previous changes made for deferred blocks (for example, determining where to spill).
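
As a rough sketch of where this sits in the pipeline (placeholder function names below, not the actual phase structs added in pipeline.cc further down): splintering runs before allocation, merging runs after it, and the existing range connector then does its usual work.

#include <cstdio>

// Placeholder phases, purely to illustrate the ordering this change establishes.
void SplinterLiveRanges() { std::puts("splinter ranges live in deferred blocks"); }
void AllocateRegisters()  { std::puts("linear-scan allocation, general + double"); }
void MergeSplinters()     { std::puts("merge splinters back into their parents"); }
void ConnectRanges()      { std::puts("connect ranges / resolve control flow"); }

int main() {
  SplinterLiveRanges();  // cold-path pieces become independent ranges
  AllocateRegisters();   // hot ranges now have holes where the cold code is
  MergeSplinters();      // reattach the cold pieces to the original ranges
  ConnectRanges();       // existing machinery inserts any needed moves
}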

Review URL: https://codereview.chromium.org/1305393003

Cr-Commit-Position: refs/heads/master@{#30357}
parent 6a80027f
......@@ -781,6 +781,8 @@ source_set("v8_base") {
"src/compiler/jump-threading.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
"src/compiler/live-range-separator.cc",
"src/compiler/live-range-separator.h",
"src/compiler/liveness-analyzer.cc",
"src/compiler/liveness-analyzer.h",
"src/compiler/load-elimination.cc",
......@@ -819,8 +821,6 @@ source_set("v8_base") {
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
"src/compiler/preprocess-live-ranges.cc",
"src/compiler/preprocess-live-ranges.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
......
......@@ -21,7 +21,7 @@ void BitVector::Print() {
PrintF("%d", i);
}
}
PrintF("}");
PrintF("}\n");
}
#endif
......
......@@ -2928,9 +2928,7 @@ void AstGraphBuilder::VisitInScope(Statement* stmt, Scope* s, Node* context) {
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop) {
ControlScopeForIteration scope(this, stmt, loop);
// TODO(mstarzinger): For now we only allow to interrupt non-asm.js code,
// which is a gigantic hack and should be extended to all code at some point.
if (!info()->shared_info()->asm_function()) {
if (FLAG_turbo_loop_stackcheck) {
Node* node = NewNode(javascript()->StackCheck());
PrepareFrameState(node, stmt->StackCheckId());
}
......
......@@ -417,7 +417,8 @@ InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
handler_(handler),
needs_frame_(false),
must_construct_frame_(false),
must_deconstruct_frame_(false) {}
must_deconstruct_frame_(false),
last_deferred_(RpoNumber::Invalid()) {}
size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
......
......@@ -778,9 +778,13 @@ class RpoNumber final {
return other.index_ == this->index_ + 1;
}
bool operator==(RpoNumber other) const {
return this->index_ == other.index_;
}
// Comparison operators.
bool operator==(RpoNumber other) const { return index_ == other.index_; }
bool operator!=(RpoNumber other) const { return index_ != other.index_; }
bool operator>(RpoNumber other) const { return index_ > other.index_; }
bool operator<(RpoNumber other) const { return index_ < other.index_; }
bool operator<=(RpoNumber other) const { return index_ <= other.index_; }
bool operator>=(RpoNumber other) const { return index_ >= other.index_; }
private:
explicit RpoNumber(int32_t index) : index_(index) {}
......@@ -992,6 +996,9 @@ class InstructionBlock final : public ZoneObject {
bool must_deconstruct_frame() const { return must_deconstruct_frame_; }
void mark_must_deconstruct_frame() { must_deconstruct_frame_ = true; }
void set_last_deferred(RpoNumber last) { last_deferred_ = last; }
RpoNumber last_deferred() const { return last_deferred_; }
private:
Successors successors_;
Predecessors predecessors_;
......@@ -1007,6 +1014,7 @@ class InstructionBlock final : public ZoneObject {
bool needs_frame_;
bool must_construct_frame_;
bool must_deconstruct_frame_;
RpoNumber last_deferred_;
};
typedef ZoneDeque<Constant> ConstantDeque;
......
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/live-range-separator.h"
#include "src/compiler/register-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
#define TRACE(...) \
do { \
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
namespace {
// Starting from a deferred block, find the last consecutive deferred block.
RpoNumber GetLastDeferredBlock(const InstructionBlock *block,
const InstructionSequence *code) {
DCHECK(block->IsDeferred());
RpoNumber first = block->rpo_number();
RpoNumber last = first;
for (int i = first.ToInt(); i < code->InstructionBlockCount(); ++i) {
RpoNumber at_i = RpoNumber::FromInt(i);
const InstructionBlock *block_at_i = code->InstructionBlockAt(at_i);
if (!block_at_i->IsDeferred()) break;
last = at_i;
}
return last;
}
// Delimits consecutive deferred block sequences.
void AssociateDeferredBlockSequences(InstructionSequence *code) {
for (int blk_id = 0; blk_id < code->InstructionBlockCount(); ++blk_id) {
InstructionBlock *block =
code->InstructionBlockAt(RpoNumber::FromInt(blk_id));
if (!block->IsDeferred()) continue;
RpoNumber last = GetLastDeferredBlock(block, code);
block->set_last_deferred(last);
// We know last is still deferred, and that last + 1 is not (or is an
// invalid index). So skip over last + 1 and continue from last + 2. This
// way, we visit each block exactly once, and the total complexity of this
// function is O(n), n being the number of blocks.
blk_id = last.ToInt() + 1;
}
}
// If the live range has a liveness hole right between start and end,
// we don't need to splinter it.
bool IsIntervalAlreadyExcluded(const LiveRange *range, LifetimePosition start,
LifetimePosition end) {
for (UseInterval *interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
if (interval->start() <= start && start < interval->end()) return false;
if (interval->start() < end && end <= interval->end()) return false;
}
return true;
}
void CreateSplinter(LiveRange *range, RegisterAllocationData *data,
LifetimePosition first_cut, LifetimePosition last_cut) {
DCHECK(!range->IsChild());
DCHECK(!range->IsSplinter());
// We can ignore ranges that live solely in deferred blocks.
// If a range ends right at the end of a deferred block, it is marked by
// the range builder as ending at gap start of the next block - since the
// end is a position where the variable isn't live. We need to take that
// into consideration.
LifetimePosition max_allowed_end = last_cut.NextFullStart();
if (first_cut <= range->Start() && max_allowed_end >= range->End()) {
return;
}
LifetimePosition start = Max(first_cut, range->Start());
LifetimePosition end = Min(last_cut, range->End());
// Skip ranges that have a hole where the deferred block(s) are.
if (IsIntervalAlreadyExcluded(range, start, end)) return;
if (start < end) {
// Ensure the original range has a spill range associated, before it gets
// splintered. Splinters will point to it. This way, when attempting to
// reuse spill slots of splinters, during allocation, we avoid clobbering
// such slots.
if (range->MayRequireSpillRange()) {
data->CreateSpillRangeForLiveRange(range);
}
LiveRange *result = data->NewChildRangeFor(range);
Zone *zone = data->allocation_zone();
range->Splinter(start, end, result, zone);
}
}
// Splinter all ranges live inside successive deferred blocks.
// No control flow analysis is performed. After the register allocation, we will
// merge the splinters back into the original ranges, and then rely on the
// range connector to properly connect them.
void SplinterRangesInDeferredBlocks(RegisterAllocationData *data) {
InstructionSequence *code = data->code();
int code_block_count = code->InstructionBlockCount();
Zone *zone = data->allocation_zone();
ZoneVector<BitVector *> &in_sets = data->live_in_sets();
for (int i = 0; i < code_block_count; ++i) {
InstructionBlock *block = code->InstructionBlockAt(RpoNumber::FromInt(i));
if (!block->IsDeferred()) continue;
RpoNumber last_deferred = block->last_deferred();
i = last_deferred.ToInt();
LifetimePosition first_cut = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
LifetimePosition last_cut = LifetimePosition::GapFromInstructionIndex(
static_cast<int>(code->instructions().size()));
const BitVector *in_set = in_sets[i];
InstructionBlock *last = code->InstructionBlockAt(last_deferred);
const BitVector *out_set = LiveRangeBuilder::ComputeLiveOut(last, data);
last_cut = LifetimePosition::GapFromInstructionIndex(
last->last_instruction_index());
BitVector ranges_to_splinter(*in_set, zone);
ranges_to_splinter.Union(*out_set);
BitVector::Iterator iterator(&ranges_to_splinter);
while (!iterator.Done()) {
int range_id = iterator.Current();
iterator.Advance();
LiveRange *range = data->live_ranges()[range_id];
CreateSplinter(range, data, first_cut, last_cut);
}
}
}
} // namespace
void LiveRangeSeparator::Splinter() {
AssociateDeferredBlockSequences(data()->code());
SplinterRangesInDeferredBlocks(data());
}
void LiveRangeMerger::Merge() {
int live_range_count = static_cast<int>(data()->live_ranges().size());
for (int i = 0; i < live_range_count; ++i) {
LiveRange *range = data()->live_ranges()[i];
if (range == nullptr || range->IsEmpty() || range->IsChild() ||
!range->IsSplinter()) {
continue;
}
LiveRange *splinter_parent = range->splintered_from();
splinter_parent->Merge(range, data());
}
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_LIVE_RANGE_SEPARATOR_H_
#define V8_LIVE_RANGE_SEPARATOR_H_
#include <src/zone.h>
namespace v8 {
namespace internal {
class Zone;
namespace compiler {
class RegisterAllocationData;
// A register allocation pair of transformations: splinter and merge live ranges
class LiveRangeSeparator final : public ZoneObject {
public:
LiveRangeSeparator(RegisterAllocationData* data, Zone* zone)
: data_(data), zone_(zone) {}
void Splinter();
private:
RegisterAllocationData* data() const { return data_; }
Zone* zone() const { return zone_; }
RegisterAllocationData* const data_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeSeparator);
};
class LiveRangeMerger final : public ZoneObject {
public:
LiveRangeMerger(RegisterAllocationData* data, Zone* zone)
: data_(data), zone_(zone) {}
void Merge();
private:
RegisterAllocationData* data() const { return data_; }
Zone* zone() const { return zone_; }
RegisterAllocationData* const data_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeMerger);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_LIVE_RANGE_SEPARATOR_H_
......@@ -35,6 +35,7 @@
#include "src/compiler/js-type-feedback-lowering.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
#include "src/compiler/live-range-separator.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
......@@ -42,7 +43,6 @@
#include "src/compiler/move-optimizer.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/preprocess-live-ranges.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/register-allocator-verifier.h"
#include "src/compiler/schedule.h"
......@@ -778,13 +778,13 @@ struct BuildLiveRangesPhase {
};
struct PreprocessLiveRangesPhase {
static const char* phase_name() { return "preprocess live ranges"; }
struct SplinterLiveRangesPhase {
static const char* phase_name() { return "splinter live ranges"; }
void Run(PipelineData* data, Zone* temp_zone) {
PreprocessLiveRanges live_range_preprocessor(
data->register_allocation_data(), temp_zone);
live_range_preprocessor.PreprocessRanges();
LiveRangeSeparator live_range_splinterer(data->register_allocation_data(),
temp_zone);
live_range_splinterer.Splinter();
}
};
......@@ -813,6 +813,16 @@ struct AllocateDoubleRegistersPhase {
};
struct MergeSplintersPhase {
static const char* phase_name() { return "merge splintered ranges"; }
void Run(PipelineData* pipeline_data, Zone* temp_zone) {
RegisterAllocationData* data = pipeline_data->register_allocation_data();
LiveRangeMerger live_range_merger(data, temp_zone);
live_range_merger.Merge();
}
};
struct LocateSpillSlotsPhase {
static const char* phase_name() { return "locate spill slots"; }
......@@ -1350,14 +1360,14 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
}
if (FLAG_turbo_preprocess_ranges) {
Run<PreprocessLiveRangesPhase>();
}
Run<SplinterLiveRangesPhase>();
// TODO(mtrofin): re-enable greedy once we have bots for range preprocessing.
Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
Run<MergeSplintersPhase>();
if (FLAG_turbo_frame_elision) {
Run<LocateSpillSlotsPhase>();
Run<FrameElisionPhase>();
......
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/preprocess-live-ranges.h"
#include "src/compiler/register-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
#define TRACE(...) \
do { \
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
namespace {
LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
LifetimePosition pos) {
DCHECK(range->Start() < pos && pos < range->End());
DCHECK(pos.IsStart() || pos.IsGapPosition() ||
(data->code()
->GetInstructionBlock(pos.ToInstructionIndex())
->last_instruction_index() != pos.ToInstructionIndex()));
LiveRange* result = data->NewChildRangeFor(range);
range->SplitAt(pos, result, data->allocation_zone());
TRACE("Split range %d(v%d) @%d => %d.\n", range->id(),
range->TopLevel()->id(), pos.ToInstructionIndex(), result->id());
return result;
}
LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
int instruction_index) {
LifetimePosition ret = LifetimePosition::Invalid();
ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
if (range->Start() >= ret || ret >= range->End()) {
return LifetimePosition::Invalid();
}
return ret;
}
LiveRange* SplitRangeAfterBlock(LiveRange* range, RegisterAllocationData* data,
const InstructionBlock* block) {
const InstructionSequence* code = data->code();
int last_index = block->last_instruction_index();
int outside_index = static_cast<int>(code->instructions().size());
bool has_handler = false;
for (auto successor_id : block->successors()) {
const InstructionBlock* successor = code->InstructionBlockAt(successor_id);
if (successor->IsHandler()) {
has_handler = true;
}
outside_index = Min(outside_index, successor->first_instruction_index());
}
int split_at = has_handler ? outside_index : last_index;
LifetimePosition after_block =
GetSplitPositionForInstruction(range, split_at);
if (after_block.IsValid()) {
return Split(range, data, after_block);
}
return range;
}
int GetFirstInstructionIndex(const UseInterval* interval) {
int ret = interval->start().ToInstructionIndex();
if (!interval->start().IsGapPosition() && !interval->start().IsStart()) {
++ret;
}
return ret;
}
bool DoesSubsequenceClobber(const InstructionSequence* code, int start,
int end) {
for (int i = start; i <= end; ++i) {
if (code->InstructionAt(i)->IsCall()) return true;
}
return false;
}
void SplitRangeAtDeferredBlocksWithCalls(LiveRange* range,
RegisterAllocationData* data) {
DCHECK(!range->IsFixed());
DCHECK(!range->spilled());
if (range->TopLevel()->HasSpillOperand()) {
TRACE(
"Skipping deferred block analysis for live range %d because it has a "
"spill operand.\n",
range->TopLevel()->id());
return;
}
const InstructionSequence* code = data->code();
LiveRange* current_subrange = range;
UseInterval* interval = current_subrange->first_interval();
while (interval != nullptr) {
int first_index = GetFirstInstructionIndex(interval);
int last_index = interval->end().ToInstructionIndex();
if (last_index > code->LastInstructionIndex()) {
last_index = code->LastInstructionIndex();
}
interval = interval->next();
for (int index = first_index; index <= last_index;) {
const InstructionBlock* block = code->GetInstructionBlock(index);
int last_block_index = static_cast<int>(block->last_instruction_index());
int last_covered_index = Min(last_index, last_block_index);
int working_index = index;
index = block->last_instruction_index() + 1;
if (!block->IsDeferred() ||
!DoesSubsequenceClobber(code, working_index, last_covered_index)) {
continue;
}
TRACE("Deferred block B%d clobbers range %d(v%d).\n",
block->rpo_number().ToInt(), current_subrange->id(),
current_subrange->TopLevel()->id());
LifetimePosition block_start =
GetSplitPositionForInstruction(current_subrange, working_index);
LiveRange* block_and_after = nullptr;
if (block_start.IsValid()) {
block_and_after = Split(current_subrange, data, block_start);
} else {
block_and_after = current_subrange;
}
LiveRange* next = SplitRangeAfterBlock(block_and_after, data, block);
if (next != current_subrange) interval = next->first_interval();
current_subrange = next;
break;
}
}
}
}
void PreprocessLiveRanges::PreprocessRanges() {
SplitRangesAroundDeferredBlocks();
}
void PreprocessLiveRanges::SplitRangesAroundDeferredBlocks() {
size_t live_range_count = data()->live_ranges().size();
for (size_t i = 0; i < live_range_count; i++) {
LiveRange* range = data()->live_ranges()[i];
if (range != nullptr && !range->IsEmpty() && !range->spilled() &&
!range->IsFixed() && !range->IsChild()) {
SplitRangeAtDeferredBlocksWithCalls(range, data());
}
}
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_PREPROCESS_LIVE_RANGES_H_
#define V8_PREPROCESS_LIVE_RANGES_H_
#include "src/compiler/register-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
class PreprocessLiveRanges final {
public:
PreprocessLiveRanges(RegisterAllocationData* data, Zone* zone)
: data_(data), zone_(zone) {}
void PreprocessRanges();
private:
void SplitRangesAroundDeferredBlocks();
RegisterAllocationData* data() { return data_; }
Zone* zone() { return zone_; }
RegisterAllocationData* data_;
Zone* zone_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_PREPROCESS_LIVE_RANGES_H_
......@@ -273,7 +273,7 @@ class UsePosition final : public ZoneObject {
class SpillRange;
class RegisterAllocationData;
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
......@@ -355,6 +355,9 @@ class LiveRange final : public ZoneObject {
// All uses following the given position will be moved from this
// live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
void Splinter(LifetimePosition start, LifetimePosition end, LiveRange* result,
Zone* zone);
void Merge(LiveRange* other, RegisterAllocationData* data);
// Returns nullptr when no register is hinted, otherwise sets register_index.
UsePosition* FirstHintPosition(int* register_index) const;
......@@ -384,6 +387,12 @@ class LiveRange final : public ZoneObject {
DCHECK(spill_type() == SpillType::kSpillOperand);
return spill_operand_;
}
SpillRange* GetAllocatedSpillRange() const {
DCHECK(spill_type() != SpillType::kSpillOperand);
return spill_range_;
}
SpillRange* GetSpillRange() const {
DCHECK(spill_type() == SpillType::kSpillRange);
return spill_range_;
......@@ -395,6 +404,11 @@ class LiveRange final : public ZoneObject {
return spill_type() == SpillType::kSpillOperand;
}
bool HasSpillRange() const { return spill_type() == SpillType::kSpillRange; }
bool MayRequireSpillRange() const {
DCHECK(!IsChild() && !IsSplinter());
return !HasSpillOperand() && spill_range_ == nullptr;
}
AllocatedOperand GetSpillRangeOperand() const;
void SpillAtDefinition(Zone* zone, int gap_index,
......@@ -458,17 +472,35 @@ class LiveRange final : public ZoneObject {
static const float kInvalidWeight;
static const float kMaxWeight;
private:
LiveRange* splintered_from() const {
DCHECK(!IsChild());
return splintered_from_;
}
bool IsSplinter() const {
DCHECK(!IsChild());
return splintered_from_ != nullptr;
}
void set_spill_type(SpillType value) {
bits_ = SpillTypeField::update(bits_, value);
}
private:
void AppendChild(LiveRange* other);
void UpdateParentForAllChildren(LiveRange* new_parent);
void UpdateSpillRangePostMerge(LiveRange* merged);
void SetSplinteredFrom(LiveRange* splinter_parent);
void set_spilled(bool value) { bits_ = SpilledField::update(bits_, value); }
UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
void AdvanceLastProcessedMarker(UseInterval* to_start_of,
LifetimePosition but_not_past) const;
LiveRange* GetLastChild();
typedef BitField<bool, 0, 1> SpilledField;
typedef BitField<bool, 1, 1> HasSlotUseField;
typedef BitField<bool, 2, 1> IsPhiField;
......@@ -485,6 +517,7 @@ class LiveRange final : public ZoneObject {
UsePosition* first_pos_;
LiveRange* parent_;
LiveRange* next_;
LiveRange* splintered_from_;
union {
// Correct value determined by spill_type()
InstructionOperand* spill_operand_;
......@@ -543,10 +576,13 @@ class SpillRange final : public ZoneObject {
DCHECK_NE(kUnassignedSlot, assigned_slot_);
return assigned_slot_;
}
const ZoneVector<LiveRange*>& live_ranges() const { return live_ranges_; }
ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
int byte_width() const { return byte_width_; }
RegisterKind kind() const { return kind_; }
private:
LifetimePosition End() const { return end_position_; }
ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
bool IsIntersectingWith(SpillRange* other) const;
// Merge intervals, making sure the use intervals are sorted
void MergeDisjointIntervals(UseInterval* other);
......@@ -555,6 +591,8 @@ class SpillRange final : public ZoneObject {
UseInterval* use_interval_;
LifetimePosition end_position_;
int assigned_slot_;
int byte_width_;
RegisterKind kind_;
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
......@@ -612,7 +650,7 @@ class RegisterAllocationData final : public ZoneObject {
return fixed_double_live_ranges_;
}
ZoneVector<BitVector*>& live_in_sets() { return live_in_sets_; }
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
ZoneSet<SpillRange*>& spill_ranges() { return spill_ranges_; }
DelayedReferences& delayed_references() { return delayed_references_; }
InstructionSequence* code() const { return code_; }
// This zone is for datastructures only needed during register allocation
......@@ -630,9 +668,11 @@ class RegisterAllocationData final : public ZoneObject {
LiveRange* LiveRangeFor(int index);
// Creates a new live range.
LiveRange* NewLiveRange(int index, MachineType machine_type);
LiveRange* NextLiveRange(MachineType machine_type);
LiveRange* NewChildRangeFor(LiveRange* range);
SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
SpillRange* CreateSpillRangeForLiveRange(LiveRange* range);
MoveOperands* AddGapMove(int index, Instruction::GapPosition position,
const InstructionOperand& from,
......@@ -656,6 +696,7 @@ class RegisterAllocationData final : public ZoneObject {
void Print(const LiveRange* range, bool with_children = false);
void Print(const InstructionOperand& op);
void Print(const MoveOperands* move);
void Print(const SpillRange* spill_range);
private:
Zone* const allocation_zone_;
......@@ -668,7 +709,7 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<LiveRange*> live_ranges_;
ZoneVector<LiveRange*> fixed_live_ranges_;
ZoneVector<LiveRange*> fixed_double_live_ranges_;
ZoneVector<SpillRange*> spill_ranges_;
ZoneSet<SpillRange*> spill_ranges_;
DelayedReferences delayed_references_;
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
......@@ -721,6 +762,8 @@ class LiveRangeBuilder final : public ZoneObject {
// Phase 3: compute liveness of all virtual register.
void BuildLiveRanges();
static BitVector* ComputeLiveOut(const InstructionBlock* block,
RegisterAllocationData* data);
private:
RegisterAllocationData* data() const { return data_; }
......@@ -737,7 +780,6 @@ class LiveRangeBuilder final : public ZoneObject {
void Verify() const;
// Liveness analysis support.
BitVector* ComputeLiveOut(const InstructionBlock* block);
void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
void ProcessInstructions(const InstructionBlock* block, BitVector* live);
void ProcessPhis(const InstructionBlock* block, BitVector* live);
......
......@@ -402,9 +402,14 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
DEFINE_BOOL(turbo_preprocess_ranges, false,
DEFINE_BOOL(turbo_preprocess_ranges, true,
"run pre-register allocation heuristics")
DEFINE_BOOL(turbo_loop_stackcheck, true, "enable stack checks in loops")
// TODO(mtrofin): remove these implications, as they are here just for trybot
// purposes.
DEFINE_IMPLICATION(turbo_greedy_regalloc, turbo_preprocess_ranges)
DEFINE_IMPLICATION(turbo_greedy_regalloc, turbo_loop_stackcheck)
DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
DEFINE_STRING(turbo_filter, "~~", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
......
......@@ -686,7 +686,7 @@ TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(),
Reg(var3_reg), Slot(var3_slot)));
EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(),
Slot(var3_slot), Reg()));
Slot(var3_slot), Reg(var3_reg)));
EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(),
Reg(var3_reg), Slot(var3_slot)));
......
......@@ -540,6 +540,8 @@
'../../src/compiler/linkage.h',
'../../src/compiler/liveness-analyzer.cc',
'../../src/compiler/liveness-analyzer.h',
'../../src/compiler/live-range-separator.cc',
'../../src/compiler/live-range-separator.h',
'../../src/compiler/load-elimination.cc',
'../../src/compiler/load-elimination.h',
'../../src/compiler/loop-analysis.cc',
......@@ -577,8 +579,6 @@
'../../src/compiler/pipeline.h',
'../../src/compiler/pipeline-statistics.cc',
'../../src/compiler/pipeline-statistics.h',
'../../src/compiler/preprocess-live-ranges.cc',
'../../src/compiler/preprocess-live-ranges.h',
'../../src/compiler/raw-machine-assembler.cc',
'../../src/compiler/raw-machine-assembler.h',
'../../src/compiler/register-allocator.cc',
......