Commit 9b385eb7 authored by Thibaud Michaud, committed by Commit Bot

[regalloc] Remove live range splintering

Control-flow-aware allocation has been enabled by default for a long
time now. This removes the unused code paths related to splintering.

R=neis@chromium.org

Bug: v8:10933
Change-Id: I19d9eb448c3912b24a1ad16030e7dd556b13accc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2434328
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70172}
parent d11b97dd
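For context on what is being deleted: live range splintering carved the portion of a
live range that overlaps deferred (rarely executed) blocks out into a separate
"splinter" range, allocated the pieces independently, and merged them back after
allocation. Below is a minimal, self-contained sketch of that interval surgery.
All names and types are simplified stand-ins invented for illustration, not the
V8 API, which works on TopLevelLiveRange/UseInterval and tracks far more state.

// splinter_sketch.cc -- illustrative model only, not the V8 implementation.
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

// A live range is modeled as a sorted list of half-open intervals [start, end)
// over instruction positions.
using Interval = std::pair<int, int>;
using Range = std::vector<Interval>;

// Move the part of *range that overlaps [cut_start, cut_end) into a separate
// splinter range; the remainder stays in *range.
Range Splinter(Range* range, int cut_start, int cut_end) {
  Range splinter, remainder;
  for (const Interval& iv : *range) {
    int a = iv.first, b = iv.second;
    int lo = std::max(a, cut_start);
    int hi = std::min(b, cut_end);
    if (lo < hi) splinter.emplace_back(lo, hi);  // carved-out piece
    if (a < cut_start) remainder.emplace_back(a, std::min(b, cut_start));  // left leftover
    if (b > cut_end) remainder.emplace_back(std::max(a, cut_end), b);      // right leftover
  }
  *range = remainder;
  return splinter;
}

int main() {
  Range parent = {{0, 6}};
  Range splinter = Splinter(&parent, 3, 5);
  // parent is now {[0,3), [5,6)} and splinter is {[3,5)} -- the same shapes the
  // SplinterSingleInterval unit test removed below used to check.
  for (auto [s, e] : parent) std::cout << "parent [" << s << "," << e << ") ";
  std::cout << "\n";
  for (auto [s, e] : splinter) std::cout << "splinter [" << s << "," << e << ")\n";
  return 0;
}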
......@@ -1971,8 +1971,6 @@ v8_compiler_sources = [
"src/compiler/backend/instruction.h",
"src/compiler/backend/jump-threading.cc",
"src/compiler/backend/jump-threading.h",
"src/compiler/backend/live-range-separator.cc",
"src/compiler/backend/live-range-separator.h",
"src/compiler/backend/mid-tier-register-allocator.cc",
"src/compiler/backend/mid-tier-register-allocator.h",
"src/compiler/backend/move-optimizer.cc",
......
......@@ -111,12 +111,6 @@ void OptimizedCompilationInfo::ConfigureFlags() {
default:
break;
}
if (FLAG_turbo_control_flow_aware_allocation) {
set_turbo_control_flow_aware_allocation();
} else {
set_turbo_preprocess_ranges();
}
}
OptimizedCompilationInfo::~OptimizedCompilationInfo() {
......
......@@ -46,29 +46,27 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
#define FLAGS(V) \
V(FunctionContextSpecializing, function_context_specializing, 0) \
V(Inlining, inlining, 1) \
V(DisableFutureOptimization, disable_future_optimization, 2) \
V(Splitting, splitting, 3) \
V(SourcePositions, source_positions, 4) \
V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \
V(LoopPeeling, loop_peeling, 6) \
V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \
V(SwitchJumpTable, switch_jump_table, 8) \
V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \
V(PoisonRegisterArguments, poison_register_arguments, 10) \
V(AllocationFolding, allocation_folding, 11) \
V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \
V(TraceTurboJson, trace_turbo_json, 13) \
V(TraceTurboGraph, trace_turbo_graph, 14) \
V(TraceTurboScheduled, trace_turbo_scheduled, 15) \
V(TraceTurboAllocation, trace_turbo_allocation, 16) \
V(TraceHeapBroker, trace_heap_broker, 17) \
V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
V(TurboControlFlowAwareAllocation, turbo_control_flow_aware_allocation, 19) \
V(TurboPreprocessRanges, turbo_preprocess_ranges, 20) \
V(ConcurrentInlining, concurrent_inlining, 21)
#define FLAGS(V) \
V(FunctionContextSpecializing, function_context_specializing, 0) \
V(Inlining, inlining, 1) \
V(DisableFutureOptimization, disable_future_optimization, 2) \
V(Splitting, splitting, 3) \
V(SourcePositions, source_positions, 4) \
V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \
V(LoopPeeling, loop_peeling, 6) \
V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \
V(SwitchJumpTable, switch_jump_table, 8) \
V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \
V(PoisonRegisterArguments, poison_register_arguments, 10) \
V(AllocationFolding, allocation_folding, 11) \
V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \
V(TraceTurboJson, trace_turbo_json, 13) \
V(TraceTurboGraph, trace_turbo_graph, 14) \
V(TraceTurboScheduled, trace_turbo_scheduled, 15) \
V(TraceTurboAllocation, trace_turbo_allocation, 16) \
V(TraceHeapBroker, trace_heap_broker, 17) \
V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
V(ConcurrentInlining, concurrent_inlining, 19)
enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
......
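The FLAGS(V) list above is an X-macro: removing the two splintering entries renumbers
every later bit, which is why ConcurrentInlining moves from bit 21 to 19. Below is a
compressed, hypothetical sketch of how such a list expands into the flag enum via the
DEF_ENUM shown above; the real list is much longer and also generates accessors.

// flags_xmacro_sketch.cc -- illustrative only; the real list lives in
// optimized-compilation-info.h.
#include <cstdint>
#include <iostream>

#define SKETCH_FLAGS(V)                         \
  V(Inlining, inlining, 0)                      \
  V(Splitting, splitting, 1)                    \
  V(ConcurrentInlining, concurrent_inlining, 2)

enum Flag : uint32_t {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
  SKETCH_FLAGS(DEF_ENUM)
#undef DEF_ENUM
};

int main() {
  uint32_t flags = kInlining | kConcurrentInlining;
  std::cout << ((flags & kConcurrentInlining) != 0) << "\n";  // prints 1
  return 0;
}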
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/live-range-separator.h"
#include "src/compiler/backend/register-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
#define TRACE_COND(cond, ...) \
do { \
if (cond) PrintF(__VA_ARGS__); \
} while (false)
namespace {
void CreateSplinter(TopLevelLiveRange* range,
TopTierRegisterAllocationData* data,
LifetimePosition first_cut, LifetimePosition last_cut,
bool trace_alloc) {
DCHECK(!range->IsSplinter());
// We can ignore ranges that live solely in deferred blocks.
// If a range ends right at the end of a deferred block, it is marked by
// the range builder as ending at gap start of the next block - since the
// end is a position where the variable isn't live. We need to take that
// into consideration.
LifetimePosition max_allowed_end = last_cut.NextFullStart();
if (first_cut <= range->Start() && max_allowed_end >= range->End()) {
return;
}
LifetimePosition start = Max(first_cut, range->Start());
LifetimePosition end = Min(last_cut, range->End());
if (start < end) {
// Ensure the original range has a spill range associated, before it gets
// splintered. Splinters will point to it. This way, when attempting to
// reuse spill slots of splinters, during allocation, we avoid clobbering
// such slots.
if (range->MayRequireSpillRange()) {
data->CreateSpillRangeForLiveRange(range);
}
if (range->splinter() == nullptr) {
TopLevelLiveRange* splinter =
data->NextLiveRange(range->representation());
DCHECK_NULL(data->live_ranges()[splinter->vreg()]);
data->live_ranges()[splinter->vreg()] = splinter;
range->SetSplinter(splinter);
}
Zone* zone = data->allocation_zone();
TRACE_COND(trace_alloc,
"creating splinter %d for range %d between %d and %d\n",
range->splinter()->vreg(), range->vreg(),
start.ToInstructionIndex(), end.ToInstructionIndex());
range->Splinter(start, end, zone);
}
}
void SetSlotUse(TopLevelLiveRange* range) {
range->reset_slot_use();
for (const UsePosition* pos = range->first_pos();
!range->has_slot_use() && pos != nullptr; pos = pos->next()) {
if (pos->type() == UsePositionType::kRequiresSlot) {
range->register_slot_use(TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
}
}
}
void SplinterLiveRange(TopLevelLiveRange* range,
TopTierRegisterAllocationData* data) {
const InstructionSequence* code = data->code();
UseInterval* interval = range->first_interval();
LifetimePosition first_cut = LifetimePosition::Invalid();
LifetimePosition last_cut = LifetimePosition::Invalid();
while (interval != nullptr) {
// We have to cache these here, as splintering might destroy the original
// interval below.
UseInterval* next_interval = interval->next();
LifetimePosition interval_end = interval->end();
const InstructionBlock* first_block =
code->GetInstructionBlock(interval->FirstGapIndex());
const InstructionBlock* last_block =
code->GetInstructionBlock(interval->LastGapIndex());
int first_block_nr = first_block->rpo_number().ToInt();
int last_block_nr = last_block->rpo_number().ToInt();
for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
const InstructionBlock* current_block =
code->InstructionBlockAt(RpoNumber::FromInt(block_id));
if (current_block->IsDeferred()) {
if (!first_cut.IsValid()) {
first_cut = LifetimePosition::GapFromInstructionIndex(
current_block->first_instruction_index());
}
// We splinter until the last gap in the block. I assume this is done to
// leave a little range to be allocated by normal register allocation
// and then use that range to connect when splinters are merged back.
// This might be done as control flow resolution does not insert moves
// if two consecutive blocks in rpo order are also consecutive in
// control flow.
last_cut = LifetimePosition::GapFromInstructionIndex(
current_block->last_instruction_index());
} else {
if (first_cut.IsValid()) {
CreateSplinter(range, data, first_cut, last_cut,
data->is_trace_alloc());
first_cut = LifetimePosition::Invalid();
last_cut = LifetimePosition::Invalid();
}
}
}
// If we reach the end of an interval with a first_cut and last_cut set, it
// means that we can splinter to the end of the interval, as the value dies
// in this control flow branch or is not live in the next block. In the
// former case, we won't need to reload the value, so we can splinter to the
// end of its lifetime. In the latter case, control flow resolution will
// have to connect blocks anyway, so we can also splinter to the end of the
// block, too.
if (first_cut.IsValid()) {
CreateSplinter(range, data, first_cut, interval_end,
data->is_trace_alloc());
first_cut = LifetimePosition::Invalid();
last_cut = LifetimePosition::Invalid();
}
interval = next_interval;
}
// Redo has_slot_use
if (range->has_slot_use() && range->splinter() != nullptr) {
SetSlotUse(range);
SetSlotUse(range->splinter());
}
}
} // namespace
void LiveRangeSeparator::Splinter() {
size_t virt_reg_count = data()->live_ranges().size();
for (size_t vreg = 0; vreg < virt_reg_count; ++vreg) {
TopLevelLiveRange* range = data()->live_ranges()[vreg];
if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
continue;
}
int first_instr = range->first_interval()->FirstGapIndex();
if (!data()->code()->GetInstructionBlock(first_instr)->IsDeferred()) {
SplinterLiveRange(range, data());
}
}
}
void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
const InstructionSequence* code = data()->code();
for (TopLevelLiveRange* top : data()->live_ranges()) {
if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr ||
top->HasSpillOperand() || !top->splinter()->HasSpillRange()) {
continue;
}
LiveRange* child = top;
for (; child != nullptr; child = child->next()) {
if (child->spilled() ||
child->NextSlotPosition(child->Start()) != nullptr) {
break;
}
}
if (child == nullptr) {
DCHECK(!data()->is_turbo_control_flow_aware_allocation());
top->TreatAsSpilledInDeferredBlock(data()->allocation_zone(),
code->InstructionBlockCount());
}
}
}
void LiveRangeMerger::Merge() {
MarkRangesSpilledInDeferredBlocks();
int live_range_count = static_cast<int>(data()->live_ranges().size());
for (int i = 0; i < live_range_count; ++i) {
TopLevelLiveRange* range = data()->live_ranges()[i];
if (range == nullptr || range->IsEmpty() || !range->IsSplinter()) {
continue;
}
TopLevelLiveRange* splinter_parent = range->splintered_from();
int to_remove = range->vreg();
splinter_parent->Merge(range, data()->allocation_zone());
data()->live_ranges()[to_remove] = nullptr;
}
}
#undef TRACE_COND
} // namespace compiler
} // namespace internal
} // namespace v8
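The block-scanning logic in SplinterLiveRange above boils down to: walk blocks in RPO
order and, for every maximal run of deferred blocks, emit one cut from the first gap of
the run to the last gap of its final block (per the comment above, apparently to leave a
small tail that reconnects the range when splinters are merged back). A simplified
standalone sketch of just that scan, with invented block numbering, follows; it is not
the V8 code, which additionally iterates per use interval and creates the splinters.

// deferred_cuts_sketch.cc -- simplified model, not the V8 implementation.
#include <iostream>
#include <utility>
#include <vector>

struct Block {
  int first_gap;  // gap position of the block's first instruction
  int last_gap;   // gap position of the block's last instruction
  bool deferred;  // rarely executed path (e.g. a bailout or exception handler)
};

// Returns one (first_cut, last_cut) pair per maximal run of deferred blocks.
std::vector<std::pair<int, int>> DeferredCuts(const std::vector<Block>& blocks) {
  std::vector<std::pair<int, int>> cuts;
  int first_cut = -1, last_cut = -1;
  for (const Block& b : blocks) {
    if (b.deferred) {
      if (first_cut < 0) first_cut = b.first_gap;
      last_cut = b.last_gap;  // cut only up to the block's last gap
    } else if (first_cut >= 0) {
      cuts.emplace_back(first_cut, last_cut);
      first_cut = last_cut = -1;
    }
  }
  if (first_cut >= 0) cuts.emplace_back(first_cut, last_cut);
  return cuts;
}

int main() {
  // Blocks in RPO order: the two middle blocks are deferred and form one cut.
  std::vector<Block> blocks = {
      {0, 3, false}, {4, 7, true}, {8, 11, true}, {12, 15, false}};
  for (auto [start, end] : DeferredCuts(blocks))
    std::cout << "cut " << start << ".." << end << "\n";  // prints "cut 4..11"
  return 0;
}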
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_BACKEND_LIVE_RANGE_SEPARATOR_H_
#define V8_COMPILER_BACKEND_LIVE_RANGE_SEPARATOR_H_
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
class Zone;
namespace compiler {
class TopTierRegisterAllocationData;
// A register allocation pair of transformations: splinter and merge live ranges
class LiveRangeSeparator final : public ZoneObject {
public:
LiveRangeSeparator(TopTierRegisterAllocationData* data, Zone* zone)
: data_(data), zone_(zone) {}
void Splinter();
private:
TopTierRegisterAllocationData* data() const { return data_; }
Zone* zone() const { return zone_; }
TopTierRegisterAllocationData* const data_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeSeparator);
};
class LiveRangeMerger final : public ZoneObject {
public:
LiveRangeMerger(TopTierRegisterAllocationData* data, Zone* zone)
: data_(data), zone_(zone) {}
void Merge();
private:
TopTierRegisterAllocationData* data() const { return data_; }
Zone* zone() const { return zone_; }
// Mark ranges spilled in deferred blocks, that also cover non-deferred code.
// We do nothing special for ranges fully contained in deferred blocks,
// because they would "spill in deferred blocks" anyway.
void MarkRangesSpilledInDeferredBlocks();
TopTierRegisterAllocationData* const data_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeMerger);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_BACKEND_LIVE_RANGE_SEPARATOR_H_
......@@ -175,11 +175,7 @@ class LifetimePosition final {
std::ostream& operator<<(std::ostream& os, const LifetimePosition pos);
enum class RegisterAllocationFlag : unsigned {
kTurboControlFlowAwareAllocation = 1 << 0,
kTurboPreprocessRanges = 1 << 1,
kTraceAllocation = 1 << 2
};
enum class RegisterAllocationFlag : unsigned { kTraceAllocation = 1 << 0 };
using RegisterAllocationFlags = base::Flags<RegisterAllocationFlag>;
......@@ -210,14 +206,6 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
// regular code (kSpillAtDefinition).
enum SpillMode { kSpillAtDefinition, kSpillDeferred };
bool is_turbo_control_flow_aware_allocation() const {
return flags_ & RegisterAllocationFlag::kTurboControlFlowAwareAllocation;
}
bool is_turbo_preprocess_ranges() const {
return flags_ & RegisterAllocationFlag::kTurboPreprocessRanges;
}
bool is_trace_alloc() {
return flags_ & RegisterAllocationFlag::kTraceAllocation;
}
......@@ -615,7 +603,7 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos) const;
// Splitting primitive used by both splitting and splintering members.
// Splitting primitive used by splitting members.
// Performs the split, but does not link the resulting ranges.
// The given position must follow the start of the range.
// All uses following the given position will be moved from this
......@@ -708,7 +696,7 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
using ControlFlowRegisterHint = base::BitField<uint8_t, 22, 6>;
// Bits 28-31 are used by TopLevelLiveRange.
// Unique among children and splinters of the same virtual register.
// Unique among children of the same virtual register.
int relative_id_;
uint32_t bits_;
UseInterval* last_interval_;
......@@ -720,8 +708,6 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
mutable UseInterval* current_interval_;
// This is used as a cache, it doesn't affect correctness.
mutable UsePosition* last_processed_use_;
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
// This is used as a cache in BuildLiveRanges and during register allocation.
UsePosition* current_hint_position_;
LiveRangeBundle* bundle_ = nullptr;
......@@ -853,16 +839,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// Shorten the most recently added interval by setting a new start.
void ShortenTo(LifetimePosition start, bool trace_alloc);
// Detaches between start and end, and attributes the resulting range to
// result.
// The current range is pointed to as "splintered_from". No parent/child
// relationship is established between this and result.
void Splinter(LifetimePosition start, LifetimePosition end, Zone* zone);
// Assuming other was splintered from this range, embeds other and its
// children as part of the children sequence of this range.
void Merge(TopLevelLiveRange* other, Zone* zone);
// Spill range management.
void SetSpillRange(SpillRange* spill_range);
......@@ -963,19 +939,12 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
}
}
TopLevelLiveRange* splintered_from() const { return splintered_from_; }
bool IsSplinter() const { return splintered_from_ != nullptr; }
bool MayRequireSpillRange() const {
DCHECK(!IsSplinter());
return !HasSpillOperand() && spill_range_ == nullptr;
}
void UpdateSpillRangePostMerge(TopLevelLiveRange* merged);
int vreg() const { return vreg_; }
#if DEBUG
int debug_virt_reg() const;
#endif
void Verify() const;
void VerifyChildrenInOrder() const;
......@@ -985,19 +954,13 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// if you call it with a non-decreasing sequence of positions.
LiveRange* GetChildCovers(LifetimePosition pos);
int GetNextChildId() {
return IsSplinter() ? splintered_from()->GetNextChildId()
: ++last_child_id_;
}
int GetNextChildId() { return ++last_child_id_; }
int GetMaxChildCount() const { return last_child_id_ + 1; }
bool IsSpilledOnlyInDeferredBlocks(
const TopTierRegisterAllocationData* data) const {
if (data->is_turbo_control_flow_aware_allocation()) {
return spill_type() == SpillType::kDeferredSpillRange;
}
return spilled_in_deferred_blocks_;
return spill_type() == SpillType::kDeferredSpillRange;
}
struct SpillMoveInsertionList;
......@@ -1007,17 +970,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
DCHECK(!IsSpilledOnlyInDeferredBlocks(data));
return spill_move_insertion_locations_;
}
TopLevelLiveRange* splinter() const { return splinter_; }
void SetSplinter(TopLevelLiveRange* splinter) {
DCHECK_NULL(splinter_);
DCHECK_NOT_NULL(splinter);
splinter_ = splinter;
splinter->relative_id_ = GetNextChildId();
splinter->set_spill_type(spill_type());
splinter->SetSplinteredFrom(this);
if (bundle_ != nullptr) splinter->set_bundle(bundle_);
}
void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
bool has_preassigned_slot() const { return has_preassigned_slot_; }
......@@ -1056,7 +1008,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
private:
friend class LiveRange;
void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
// If spill type is kSpillRange, then this value indicates whether we've
// chosen to spill at the definition or at some later points.
......@@ -1076,7 +1027,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
int vreg_;
int last_child_id_;
TopLevelLiveRange* splintered_from_;
union {
// Correct value determined by spill_type()
InstructionOperand* spill_operand_;
......@@ -1096,7 +1046,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
int spill_start_index_;
UsePosition* last_pos_;
LiveRange* last_child_covers_;
TopLevelLiveRange* splinter_;
DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
......@@ -1310,11 +1259,8 @@ class LiveRangeBuilder final : public ZoneObject {
spill_mode);
}
SpillMode SpillModeForBlock(const InstructionBlock* block) const {
if (data()->is_turbo_control_flow_aware_allocation()) {
return block->IsDeferred() ? SpillMode::kSpillDeferred
: SpillMode::kSpillAtDefinition;
}
return SpillMode::kSpillAtDefinition;
return block->IsDeferred() ? SpillMode::kSpillDeferred
: SpillMode::kSpillAtDefinition;
}
TopTierRegisterAllocationData* const data_;
ZoneMap<InstructionOperand*, UsePosition*> phi_hints_;
......@@ -1529,7 +1475,6 @@ class LinearScanAllocator final : public RegisterAllocator {
Vector<LifetimePosition> free_until_pos);
void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode);
void AllocateBlockedReg(LiveRange* range, SpillMode spill_mode);
bool TrySplitAndSpillSplinter(LiveRange* range);
// Spill the given life range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos, SpillMode spill_mode);
......
......@@ -774,10 +774,7 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
}
}
// The toplevel range might be a splinter. Pre-resolve those here so that
// they have a proper parent.
const TopLevelLiveRange* parent = range->TopLevel();
if (parent->IsSplinter()) parent = parent->splintered_from();
os_ << " " << parent->vreg() << ":" << parent->relative_id();
// TODO(herhut) Find something useful to print for the hint field
......
......@@ -22,7 +22,6 @@
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/jump-threading.h"
#include "src/compiler/backend/live-range-separator.h"
#include "src/compiler/backend/mid-tier-register-allocator.h"
#include "src/compiler/backend/move-optimizer.h"
#include "src/compiler/backend/register-allocator-verifier.h"
......@@ -2199,17 +2198,6 @@ struct BuildBundlesPhase {
}
};
struct SplinterLiveRangesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SplinterLiveRanges)
void Run(PipelineData* data, Zone* temp_zone) {
LiveRangeSeparator live_range_splinterer(
data->top_tier_register_allocation_data(), temp_zone);
live_range_splinterer.Splinter();
}
};
template <typename RegAllocator>
struct AllocateGeneralRegistersPhase {
DECL_PIPELINE_PHASE_CONSTANTS(AllocateGeneralRegisters)
......@@ -2232,18 +2220,6 @@ struct AllocateFPRegistersPhase {
}
};
struct MergeSplintersPhase {
DECL_PIPELINE_PHASE_CONSTANTS(MergeSplinteredRanges)
void Run(PipelineData* pipeline_data, Zone* temp_zone) {
TopTierRegisterAllocationData* data =
pipeline_data->top_tier_register_allocation_data();
LiveRangeMerger live_range_merger(data, temp_zone);
live_range_merger.Merge();
}
};
struct DecideSpillingModePhase {
DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
......@@ -3626,12 +3602,6 @@ void PipelineImpl::AllocateRegistersForTopTier(
#endif
RegisterAllocationFlags flags;
if (data->info()->turbo_control_flow_aware_allocation()) {
flags |= RegisterAllocationFlag::kTurboControlFlowAwareAllocation;
}
if (data->info()->turbo_preprocess_ranges()) {
flags |= RegisterAllocationFlag::kTurboPreprocessRanges;
}
if (data->info()->trace_turbo_allocation()) {
flags |= RegisterAllocationFlag::kTraceAllocation;
}
......@@ -3657,25 +3627,12 @@ void PipelineImpl::AllocateRegistersForTopTier(
"PreAllocation", data->top_tier_register_allocation_data());
}
if (info()->turbo_preprocess_ranges()) {
Run<SplinterLiveRangesPhase>();
if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData(
"PostSplinter", data->top_tier_register_allocation_data());
}
}
Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
if (data->sequence()->HasFPVirtualRegisters()) {
Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
}
if (info()->turbo_preprocess_ranges()) {
Run<MergeSplintersPhase>();
}
Run<DecideSpillingModePhase>();
Run<AssignSpillSlotsPhase>();
Run<CommitAssignmentPhase>();
......
......@@ -596,8 +596,6 @@ DEFINE_BOOL(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_BOOL(opt, true, "use adaptive optimizations")
DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_control_flow_aware_allocation, true,
"consider control flow while allocating registers")
DEFINE_BOOL(
stress_turbo_late_spilling, false,
"optimize placement of all spill instructions, not just loop-top phis")
......
......@@ -927,7 +927,6 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MachineOperatorOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MeetRegisterConstraints) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MemoryOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MergeSplinteredRanges) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, OptimizeMoves) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PopulatePointerMaps) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PrintGraph) \
......@@ -939,7 +938,6 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Scheduling) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SelectInstructions) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SimplifiedLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SplinterLiveRanges) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, StoreStoreElimination) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypeAssertions) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypedLowering) \
......
......@@ -85,18 +85,6 @@ class LiveRangeUnitTest : public TestWithZone {
return range->SplitAt(LifetimePosition::FromInt(pos), zone());
}
TopLevelLiveRange* Splinter(TopLevelLiveRange* top, int start, int end,
int new_id = 0) {
if (top->splinter() == nullptr) {
TopLevelLiveRange* ret = zone()->New<TopLevelLiveRange>(
new_id, MachineRepresentation::kTagged);
top->SetSplinter(ret);
}
top->Splinter(LifetimePosition::FromInt(start),
LifetimePosition::FromInt(end), zone());
return top->splinter();
}
// Ranges first and second match structurally.
bool RangesMatch(LiveRange* first, LiveRange* second) {
if (first->Start() != second->Start() || first->End() != second->End()) {
......@@ -308,185 +296,6 @@ TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsAfter) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
TEST_F(LiveRangeUnitTest, SplinterSingleInterval) {
TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 6);
TopLevelLiveRange* splinter = Splinter(range, 3, 5);
EXPECT_EQ(nullptr, range->next());
EXPECT_EQ(nullptr, splinter->next());
EXPECT_EQ(range, splinter->splintered_from());
TopLevelLiveRange* expected_source =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 6).Build();
TopLevelLiveRange* expected_splinter = TestRangeBuilder(zone()).Build(3, 5);
EXPECT_TRUE(RangesMatch(expected_source, range));
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
TEST_F(LiveRangeUnitTest, MergeSingleInterval) {
TopLevelLiveRange* original = TestRangeBuilder(zone()).Build(0, 6);
TopLevelLiveRange* splinter = Splinter(original, 3, 5);
original->Merge(splinter, zone());
TopLevelLiveRange* result = TestRangeBuilder(zone()).Build(0, 6);
LiveRange* child_1 = Split(result, 3);
Split(child_1, 5);
EXPECT_TRUE(RangesMatch(result, original));
}
TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsOutside) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
TopLevelLiveRange* splinter = Splinter(range, 2, 6);
EXPECT_EQ(nullptr, range->next());
EXPECT_EQ(nullptr, splinter->next());
EXPECT_EQ(range, splinter->splintered_from());
TopLevelLiveRange* expected_source =
TestRangeBuilder(zone()).Add(0, 2).Add(6, 8).Build();
TopLevelLiveRange* expected_splinter =
TestRangeBuilder(zone()).Add(2, 3).Add(5, 6).Build();
EXPECT_TRUE(RangesMatch(expected_source, range));
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsOutside) {
TopLevelLiveRange* original =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
TopLevelLiveRange* splinter = Splinter(original, 2, 6);
original->Merge(splinter, zone());
TopLevelLiveRange* result =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
LiveRange* child_1 = Split(result, 2);
Split(child_1, 6);
EXPECT_TRUE(RangesMatch(result, original));
}
TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsInside) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
V8_ASSERT_DEBUG_DEATH(Splinter(range, 3, 5), ".*");
}
TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsLeft) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
TopLevelLiveRange* splinter = Splinter(range, 2, 4);
EXPECT_EQ(nullptr, range->next());
EXPECT_EQ(nullptr, splinter->next());
EXPECT_EQ(range, splinter->splintered_from());
TopLevelLiveRange* expected_source =
TestRangeBuilder(zone()).Add(0, 2).Add(5, 8).Build();
TopLevelLiveRange* expected_splinter = TestRangeBuilder(zone()).Build(2, 3);
EXPECT_TRUE(RangesMatch(expected_source, range));
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsLeft) {
TopLevelLiveRange* original =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
TopLevelLiveRange* splinter = Splinter(original, 2, 4);
original->Merge(splinter, zone());
TopLevelLiveRange* result =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
Split(result, 2);
EXPECT_TRUE(RangesMatch(result, original));
}
TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsRight) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
TopLevelLiveRange* splinter = Splinter(range, 4, 6);
EXPECT_EQ(nullptr, range->next());
EXPECT_EQ(nullptr, splinter->next());
EXPECT_EQ(range, splinter->splintered_from());
TopLevelLiveRange* expected_source =
TestRangeBuilder(zone()).Add(0, 3).Add(6, 8).Build();
TopLevelLiveRange* expected_splinter = TestRangeBuilder(zone()).Build(5, 6);
EXPECT_TRUE(RangesMatch(expected_source, range));
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
TEST_F(LiveRangeUnitTest, SplinterMergeMultipleTimes) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 10).Add(12, 16).Build();
Splinter(range, 4, 6);
Splinter(range, 8, 14);
TopLevelLiveRange* splinter = range->splinter();
EXPECT_EQ(nullptr, range->next());
EXPECT_EQ(nullptr, splinter->next());
EXPECT_EQ(range, splinter->splintered_from());
TopLevelLiveRange* expected_source =
TestRangeBuilder(zone()).Add(0, 3).Add(6, 8).Add(14, 16).Build();
TopLevelLiveRange* expected_splinter =
TestRangeBuilder(zone()).Add(5, 6).Add(8, 10).Add(12, 14).Build();
EXPECT_TRUE(RangesMatch(expected_source, range));
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsRight) {
TopLevelLiveRange* original =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
TopLevelLiveRange* splinter = Splinter(original, 4, 6);
original->Merge(splinter, zone());
TopLevelLiveRange* result =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
LiveRange* child_1 = Split(result, 5);
Split(child_1, 6);
EXPECT_TRUE(RangesMatch(result, original));
}
TEST_F(LiveRangeUnitTest, MergeAfterSplitting) {
TopLevelLiveRange* original = TestRangeBuilder(zone()).Build(0, 8);
TopLevelLiveRange* splinter = Splinter(original, 4, 6);
LiveRange* original_child = Split(original, 2);
Split(original_child, 7);
original->Merge(splinter, zone());
TopLevelLiveRange* result = TestRangeBuilder(zone()).Build(0, 8);
LiveRange* child_1 = Split(result, 2);
LiveRange* child_2 = Split(child_1, 4);
LiveRange* child_3 = Split(child_2, 6);
Split(child_3, 7);
EXPECT_TRUE(RangesMatch(result, original));
}
TEST_F(LiveRangeUnitTest, IDGeneration) {
TopLevelLiveRange* vreg = TestRangeBuilder(zone()).Id(2).Build(0, 100);
EXPECT_EQ(2, vreg->vreg());
EXPECT_EQ(0, vreg->relative_id());
TopLevelLiveRange* splinter =
zone()->New<TopLevelLiveRange>(101, MachineRepresentation::kTagged);
vreg->SetSplinter(splinter);
vreg->Splinter(LifetimePosition::FromInt(4), LifetimePosition::FromInt(12),
zone());
EXPECT_EQ(101, splinter->vreg());
EXPECT_EQ(1, splinter->relative_id());
LiveRange* child = vreg->SplitAt(LifetimePosition::FromInt(50), zone());
EXPECT_EQ(2, child->relative_id());
LiveRange* splinter_child =
splinter->SplitAt(LifetimePosition::FromInt(8), zone());
EXPECT_EQ(1, splinter->relative_id());
EXPECT_EQ(3, splinter_child->relative_id());
vreg->Merge(splinter, zone());
EXPECT_EQ(1, splinter->relative_id());
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -634,63 +634,7 @@ TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
Reg(0), Slot(0)));
}
TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
if (FLAG_turbo_control_flow_aware_allocation) return;
StartBlock(); // B0
auto var1 = EmitOI(Reg(0));
auto var2 = EmitOI(Reg(1));
auto var3 = EmitOI(Reg(2));
EndBlock(Branch(Reg(var1, 0), 1, 2));
StartBlock(true); // B1
EmitCall(Slot(-2), Slot(var1));
EndBlock(Jump(2));
StartBlock(true); // B2
EmitCall(Slot(-1), Slot(var2));
EndBlock();
StartBlock(); // B3
EmitNop();
EndBlock();
StartBlock(); // B4
Return(Reg(var3, 2));
EndBlock();
const int def_of_v2 = 3;
const int call_in_b1 = 4;
const int call_in_b2 = 6;
const int end_of_b1 = 5;
const int end_of_b2 = 7;
const int start_of_b3 = 8;
Allocate();
// TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
// so only var3 is spilled in deferred blocks.
const int var3_reg = 2;
const int var3_slot = 2;
EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
Reg(var3_reg), Slot()));
EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(),
Reg(var3_reg), Slot(var3_slot)));
EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(),
Slot(var3_slot), Reg()));
EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(),
Reg(var3_reg), Slot(var3_slot)));
EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(),
Slot(var3_slot), Reg()));
EXPECT_EQ(0,
GetParallelMoveCount(start_of_b3, Instruction::START, sequence()));
}
TEST_F(RegisterAllocatorTest, ValidMultipleDeferredBlockSpills) {
if (!FLAG_turbo_control_flow_aware_allocation) return;
StartBlock(); // B0
auto var1 = EmitOI(Reg(0));
auto var2 = EmitOI(Reg(1));
......