Commit 0b024958 authored by mtrofin, committed by Commit bot

[turbofan] Centralize splitting for memory operands.

This addresses an issue where the at-start splitting used
in the splintering mechanism conflicted with the mechanics
of the linear allocator, in particular the initial split/spill
of ranges defined by memory operands. We already perform a
split-at-start in the Greedy allocator, so this change
centralizes that logic in the base RegisterAllocator.
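
For context, the sketch below illustrates the decision the centralized
routine makes for a range defined by a memory operand: spill the whole
range when no use benefits from a register, split just before the first
register-beneficial use and spill the head when that use is far enough
from the definition, and leave the range alone otherwise. This is a
minimal standalone sketch with simplified stand-in types (Pos, UsePos,
Range and DecideForMemoryOperandRange are hypothetical), not the actual
V8 classes.

// Standalone sketch of the split/spill decision for ranges defined by a
// memory operand (constants, stack-passed parameters). Simplified types;
// the real code works on LifetimePosition gap/start sub-positions.
#include <iostream>
#include <optional>
#include <vector>

using Pos = int;  // instruction-granular position

struct UsePos {
  Pos pos;
  bool register_beneficial;  // would this use profit from a register?
};

struct Range {
  Pos start;
  Pos end;
  bool has_spill_operand;    // defined by a constant / stack parameter
  std::vector<UsePos> uses;  // sorted by position
};

enum class Action { kSpillWholeRange, kSplitThenSpillHead, kLeaveAlone };

Action DecideForMemoryOperandRange(const Range& r) {
  if (!r.has_spill_operand) return Action::kLeaveAlone;
  // Find the first use after the definition that benefits from a register.
  std::optional<Pos> first_beneficial;
  for (const UsePos& u : r.uses) {
    if (u.pos > r.start && u.register_beneficial) {
      first_beneficial = u.pos;
      break;
    }
  }
  // No use wants a register: spill the whole range up front.
  if (!first_beneficial) return Action::kSpillWholeRange;
  // The beneficial use is too close to the start: no room to split.
  if (*first_beneficial <= r.start + 1) return Action::kLeaveAlone;
  // Otherwise split just before that use and spill only the head.
  return Action::kSplitThenSpillHead;
}

int main() {
  Range constant_range{/*start=*/10, /*end=*/40,
                       /*has_spill_operand=*/true,
                       {{/*pos=*/25, /*register_beneficial=*/true}}};
  Action a = DecideForMemoryOperandRange(constant_range);
  std::cout << (a == Action::kSplitThenSpillHead ? "split+spill head\n"
                                                 : "other\n");
  return 0;
}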

Verified locally that this addresses v8:4508. It also fixes
the failures that required the revert 5308a999; see the
trybots on issue 1425533002.

R=bmeurer@chromium.org,jarin@chromium.org
BUG=v8:4508
LOG=n

Review URL: https://codereview.chromium.org/1426583002

Cr-Commit-Position: refs/heads/master@{#31544}
parent e8ff5181
@@ -50,19 +50,6 @@ LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
}
// TODO(mtrofin): explain why splitting in gap START is always OK.
LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
int instruction_index) {
LifetimePosition ret = LifetimePosition::Invalid();
ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
if (range->Start() >= ret || ret >= range->End()) {
return LifetimePosition::Invalid();
}
return ret;
}
} // namespace
@@ -374,43 +361,6 @@ void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
}
void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
size_t initial_range_count = data()->live_ranges().size();
for (size_t i = 0; i < initial_range_count; ++i) {
TopLevelLiveRange* range = data()->live_ranges()[i];
if (!CanProcessRange(range)) continue;
if (!range->HasSpillOperand()) continue;
LifetimePosition start = range->Start();
TRACE("Live range %d:%d is defined by a spill operand.\n",
range->TopLevel()->vreg(), range->relative_id());
auto next_pos = start;
if (next_pos.IsGapPosition()) {
next_pos = next_pos.NextStart();
}
auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
// If the range already has a spill operand and it doesn't need a
// register immediately, split it and spill the first part of the range.
if (pos == nullptr) {
Spill(range);
} else if (pos->pos() > range->Start().NextStart()) {
// Do not spill the live range eagerly if the use position that can
// benefit from the register is too close to the start of the live range.
auto split_pos = GetSplitPositionForInstruction(
range, pos->pos().ToInstructionIndex());
// There is no place to split, so we can't split and spill.
if (!split_pos.IsValid()) continue;
split_pos =
FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
Split(range, data(), split_pos);
Spill(range);
}
}
}
void GreedyAllocator::AllocateRegisters() {
CHECK(scheduler().empty());
CHECK(allocations_.empty());
......
@@ -128,18 +128,10 @@ class GreedyAllocator final : public RegisterAllocator {
// Evict and reschedule conflicts of a given range, at a given register.
void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
// Find the optimal split for ranges defined by a memory operand, e.g.
// constants or function parameters passed on the stack.
void SplitAndSpillRangesDefinedByMemoryOperand();
void TryAllocateCandidate(const AllocationCandidate& candidate);
void TryAllocateLiveRange(LiveRange* range);
void TryAllocateGroup(LiveRangeGroup* group);
bool CanProcessRange(LiveRange* range) const {
return range != nullptr && !range->IsEmpty() && range->kind() == mode();
}
// Calculate the weight of a candidate for allocation.
void EnsureValidRangeWeight(LiveRange* range);
......
@@ -704,6 +704,13 @@ TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineType machine_type)
}
#if DEBUG
int TopLevelLiveRange::debug_virt_reg() const {
return IsSplinter() ? splintered_from()->vreg() : vreg();
}
#endif
void TopLevelLiveRange::SpillAtDefinition(Zone* zone, int gap_index,
InstructionOperand* operand) {
DCHECK(HasNoSpillType());
@@ -2213,6 +2220,55 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
GetAllocatableRegisterCodes(data->config(), kind)) {}
LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
const LiveRange* range, int instruction_index) {
LifetimePosition ret = LifetimePosition::Invalid();
ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
if (range->Start() >= ret || ret >= range->End()) {
return LifetimePosition::Invalid();
}
return ret;
}
void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
size_t initial_range_count = data()->live_ranges().size();
for (size_t i = 0; i < initial_range_count; ++i) {
TopLevelLiveRange* range = data()->live_ranges()[i];
if (!CanProcessRange(range)) continue;
if (!range->HasSpillOperand()) continue;
LifetimePosition start = range->Start();
TRACE("Live range %d:%d is defined by a spill operand.\n",
range->TopLevel()->vreg(), range->relative_id());
LifetimePosition next_pos = start;
if (next_pos.IsGapPosition()) {
next_pos = next_pos.NextStart();
}
UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
// If the range already has a spill operand and it doesn't need a
// register immediately, split it and spill the first part of the range.
if (pos == nullptr) {
Spill(range);
} else if (pos->pos() > range->Start().NextStart()) {
// Do not spill the live range eagerly if the use position that can
// benefit from the register is too close to the start of the live range.
LifetimePosition split_pos = GetSplitPositionForInstruction(
range, pos->pos().ToInstructionIndex());
// There is no place to split, so we can't split and spill.
if (!split_pos.IsValid()) continue;
split_pos =
FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
SplitRangeAt(range, split_pos);
Spill(range);
}
}
}
LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
LifetimePosition pos) {
DCHECK(!range->TopLevel()->IsFixed());
@@ -2364,10 +2420,15 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(active_live_ranges().empty());
DCHECK(inactive_live_ranges().empty());
for (LiveRange* range : data()->live_ranges()) {
if (range == nullptr) continue;
if (range->kind() == mode()) {
AddToUnhandledUnsorted(range);
SplitAndSpillRangesDefinedByMemoryOperand();
for (TopLevelLiveRange* range : data()->live_ranges()) {
if (!CanProcessRange(range)) continue;
for (LiveRange* to_add = range; to_add != nullptr;
to_add = to_add->next()) {
if (!to_add->spilled()) {
AddToUnhandledUnsorted(to_add);
}
}
}
SortUnhandled();
......
@@ -562,6 +562,10 @@ class TopLevelLiveRange final : public LiveRange {
void UpdateSpillRangePostMerge(TopLevelLiveRange* merged);
int vreg() const { return vreg_; }
#if DEBUG
int debug_virt_reg() const;
#endif
int GetNextChildId() {
return IsSplinter() ? splintered_from()->GetNextChildId()
: ++last_child_id_;
@@ -909,8 +913,16 @@ class RegisterAllocator : public ZoneObject {
return allocatable_register_codes_[allocatable_index];
}
// TODO(mtrofin): explain why splitting in gap START is always OK.
LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
int instruction_index);
Zone* allocation_zone() const { return data()->allocation_zone(); }
// Find the optimal split for ranges defined by a memory operand, e.g.
// constants or function parameters passed on the stack.
void SplitAndSpillRangesDefinedByMemoryOperand();
// Split the given range at the given position.
// If range starts at or after the given position then the
// original range is returned.
@@ -919,6 +931,11 @@ class RegisterAllocator : public ZoneObject {
// still be owned by the original range after splitting.
LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
bool CanProcessRange(LiveRange* range) const {
return range != nullptr && !range->IsEmpty() && range->kind() == mode();
}
// Split the given range in a position from the interval [start, end].
LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
LifetimePosition end);
......