Commit be7e4361 authored by mtrofin, committed by Commit bot

A simpler way to determine whether a range spills only in deferred blocks:
validate that the hot path does not spill. The resulting code is somewhat
simpler.
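
Illustrative sketch (editor's aside, not part of the patch): after the
LiveRangeSeparator runs, the splinter holds the deferred portions of a range,
so the hot path is the remaining chain of children of the top-level range. The
new MarkRangesSpilledInDeferredBlocks in the diff below walks that chain and
marks the range as "spilled in deferred blocks" only if no child is spilled or
has a use that needs a stack slot. A toy, self-contained version of that walk,
using made-up types rather than V8's LiveRange:

#include <vector>

// Toy stand-in for a child LiveRange that lives on the hot (non-deferred) path.
struct ToyHotPathChild {
  bool spilled;       // the allocator spilled this child
  bool has_slot_use;  // some use position needs the value on the stack
};

// True iff the hot path never needs the value spilled, i.e. all spilling can
// be pushed into the deferred blocks.
bool HotPathNeverSpills(const std::vector<ToyHotPathChild>& hot_children) {
  for (const ToyHotPathChild& child : hot_children) {
    if (child.spilled || child.has_slot_use) return false;
  }
  return true;
}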

Cleaned up the scenario where a range is defined in a deferred block. The
previous code was slightly more complicated because it did not leverage the
property that such ranges are completely contained within deferred blocks.
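
Illustrative sketch (editor's aside, not part of the patch): this containment
property is what the new RangesDefinedInDeferredStayInDeferred verifier in the
diff below checks, by hopping block by block over every instruction covered by
the range and requiring each block to be deferred. A minimal standalone model
of that walk, with toy stand-ins for InstructionBlock and UseInterval:

#include <vector>

// Toy stand-ins for V8's InstructionBlock and UseInterval (names are ours).
struct ToyBlock {
  int first_instruction;  // inclusive
  int last_instruction;   // inclusive
  bool deferred;
};

struct ToyInterval {
  int first_instruction;  // first instruction covered, inclusive
  int last_instruction;   // last instruction covered, inclusive
};

// True iff every instruction covered by the intervals lies in a deferred
// block. Blocks are assumed to be sorted and to cover all instruction indices.
bool CoversOnlyDeferredCode(const std::vector<ToyBlock>& blocks,
                            const std::vector<ToyInterval>& intervals) {
  for (const ToyInterval& interval : intervals) {
    // Hop block by block, the same way the verifier in the diff below does.
    for (int instr = interval.first_instruction;
         instr <= interval.last_instruction;) {
      const ToyBlock* block = nullptr;
      for (const ToyBlock& b : blocks) {
        if (instr >= b.first_instruction && instr <= b.last_instruction) {
          block = &b;
          break;
        }
      }
      if (block == nullptr) return false;  // malformed input in this toy model
      if (!block->deferred) return false;
      instr = block->last_instruction + 1;
    }
  }
  return true;
}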

Moved "spills in deferred blocks" marking to a more appropriate
location.

One thing this CL achieves is correct support for scenarios where a range is
spilled first on the deferred path and then on the hot path, and the resulting
ranges concatenate. I owe better unit testing, which I will add in a subsequent
CL.

BUG=

Review URL: https://codereview.chromium.org/1472803004

Cr-Commit-Position: refs/heads/master@{#32302}
parent 19741ac9
@@ -58,26 +58,6 @@ void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
}
int FirstInstruction(const UseInterval *interval) {
LifetimePosition start = interval->start();
int ret = start.ToInstructionIndex();
if (start.IsInstructionPosition() && start.IsEnd()) {
++ret;
}
return ret;
}
int LastInstruction(const UseInterval *interval) {
LifetimePosition end = interval->end();
int ret = end.ToInstructionIndex();
if (end.IsGapPosition() || end.IsStart()) {
--ret;
}
return ret;
}
void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
const InstructionSequence *code = data->code();
UseInterval *interval = range->first_interval();
@@ -88,9 +68,9 @@ void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
while (interval != nullptr) {
UseInterval *next_interval = interval->next();
const InstructionBlock *first_block =
code->GetInstructionBlock(FirstInstruction(interval));
code->GetInstructionBlock(interval->FirstInstructionIndex());
const InstructionBlock *last_block =
code->GetInstructionBlock(LastInstruction(interval));
code->GetInstructionBlock(interval->LastInstructionIndex());
int first_block_nr = first_block->rpo_number().ToInt();
int last_block_nr = last_block->rpo_number().ToInt();
for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
@@ -129,12 +109,35 @@ void LiveRangeSeparator::Splinter() {
if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
continue;
}
SplinterLiveRange(range, data());
int first_instr = range->first_interval()->FirstInstructionIndex();
if (!data()->code()->GetInstructionBlock(first_instr)->IsDeferred()) {
SplinterLiveRange(range, data());
}
}
}
void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
for (TopLevelLiveRange *top : data()->live_ranges()) {
if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr) {
continue;
}
LiveRange *child = top;
for (; child != nullptr; child = child->next()) {
if (child->spilled() ||
child->NextSlotPosition(child->Start()) != nullptr) {
break;
}
}
if (child == nullptr) top->MarkSpilledInDeferredBlock();
}
}
void LiveRangeMerger::Merge() {
MarkRangesSpilledInDeferredBlocks();
int live_range_count = static_cast<int>(data()->live_ranges().size());
for (int i = 0; i < live_range_count; ++i) {
TopLevelLiveRange *range = data()->live_ranges()[i];
......
@@ -47,6 +47,11 @@ class LiveRangeMerger final : public ZoneObject {
RegisterAllocationData* data() const { return data_; }
Zone* zone() const { return zone_; }
// Mark ranges spilled in deferred blocks, that also cover non-deferred code.
// We do nothing special for ranges fully contained in deferred blocks,
// because they would "spill in deferred blocks" anyway.
void MarkRangesSpilledInDeferredBlocks();
RegisterAllocationData* const data_;
Zone* const zone_;
......
@@ -1372,6 +1372,8 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
}
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
CHECK(data->register_allocation_data()
->RangesDefinedInDeferredStayInDeferred());
}
if (FLAG_turbo_preprocess_ranges) {
......
@@ -713,52 +713,6 @@ void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
}
void TopLevelLiveRange::MarkSpilledInDeferredBlock(
const InstructionSequence* code) {
if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() ||
!HasSpillRange()) {
return;
}
int count = 0;
for (const LiveRange* child = this; child != nullptr; child = child->next()) {
int first_instr = child->Start().ToInstructionIndex();
// If the range starts at instruction end, the first instruction index is
// the next one.
if (!child->Start().IsGapPosition() && !child->Start().IsStart()) {
++first_instr;
}
// We only look at where the range starts. It doesn't matter where it ends:
// if it ends past this block, then either there is a phi there already,
// or ResolveControlFlow will adapt the last instruction gap of this block
// as if there were a phi. In either case, data flow will be correct.
const InstructionBlock* block = code->GetInstructionBlock(first_instr);
// If we have slot uses in a subrange, bail out, because we need the value
// on the stack before that use.
bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr;
if (!block->IsDeferred()) {
if (child->spilled() || has_slot_use) {
TRACE(
"Live Range %d must be spilled at definition: found a "
"slot-requiring non-deferred child range %d.\n",
TopLevel()->vreg(), child->relative_id());
return;
}
} else {
if (child->spilled() || has_slot_use) ++count;
}
}
if (count == 0) return;
spill_start_index_ = -1;
spilled_in_deferred_blocks_ = true;
spill_move_insertion_locations_ = nullptr;
}
bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
InstructionSequence* code, const InstructionOperand& spill_operand) {
if (!IsSpilledOnlyInDeferredBlocks()) return false;
@@ -854,17 +808,12 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
TopLevelLiveRange splinter_temp(-1, machine_type());
UsePosition* last_in_splinter = nullptr;
if (start <= Start()) {
// TODO(mtrofin): here, the TopLevel part is in the deferred range, so we
// may want to continue processing the splinter. However, if the value is
// defined in a cold block, and then used in a hot block, it follows that
// it should terminate on the RHS of a phi, defined on the hot path. We
// should check this, however, this may not be the place, because we don't
// have access to the instruction sequence.
DCHECK(end < End());
DetachAt(end, &splinter_temp, zone);
next_ = nullptr;
} else if (end >= End()) {
// Live ranges defined in deferred blocks stay in deferred blocks, so we
// don't need to splinter them. That means that start should always be
// after the beginning of the range.
DCHECK(start > Start());
if (end >= End()) {
DCHECK(start > Start());
DetachAt(start, &splinter_temp, zone);
next_ = nullptr;
@@ -1397,6 +1346,37 @@ bool RegisterAllocationData::ExistsUseWithoutDefinition() {
}
// If a range is defined in a deferred block, we can expect all the range
// to only cover positions in deferred blocks. Otherwise, a block on the
// hot path would be dominated by a deferred block, meaning it is unreachable
// without passing through the deferred block, which is contradictory.
// In particular, when such a range contributes a result back on the hot
// path, it will be as one of the inputs of a phi. In that case, the value
// will be transferred via a move in the Gap::END's of the last instruction
// of a deferred block.
bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
for (const TopLevelLiveRange* range : live_ranges()) {
if (range == nullptr || range->IsEmpty() ||
!code()
->GetInstructionBlock(range->Start().ToInstructionIndex())
->IsDeferred()) {
continue;
}
for (const UseInterval* i = range->first_interval(); i != nullptr;
i = i->next()) {
int first = i->FirstInstructionIndex();
int last = i->LastInstructionIndex();
for (int instr = first; instr <= last;) {
const InstructionBlock* block = code()->GetInstructionBlock(instr);
if (!block->IsDeferred()) return false;
instr = block->last_instruction_index() + 1;
}
}
}
return true;
}
SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
TopLevelLiveRange* range) {
DCHECK(!range->HasSpillOperand());
@@ -2964,7 +2944,6 @@ void SpillSlotLocator::LocateSpillSlots() {
if (range == nullptr || range->IsEmpty()) continue;
// We care only about ranges which spill in the frame.
if (!range->HasSpillRange()) continue;
range->MarkSpilledInDeferredBlock(data()->code());
if (range->IsSpilledOnlyInDeferredBlocks()) {
for (LiveRange* child = range; child != nullptr; child = child->next()) {
if (child->spilled()) {
......
@@ -194,6 +194,22 @@ class UseInterval final : public ZoneObject {
return start_ <= point && point < end_;
}
int FirstInstructionIndex() const {
int ret = start_.ToInstructionIndex();
if (start_.IsInstructionPosition() && start_.IsEnd()) {
++ret;
}
return ret;
}
int LastInstructionIndex() const {
int ret = end_.ToInstructionIndex();
if (end_.IsGapPosition() || end_.IsStart()) {
--ret;
}
return ret;
}
private:
LifetimePosition start_;
LifetimePosition end_;
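
Aside (illustrative, not part of the patch): the rounding in the two accessors
above follows from LifetimePosition packing four half-positions per instruction
index — gap start, gap end, instruction start, instruction end. A toy encoding
in that spirit, reproducing the same rounding rules:

#include <cassert>

// Toy position encoding: value = instruction index * 4 + offset, where the
// offsets are {0: gap start, 1: gap end, 2: instruction start, 3: instruction
// end}. This mirrors the spirit of LifetimePosition, not its exact API.
struct ToyPosition {
  int value;
  int ToInstructionIndex() const { return value / 4; }
  bool IsGapPosition() const { return (value & 2) == 0; }
  bool IsInstructionPosition() const { return !IsGapPosition(); }
  bool IsStart() const { return (value & 1) == 0; }
  bool IsEnd() const { return !IsStart(); }
};

// An interval that starts at an instruction's END position only covers the
// *next* instruction, hence the ++ in FirstInstructionIndex().
int FirstCoveredInstruction(ToyPosition start) {
  int ret = start.ToInstructionIndex();
  if (start.IsInstructionPosition() && start.IsEnd()) ++ret;
  return ret;
}

// An interval whose (exclusive) end is a gap position or a START position does
// not reach into that instruction's body, hence the -- in LastInstructionIndex().
int LastCoveredInstruction(ToyPosition end) {
  int ret = end.ToInstructionIndex();
  if (end.IsGapPosition() || end.IsStart()) --ret;
  return ret;
}

int main() {
  // Interval [end of instruction 5, gap start of instruction 7): it first and
  // last covers instruction 6.
  assert(FirstCoveredInstruction(ToyPosition{5 * 4 + 3}) == 6);
  assert(LastCoveredInstruction(ToyPosition{7 * 4 + 0}) == 6);
  return 0;
}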
@@ -550,7 +566,12 @@ class TopLevelLiveRange final : public LiveRange {
// and instead let the LiveRangeConnector perform the spills within the
// deferred blocks. If so, we insert here spills for non-spilled ranges
// with slot use positions.
void MarkSpilledInDeferredBlock(const InstructionSequence* code);
void MarkSpilledInDeferredBlock() {
spill_start_index_ = -1;
spilled_in_deferred_blocks_ = true;
spill_move_insertion_locations_ = nullptr;
}
bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
const InstructionOperand& spill_operand);
@@ -771,6 +792,7 @@ class RegisterAllocationData final : public ZoneObject {
}
bool ExistsUseWithoutDefinition();
bool RangesDefinedInDeferredStayInDeferred();
void MarkAllocated(RegisterKind kind, int index);
......