Commit 32a2ab0c authored by mtrofin, committed by Commit bot

[turbofan] Move frame elision logic to the end

We establish the spill blocks for ranges that spill only in deferred blocks
really late - just before optimization. This means the frame elision logic
should run after all the dust has settled - even after optimization, since
we may lose spills there (this is not currently leveraged).

Also enabled the elision algorithm for all functions, but forced the first
block to construct a frame in the non-code-stub cases. This prepares for a
subsequent change where we guide frame construction/destruction
solely based on the info produced by the register allocation pipeline.
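
(Background: "frame elision" here means deciding, per instruction block,
whether the function's stack frame must exist. Blocks that touch the frame
get mark_needs_frame(), and a later pass picks construction/deconstruction
points on the block graph. Below is a minimal sketch of the
construction-placement idea only, with hypothetical simplified types - it
is not V8's actual FrameElider:

#include <vector>

struct Block {
  bool needs_frame = false;          // set when the block touches the frame
  bool must_construct_frame = false; // frame is built on entry to this block
  std::vector<int> predecessors;     // indices into the block vector
};

// A block must construct the frame iff it needs one and the frame is not
// already guaranteed on entry, i.e. not every predecessor has one. The
// entry block has no predecessors, so pinning blocks[0].needs_frame (as
// this commit does for non-stub code) forces construction at entry.
void PlaceFrameConstruction(std::vector<Block>& blocks) {
  for (Block& b : blocks) {
    if (!b.needs_frame) continue;
    bool on_entry = !b.predecessors.empty();
    for (int pred : b.predecessors) {
      if (!blocks[pred].needs_frame) on_entry = false;
    }
    b.must_construct_frame = !on_entry;
  }
}

The real pass additionally propagates marks across the CFG and places
frame deconstruction before returns.)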

BUG=

Review URL: https://codereview.chromium.org/1810333003

Cr-Commit-Position: refs/heads/master@{#35016}
parent 66c6cadc
@@ -267,13 +267,16 @@ class PipelineData {
     register_allocation_data_ = nullptr;
   }
-  void InitializeInstructionSequence() {
+  void InitializeInstructionSequence(const CallDescriptor* descriptor) {
     DCHECK(sequence_ == nullptr);
     InstructionBlocks* instruction_blocks =
         InstructionSequence::InstructionBlocksFor(instruction_zone(),
                                                   schedule());
     sequence_ = new (instruction_zone()) InstructionSequence(
         info()->isolate(), instruction_zone(), instruction_blocks);
+    if (descriptor && descriptor->RequiresFrameAsIncoming()) {
+      sequence_->instruction_blocks()[0]->mark_needs_frame();
+    }
   }
   void InitializeFrameData(CallDescriptor* descriptor) {
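
The descriptor check above is what pins the entry frame for regular
functions while leaving stubs fully elidable. If memory serves,
CallDescriptor::RequiresFrameAsIncoming() in linkage.h amounts to the
following (paraphrased from the V8 tree of this era - verify against the
source):

  bool RequiresFrameAsIncoming() const {
    return IsCFunctionCall() || IsJSFunctionCall();
  }

With block 0 marked, FrameElisionPhase can never remove the frame on the
entry path of a JS or C-entry function; only stub-like code, whose
descriptor fails this predicate, remains a candidate for full elision.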
@@ -1336,7 +1339,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
         data->schedule());
   }
-  data->InitializeInstructionSequence();
+  data->InitializeInstructionSequence(call_descriptor);
   data->InitializeFrameData(call_descriptor);
   // Select and schedule instructions covering the scheduled graph.
@@ -1486,12 +1489,6 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
     Run<MergeSplintersPhase>();
   }
-  // We plan to enable frame elision only for stubs and bytecode handlers.
-  if (FLAG_turbo_frame_elision && info()->IsStub()) {
-    Run<LocateSpillSlotsPhase>();
-    Run<FrameElisionPhase>();
-  }
   Run<AssignSpillSlotsPhase>();
   Run<CommitAssignmentPhase>();
@@ -1502,6 +1499,9 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
     Run<OptimizeMovesPhase>();
   }
+  Run<LocateSpillSlotsPhase>();
+  Run<FrameElisionPhase>();
+
   if (FLAG_trace_turbo_graph) {
     OFStream os(stdout);
     PrintableInstructionSequence printable = {config, data->sequence()};
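
With the gated block above removed, the tail of Pipeline::AllocateRegisters
now runs frame analysis unconditionally and last, after move optimization,
so it observes the final spill placement (abridged from the hunks above):

  if (FLAG_turbo_move_optimization) {
    Run<OptimizeMovesPhase>();
  }
  // Running these last means a spill removed by move optimization could,
  // in principle, let a block drop its frame (not yet leveraged).
  Run<LocateSpillSlotsPhase>();
  Run<FrameElisionPhase>();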
@@ -3162,15 +3162,9 @@ void SpillSlotLocator::LocateSpillSlots() {
   for (TopLevelLiveRange* range : data()->live_ranges()) {
     if (range == nullptr || range->IsEmpty()) continue;
     // We care only about ranges which spill in the frame.
-    if (!range->HasSpillRange()) continue;
-    if (range->IsSpilledOnlyInDeferredBlocks()) {
-      for (LiveRange* child = range; child != nullptr; child = child->next()) {
-        if (child->spilled()) {
-          code->GetInstructionBlock(child->Start().ToInstructionIndex())
-              ->mark_needs_frame();
-        }
-      }
-    } else {
+    if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
+      continue;
+    }
     TopLevelLiveRange::SpillMoveInsertionList* spills =
         range->GetSpillMoveInsertionLocations();
     DCHECK_NOT_NULL(spills);

@@ -3178,7 +3172,6 @@ void SpillSlotLocator::LocateSpillSlots() {
       code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
     }
-    }
   }
 }
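
Assembled from the two hunks above, the simplified locator reads roughly
as follows; the local code variable and the spill-move loop header are
assumptions drawn from the surrounding context, not shown in the diff:

  void SpillSlotLocator::LocateSpillSlots() {
    const InstructionSequence* code = data()->code();  // assumed local
    for (TopLevelLiveRange* range : data()->live_ranges()) {
      if (range == nullptr || range->IsEmpty()) continue;
      // We care only about ranges which spill in the frame.
      if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
        // Deferred-only spills are marked when their moves are committed,
        // in CommitSpillsInDeferredBlocks (see the final hunk below).
        continue;
      }
      TopLevelLiveRange::SpillMoveInsertionList* spills =
          range->GetSpillMoveInsertionLocations();
      DCHECK_NOT_NULL(spills);
      for (; spills != nullptr; spills = spills->next) {  // assumed header
        code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
      }
    }
  }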
@@ -3639,7 +3632,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
       worklist.pop();
       if (done_blocks.Contains(block_id)) continue;
       done_blocks.Add(block_id);
-      const InstructionBlock* spill_block =
+      InstructionBlock* spill_block =
           code->InstructionBlockAt(RpoNumber::FromInt(block_id));
       for (const RpoNumber& pred : spill_block->predecessors()) {
@@ -3659,6 +3652,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
         data()->AddGapMove(spill_block->first_instruction_index(),
                            Instruction::GapPosition::START, pred_op,
                            spill_operand);
+        spill_block->mark_needs_frame();
       }
     }
   }
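
This final hunk is the counterpart to the LocateSpillSlots change, and is
why spill_block lost its const qualifier two hunks up: a deferred block is
marked as frame-requiring at the moment its spill move is actually
committed. Annotated excerpt (comments added, logic unchanged):

  // The spill store is inserted at the start of spill_block, so the block
  // now provably touches the frame; marking it lets the late-running
  // FrameElisionPhase keep a frame on exactly the paths that reach it.
  data()->AddGapMove(spill_block->first_instruction_index(),
                     Instruction::GapPosition::START, pred_op,
                     spill_operand);
  spill_block->mark_needs_frame();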