Commit 7c54dc33 authored by mtrofin, committed by Commit bot

[turbofan] optimize spills in deferred blocks

Up to now, for ranges spilled in deferred blocks, we would spill every
time a range switched from using a register to using its spill slot. That
can be redundant, leading to avoidable code size cost.

This change addresses the issue by performing the spills as early as
possible.
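For illustration only, here is a standalone sketch of the idea (not the V8
implementation; all names in it are made up): collect the deferred blocks
that actually need the value in its spill slot, then hoist a single spill to
the earliest point on the way into the deferred code, instead of emitting one
spill per register-to-slot transition.

```cpp
#include <bitset>
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr size_t kMaxBlocks = 64;

struct Block {
  int id;
  std::vector<int> predecessors;  // immediate predecessors in the CFG
  bool is_deferred;
};

// Hypothetical helper: starting from the deferred blocks that read the value
// from its spill slot, walk predecessors backwards through deferred code and
// stop at the boundary with non-deferred code. The blocks returned are the
// earliest places where one spill move covers all slot uses below them.
std::bitset<kMaxBlocks> FindSpillInsertionBlocks(
    const std::vector<Block>& blocks,
    const std::bitset<kMaxBlocks>& needs_spill_operand) {
  std::bitset<kMaxBlocks> spill_points;
  std::bitset<kMaxBlocks> visited;
  std::vector<int> worklist;
  for (size_t i = 0; i < blocks.size(); ++i) {
    if (needs_spill_operand.test(i)) worklist.push_back(static_cast<int>(i));
  }
  while (!worklist.empty()) {
    int id = worklist.back();
    worklist.pop_back();
    if (visited.test(id)) continue;
    visited.set(id);
    for (int pred : blocks[id].predecessors) {
      if (blocks[pred].is_deferred) {
        worklist.push_back(pred);  // keep hoisting inside deferred code
      } else {
        spill_points.set(id);      // spill on entry to the deferred region
      }
    }
  }
  return spill_points;
}

int main() {
  // Hot block 0 branches to deferred block 1, which falls through to
  // deferred block 2; only block 2 actually reads the value from the stack.
  std::vector<Block> blocks = {{0, {}, false}, {1, {0}, true}, {2, {1}, true}};
  std::bitset<kMaxBlocks> needs;
  needs.set(2);
  std::bitset<kMaxBlocks> spills = FindSpillInsertionBlocks(blocks, needs);
  std::printf("spill at entry of block 1: %s\n", spills.test(1) ? "yes" : "no");
  return 0;
}
```

In the actual change, the set of blocks is tracked per range in a BitVector
(list_of_blocks_requiring_spill_operands_) and the spills are committed by
the LiveRangeConnector, as the diff below shows.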

BUG=

Review URL: https://codereview.chromium.org/1551013002

Cr-Commit-Position: refs/heads/master@{#33413}
parent a910cb40
@@ -119,6 +119,7 @@ void LiveRangeSeparator::Splinter() {
void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
const InstructionSequence *code = data()->code();
for (TopLevelLiveRange *top : data()->live_ranges()) {
if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr ||
top->HasSpillOperand() || !top->splinter()->HasSpillRange()) {
@@ -132,7 +133,10 @@ void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
break;
}
}
if (child == nullptr) top->MarkSpilledInDeferredBlock();
if (child == nullptr) {
top->TreatAsSpilledInDeferredBlock(data()->allocation_zone(),
code->InstructionBlockCount());
}
}
}
(The diff for this file is collapsed and not shown.)
@@ -579,14 +579,17 @@ class TopLevelLiveRange final : public LiveRange {
// and instead let the LiveRangeConnector perform the spills within the
// deferred blocks. If so, we insert here spills for non-spilled ranges
// with slot use positions.
void MarkSpilledInDeferredBlock() {
void TreatAsSpilledInDeferredBlock(Zone* zone, int total_block_count) {
spill_start_index_ = -1;
spilled_in_deferred_blocks_ = true;
spill_move_insertion_locations_ = nullptr;
list_of_blocks_requiring_spill_operands_ =
new (zone) BitVector(total_block_count, zone);
}
bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
const InstructionOperand& spill_operand);
void CommitSpillInDeferredBlocks(RegisterAllocationData* data,
const InstructionOperand& spill_operand,
BitVector* necessary_spill_points);
TopLevelLiveRange* splintered_from() const { return splintered_from_; }
bool IsSplinter() const { return splintered_from_ != nullptr; }
@@ -617,7 +620,8 @@ class TopLevelLiveRange final : public LiveRange {
struct SpillMoveInsertionList;
SpillMoveInsertionList* spill_move_insertion_locations() const {
SpillMoveInsertionList* GetSpillMoveInsertionLocations() const {
DCHECK(!IsSpilledOnlyInDeferredBlocks());
return spill_move_insertion_locations_;
}
TopLevelLiveRange* splinter() const { return splinter_; }
@@ -634,6 +638,16 @@ class TopLevelLiveRange final : public LiveRange {
void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
bool has_preassigned_slot() const { return has_preassigned_slot_; }
void AddBlockRequiringSpillOperand(RpoNumber block_id) {
DCHECK(IsSpilledOnlyInDeferredBlocks());
GetListOfBlocksRequiringSpillOperands()->Add(block_id.ToInt());
}
BitVector* GetListOfBlocksRequiringSpillOperands() const {
DCHECK(IsSpilledOnlyInDeferredBlocks());
return list_of_blocks_requiring_spill_operands_;
}
private:
void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
@@ -650,7 +664,12 @@ class TopLevelLiveRange final : public LiveRange {
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
};
SpillMoveInsertionList* spill_move_insertion_locations_;
union {
SpillMoveInsertionList* spill_move_insertion_locations_;
BitVector* list_of_blocks_requiring_spill_operands_;
};
// TODO(mtrofin): generalize spilling after definition, currently specialized
// just for spill in a single deferred block.
bool spilled_in_deferred_blocks_;
@@ -1125,6 +1144,7 @@ class ReferenceMapPopulator final : public ZoneObject {
};
class LiveRangeBoundArray;
// Insert moves of the form
//
// Operand(child_(k+1)) = Operand(child_k)
@@ -1157,6 +1177,10 @@ class LiveRangeConnector final : public ZoneObject {
const InstructionBlock* pred,
const InstructionOperand& pred_op);
void CommitSpillsInDeferredBlocks(TopLevelLiveRange* range,
LiveRangeBoundArray* array,
Zone* temp_zone);
RegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector);
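A side note on the storage change in the header diff above:
spill_move_insertion_locations_ and the new
list_of_blocks_requiring_spill_operands_ share one pointer-sized slot via an
anonymous union, discriminated by spilled_in_deferred_blocks_, with DCHECKed
accessors so the wrong view cannot be read. A minimal standalone sketch of
that pattern (assumed stand-in types, not V8 code):

```cpp
#include <cassert>
#include <set>
#include <vector>

// Stand-ins for the real types (SpillMoveInsertionList and BitVector).
struct SpillMove { int gap_index; };
using SpillMoveList = std::vector<SpillMove>;
using BlockSet = std::set<int>;

class Range {
 public:
  Range() : spilled_in_deferred_blocks_(false), spill_moves_(nullptr) {}

  // Switch the range into "spill only in deferred blocks" mode; from now on
  // the shared slot holds the set of blocks that need a spill operand.
  void TreatAsSpilledInDeferredBlocks(BlockSet* blocks) {
    spilled_in_deferred_blocks_ = true;
    blocks_requiring_spill_ = blocks;
  }

  SpillMoveList* spill_moves() const {
    assert(!spilled_in_deferred_blocks_);  // only valid in the default mode
    return spill_moves_;
  }
  BlockSet* blocks_requiring_spill() const {
    assert(spilled_in_deferred_blocks_);   // only valid after the switch
    return blocks_requiring_spill_;
  }

 private:
  bool spilled_in_deferred_blocks_;  // discriminates the union below
  union {
    SpillMoveList* spill_moves_;
    BlockSet* blocks_requiring_spill_;
  };
};
```

The asserting accessors play the role of GetSpillMoveInsertionLocations() and
GetListOfBlocksRequiringSpillOperands() in the diff.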