Commit 27f51390 authored by mtrofin's avatar mtrofin Committed by Commit bot

Instead of splintering by chunks of deferred blocks, irrespective of the

range's internal structure, we take a range at a time and splinter based on
the blocks it covers. This is no different in scenarios where a UseInterval
covers non-deferred then deferred blocks. However, in scenarios where
a deferred block jumps to another one, and there are no other blocks
covered by the range in between, this CL will treat the two such blocks
together, while the previous one would treat them separately. This matters
in cases such as deoptimization blocks preceded (not necessarily
consecutively) by a single-instruction (jump) merging block.

Review URL: https://codereview.chromium.org/1415833002

Cr-Commit-Position: refs/heads/master@{#31422}
parent d168a1e8
......@@ -18,40 +18,6 @@ namespace compiler {
namespace {
// Starting from a deferred block, find the last consecutive deferred block.
// Starting from a deferred block, find the last block of the run of
// consecutive deferred blocks (in RPO order) that begins at |block|.
RpoNumber GetLastDeferredBlock(const InstructionBlock *block,
                               const InstructionSequence *code) {
  DCHECK(block->IsDeferred());
  const RpoNumber first = block->rpo_number();
  RpoNumber last = first;
  const int block_count = code->InstructionBlockCount();
  int index = first.ToInt();
  // Advance while blocks stay deferred; |last| tracks the most recent one.
  while (index < block_count) {
    const RpoNumber current = RpoNumber::FromInt(index);
    if (!code->InstructionBlockAt(current)->IsDeferred()) break;
    last = current;
    ++index;
  }
  return last;
}
// Delimits consecutive deferred block sequences.
// For each deferred block, records (via set_last_deferred) the RPO number of
// the last block in its run of consecutive deferred blocks, so later passes
// can treat the whole run as one region.
void AssociateDeferredBlockSequences(InstructionSequence *code) {
for (int blk_id = 0; blk_id < code->InstructionBlockCount(); ++blk_id) {
InstructionBlock *block =
code->InstructionBlockAt(RpoNumber::FromInt(blk_id));
if (!block->IsDeferred()) continue;
RpoNumber last = GetLastDeferredBlock(block, code);
block->set_last_deferred(last);
// We know last is still deferred, and that last + 1, is not (or is an
// invalid index). So skip over last + 1 and continue from last + 2. This
// way, we visit each block exactly once, and the total complexity of this
// function is O(n), n being the number of blocks.
blk_id = last.ToInt() + 1;
}
}
void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
LifetimePosition first_cut, LifetimePosition last_cut) {
......@@ -88,66 +54,78 @@ void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
}
// Splinter all ranges live inside successive deferred blocks.
// No control flow analysis is performed. After the register allocation, we will
// merge the splinters back into the original ranges, and then rely on the
// range connector to properly connect them.
void SplinterRangesInDeferredBlocks(RegisterAllocationData *data) {
InstructionSequence *code = data->code();
int code_block_count = code->InstructionBlockCount();
Zone *zone = data->allocation_zone();
ZoneVector<BitVector *> &in_sets = data->live_in_sets();
for (int i = 0; i < code_block_count; ++i) {
InstructionBlock *block = code->InstructionBlockAt(RpoNumber::FromInt(i));
if (!block->IsDeferred()) continue;
RpoNumber last_deferred = block->last_deferred();
// last_deferred + 1 is not deferred, so no point in visiting it.
i = last_deferred.ToInt() + 1;
LifetimePosition first_cut = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
LifetimePosition last_cut = LifetimePosition::GapFromInstructionIndex(
static_cast<int>(code->instructions().size()));
const BitVector *in_set = in_sets[block->rpo_number().ToInt()];
BitVector ranges_to_splinter(*in_set, zone);
InstructionBlock *last = code->InstructionBlockAt(last_deferred);
for (int deferred_id = block->rpo_number().ToInt();
deferred_id <= last->rpo_number().ToInt(); ++deferred_id) {
const BitVector *ins = in_sets[deferred_id];
ranges_to_splinter.Union(*ins);
const BitVector *outs = LiveRangeBuilder::ComputeLiveOut(
code->InstructionBlockAt(RpoNumber::FromInt(deferred_id)), data);
ranges_to_splinter.Union(*outs);
}
int FirstInstruction(const UseInterval *interval) {
LifetimePosition start = interval->start();
int ret = start.ToInstructionIndex();
if (start.IsInstructionPosition() && start.IsEnd()) {
++ret;
}
return ret;
}
int last_index = last->last_instruction_index();
if (code->InstructionAt(last_index)->opcode() ==
ArchOpcode::kArchDeoptimize) {
++last_index;
}
last_cut = LifetimePosition::GapFromInstructionIndex(last_index);
BitVector::Iterator iterator(&ranges_to_splinter);
int LastInstruction(const UseInterval *interval) {
LifetimePosition end = interval->end();
int ret = end.ToInstructionIndex();
if (end.IsGapPosition() || end.IsStart()) {
--ret;
}
return ret;
}
while (!iterator.Done()) {
int range_id = iterator.Current();
iterator.Advance();
TopLevelLiveRange *range = data->live_ranges()[range_id];
CreateSplinter(range, data, first_cut, last_cut);
// Splinters the deferred-block portions out of a single top-level live range.
// Walks the range's use intervals in order; for each interval, visits the
// instruction blocks it covers (by RPO number) and accumulates maximal runs
// of deferred blocks into [first_cut, last_cut]. Whenever a non-deferred
// block ends such a run, the accumulated region is split off via
// CreateSplinter.
// NOTE: first_cut/last_cut deliberately persist across interval boundaries
// (they are declared outside the while loop and only reset when a
// non-deferred block is seen), so deferred blocks separated only by a hole
// in the range are splintered together — see the commit description.
void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
const InstructionSequence *code = data->code();
UseInterval *interval = range->first_interval();
LifetimePosition first_cut = LifetimePosition::Invalid();
LifetimePosition last_cut = LifetimePosition::Invalid();
while (interval != nullptr) {
UseInterval *next_interval = interval->next();
// Translate the interval's [start, end) into the RPO range of blocks it
// covers.
const InstructionBlock *first_block =
code->GetInstructionBlock(FirstInstruction(interval));
const InstructionBlock *last_block =
code->GetInstructionBlock(LastInstruction(interval));
int first_block_nr = first_block->rpo_number().ToInt();
int last_block_nr = last_block->rpo_number().ToInt();
for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
const InstructionBlock *current_block =
code->InstructionBlockAt(RpoNumber::FromInt(block_id));
if (current_block->IsDeferred()) {
// Extend (or begin) the current deferred run.
if (!first_cut.IsValid()) {
first_cut = LifetimePosition::GapFromInstructionIndex(
current_block->first_instruction_index());
}
last_cut = LifetimePosition::GapFromInstructionIndex(
current_block->last_instruction_index());
} else {
// A non-deferred block terminates the run: splinter it, then reset.
if (first_cut.IsValid()) {
CreateSplinter(range, data, first_cut, last_cut);
first_cut = LifetimePosition::Invalid();
last_cut = LifetimePosition::Invalid();
}
}
}
interval = next_interval;
}
// When the range ends in deferred blocks, first_cut will be valid here.
if (first_cut.IsValid()) {
CreateSplinter(range, data, first_cut, range->End());
}
}
} // namespace
// Entry point: splinter every non-empty, non-splinter top-level live range.
void LiveRangeSeparator::Splinter() {
// NOTE(review): the two calls below are the pre-refactoring entry points
// (block-sequence association + block-driven splintering), while the loop
// that follows is the per-range replacement. Both appearing together looks
// like merged old/new diff text — confirm only one code path should remain.
AssociateDeferredBlockSequences(data()->code());
SplinterRangesInDeferredBlocks(data());
size_t virt_reg_count = data()->live_ranges().size();
for (size_t vreg = 0; vreg < virt_reg_count; ++vreg) {
TopLevelLiveRange *range = data()->live_ranges()[vreg];
// Skip unused slots, empty ranges, and ranges that are already splinters.
if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
continue;
}
SplinterLiveRange(range, data());
}
}
......
......@@ -58,6 +58,8 @@ class LifetimePosition final {
// The low bits of value_ within a half step encode START (0) vs END (1);
// the low bits within a full step distinguish gap from instruction positions.
// Returns true if this lifetime position corresponds to a START value
bool IsStart() const { return (value_ & (kHalfStep - 1)) == 0; }
// Returns true if this lifetime position corresponds to an END value
bool IsEnd() const { return (value_ & (kHalfStep - 1)) == 1; }
// Returns true if this lifetime position corresponds to a gap START value
// (i.e. aligned to a full step: the gap before an instruction).
bool IsFullStart() const { return (value_ & (kStep - 1)) == 0; }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment