Commit 5ae587cf authored by mtrofin's avatar mtrofin Committed by Commit bot

[turbofan] Single entry into deferred

If a deferred block has multiple predecessors, they all have to be
deferred. Otherwise, we can run into a situation where, if a range
that spills only in deferred blocks inserts its spill in the block, and
other ranges need moves inserted by ResolveControlFlow in the predecessors,
the register of the range spilled in the deferred block may be clobbered.

To avoid that, when a deferred block has multiple predecessors, and some
are not deferred, we add a non-deferred block to collect all such edges.

This CL addresses the validator assertion failure in the referenced issue, as
well as the greedy allocator failure, which was caused by the situation
described above.

BUG=v8:4940
LOG=n

Review URL: https://codereview.chromium.org/1912093005

Cr-Commit-Position: refs/heads/master@{#35742}
parent b0530dc9
......@@ -627,7 +627,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
return blocks;
}
void InstructionSequence::ValidateEdgeSplitForm() {
void InstructionSequence::ValidateEdgeSplitForm() const {
// Validate blocks are in edge-split form: no block with multiple successors
// has an edge to a block (== a successor) with more than one predecessors.
for (const InstructionBlock* block : instruction_blocks()) {
......@@ -642,7 +642,7 @@ void InstructionSequence::ValidateEdgeSplitForm() {
}
}
void InstructionSequence::ValidateDeferredBlockExitPaths() {
void InstructionSequence::ValidateDeferredBlockExitPaths() const {
// A deferred block with more than one successor must have all its successors
// deferred.
for (const InstructionBlock* block : instruction_blocks()) {
......@@ -653,7 +653,21 @@ void InstructionSequence::ValidateDeferredBlockExitPaths() {
}
}
void InstructionSequence::ValidateSSA() {
void InstructionSequence::ValidateDeferredBlockEntryPaths() const {
  // Every deferred block that is reached from more than one block must be
  // reached exclusively from other deferred blocks. Otherwise a range that
  // spills only in deferred code could have its spill placed in this block
  // while ResolveControlFlow inserts moves in the (hot) predecessors, which
  // may clobber the register assigned to that range.
  for (const InstructionBlock* successor : instruction_blocks()) {
    if (successor->IsDeferred() && successor->PredecessorCount() > 1) {
      for (const RpoNumber& pred_id : successor->predecessors()) {
        CHECK(InstructionBlockAt(pred_id)->IsDeferred());
      }
    }
  }
}
void InstructionSequence::ValidateSSA() const {
// TODO(mtrofin): We could use a local zone here instead.
BitVector definitions(VirtualRegisterCount(), zone());
for (const Instruction* instruction : *this) {
......
......@@ -1392,9 +1392,10 @@ class InstructionSequence final : public ZoneObject {
void PrintBlock(const RegisterConfiguration* config, int block_id) const;
void PrintBlock(int block_id) const;
void ValidateEdgeSplitForm();
void ValidateDeferredBlockExitPaths();
void ValidateSSA();
void ValidateEdgeSplitForm() const;
void ValidateDeferredBlockExitPaths() const;
void ValidateDeferredBlockEntryPaths() const;
void ValidateSSA() const;
private:
friend std::ostream& operator<<(std::ostream& os,
......
......@@ -1524,6 +1524,7 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
#ifdef DEBUG
debug_name = info()->GetDebugName();
data_->sequence()->ValidateEdgeSplitForm();
data_->sequence()->ValidateDeferredBlockEntryPaths();
data_->sequence()->ValidateDeferredBlockExitPaths();
#endif
......
......@@ -50,7 +50,7 @@ Schedule* RawMachineAssembler::Export() {
PrintF("--- RAW SCHEDULE -------------------------------------------\n");
os << *schedule_;
}
schedule_->EnsureSplitEdgeForm();
schedule_->EnsureCFGWellFormedness();
schedule_->PropagateDeferredMark();
if (FLAG_trace_turbo_scheduler) {
PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
......
......@@ -282,9 +282,7 @@ void BlockAssessments::PerformParallelMoves(const ParallelMove* moves) {
CHECK(it != map_.end());
// The LHS of a parallel move should not have been assigned in this
// parallel move.
// TODO(mtrofin): this check fails when generating code for
// CodeStubAssembler::ChangeUint32ToTagged.
// CHECK(map_for_moves_.find(move->destination()) == map_for_moves_.end());
CHECK(map_for_moves_.find(move->destination()) == map_for_moves_.end());
// Copy the assessment to the destination.
map_for_moves_[move->destination()] = it->second;
}
......@@ -418,7 +416,7 @@ void RegisterAllocatorVerifier::ValidatePendingAssessment(
case Pending: {
// This happens if we have a diamond feeding into another one, and
// the inner one never being used - other than for carrying the value.
PendingAssessment* next = PendingAssessment::cast(contribution);
const PendingAssessment* next = PendingAssessment::cast(contribution);
if (seen.find(pred) == seen.end()) {
worklist.push({next, expected});
seen.insert(pred);
......
......@@ -74,9 +74,9 @@ class PendingAssessment final : public Assessment {
InstructionOperand operand)
: Assessment(Pending), origin_(origin), operand_(operand) {}
static PendingAssessment* cast(Assessment* assessment) {
static const PendingAssessment* cast(const Assessment* assessment) {
CHECK(assessment->kind() == Pending);
return static_cast<PendingAssessment*>(assessment);
return static_cast<const PendingAssessment*>(assessment);
}
const InstructionBlock* origin() const { return origin_; }
......
......@@ -3411,7 +3411,8 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
BitVector* live = live_in_sets[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
int vreg = iterator.Current();
LiveRangeBoundArray* array = finder.ArrayFor(vreg);
for (const RpoNumber& pred : block->predecessors()) {
FindResult result;
const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
......@@ -3628,6 +3629,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
worklist.push(iterator.Current());
}
ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
// Seek the deferred blocks that dominate locations requiring spill operands,
// and spill there. We only need to spill at the start of such blocks.
BitVector done_blocks(
......@@ -3654,13 +3656,18 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
InstructionOperand pred_op = bound->range_->GetAssignedOperand();
RpoNumber spill_block_number = spill_block->rpo_number();
if (done_moves.find(std::make_pair(
spill_block_number, range->vreg())) == done_moves.end()) {
data()->AddGapMove(spill_block->first_instruction_index(),
Instruction::GapPosition::START, pred_op,
spill_operand);
done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
spill_block->mark_needs_frame();
}
}
}
}
}
......
......@@ -315,14 +315,26 @@ void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
SetControlInput(block, sw);
}
void Schedule::EnsureSplitEdgeForm() {
void Schedule::EnsureCFGWellFormedness() {
// Make a copy of all the blocks for the iteration, since adding the split
// edges will allocate new blocks.
BasicBlockVector all_blocks_copy(all_blocks_);
// Insert missing split edge blocks.
for (auto block : all_blocks_copy) {
if (block->PredecessorCount() > 1 && block != end_) {
if (block->PredecessorCount() > 1) {
if (block != end_) {
EnsureSplitEdgeForm(block);
}
if (block->deferred()) {
EnsureDeferredCodeSingleEntryPoint(block);
}
}
}
}
void Schedule::EnsureSplitEdgeForm(BasicBlock* block) {
DCHECK(block->PredecessorCount() > 1 && block != end_);
for (auto current_pred = block->predecessors().begin();
current_pred != block->predecessors().end(); ++current_pred) {
BasicBlock* pred = *current_pred;
......@@ -346,8 +358,42 @@ void Schedule::EnsureSplitEdgeForm() {
}
}
}
}
void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
  // A deferred block fed by several predecessors must be fed only by
  // deferred ones. Otherwise a range that spills exclusively in deferred
  // code may place its spill at the start of this block while
  // ResolveControlFlow inserts moves in the hot predecessors, which may
  // clobber the register of that range. When some predecessors are not
  // deferred, route every incoming edge through a freshly created
  // non-deferred merge block instead.
  DCHECK(block->deferred() && block->PredecessorCount() > 1);

  bool only_deferred_preds = true;
  for (BasicBlock* pred : block->predecessors()) {
    if (!pred->deferred()) {
      only_deferred_preds = false;
      break;
    }
  }
  // All entries already come from deferred code: nothing to do.
  if (only_deferred_preds) return;

  // Build the merge block and make it the sole way into `block`.
  BasicBlock* merger = NewBasicBlock();
  merger->set_control(BasicBlock::kGoto);
  merger->successors().push_back(block);
  for (BasicBlock* pred : block->predecessors()) {
    merger->predecessors().push_back(pred);
    // NOTE(review): clearing drops *all* successors of `pred`; this
    // presumably relies on edge-split form having been established first,
    // so each such predecessor's only successor is `block` — confirm.
    pred->successors().clear();
    pred->successors().push_back(merger);
  }
  merger->set_deferred(false);
  block->predecessors().clear();
  block->predecessors().push_back(merger);
}
void Schedule::PropagateDeferredMark() {
......
......@@ -257,8 +257,12 @@ class Schedule final : public ZoneObject {
friend class BasicBlockInstrumentor;
friend class RawMachineAssembler;
// Ensure properties of the CFG assumed by further stages.
void EnsureCFGWellFormedness();
// Ensure split-edge form for a hand-assembled schedule.
void EnsureSplitEdgeForm();
void EnsureSplitEdgeForm(BasicBlock* block);
// Ensure entry into a deferred block happens from a single hot block.
void EnsureDeferredCodeSingleEntryPoint(BasicBlock* block);
// Copy deferred block markers down as far as possible
void PropagateDeferredMark();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment