Commit a8274000 authored by danno, committed by Commit bot

Ensure Schedules generated by the RawMachineAssembler are in edge-split form

This CL adds an extra pass to the custom RawMachineAssembler pipeline,
run before the special RPO order is computed, that walks the schedule
and inserts extra blocks to guarantee that the control-flow graph is in
edge-split form. It also propagates deferred block marks forward to
these newly inserted blocks where appropriate.

Review URL: https://codereview.chromium.org/1811333002

Cr-Commit-Position: refs/heads/master@{#35014}
parent 9c5940de
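The invariant the CL enforces can be illustrated in isolation. The sketch below is a minimal standalone model in plain C++ with STL containers; the names and data structures (Block, Graph, SplitCriticalEdges) are illustrative only and are not V8's Schedule/BasicBlock API. An edge is "critical" when it leaves a block with more than one successor and enters a block with more than one predecessor; splitting it means routing it through a freshly inserted forwarding block that inherits the predecessor's deferred mark.

// Minimal standalone sketch of edge splitting, using plain STL containers.
// Illustrative names only; this is not V8's Schedule/BasicBlock API.
#include <cstdio>
#include <memory>
#include <vector>

struct Block {
  int id = 0;
  bool deferred = false;
  std::vector<Block*> predecessors;
  std::vector<Block*> successors;
};

struct Graph {
  std::vector<std::unique_ptr<Block>> blocks;
  Block* NewBlock() {
    blocks.push_back(std::make_unique<Block>());
    blocks.back()->id = static_cast<int>(blocks.size()) - 1;
    return blocks.back().get();
  }
  static void AddEdge(Block* from, Block* to) {
    from->successors.push_back(to);
    to->predecessors.push_back(from);
  }
};

// Split every critical edge: an edge from a block with more than one
// successor into a block with more than one predecessor. Mirrors the shape
// of the pass added in this CL: iterate over a snapshot of the block list
// (splitting allocates new blocks) and rewrite one predecessor/successor
// slot per critical edge.
void SplitCriticalEdges(Graph* g) {
  std::vector<Block*> snapshot;
  for (auto& b : g->blocks) snapshot.push_back(b.get());
  for (Block* block : snapshot) {
    if (block->predecessors.size() <= 1) continue;
    for (Block*& pred : block->predecessors) {
      if (pred->successors.size() <= 1) continue;
      Block* split = g->NewBlock();
      split->deferred = pred->deferred;  // forward the deferred hint
      split->predecessors.push_back(pred);
      split->successors.push_back(block);
      for (Block*& succ : pred->successors) {
        if (succ == block) {
          succ = split;  // retarget exactly one matching successor slot
          break;
        }
      }
      pred = split;  // retarget this predecessor slot
    }
  }
}

int main() {
  // A branches to B and also straight to D; B then jumps to D. The edge
  // A->D is critical: A has two successors and D has two predecessors.
  Graph g;
  Block* a = g.NewBlock();
  Block* b = g.NewBlock();
  Block* d = g.NewBlock();
  Graph::AddEdge(a, b);
  Graph::AddEdge(a, d);
  Graph::AddEdge(b, d);
  SplitCriticalEdges(&g);
  for (auto& blk : g.blocks) {
    std::printf("B%d ->", blk->id);
    for (Block* s : blk->successors) std::printf(" B%d", s->id);
    std::printf("\n");
  }
  return 0;
}

Running this on the small branch/merge graph in main() turns the critical edge A->D into A->split->D, which is the same shape Schedule::EnsureSplitEdgeForm produces in the diff below.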
@@ -39,6 +39,17 @@ RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
 Schedule* RawMachineAssembler::Export() {
   // Compute the correct codegen order.
   DCHECK(schedule_->rpo_order()->empty());
+  OFStream os(stdout);
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("--- RAW SCHEDULE -------------------------------------------\n");
+    os << *schedule_;
+  }
+  schedule_->EnsureSplitEdgeForm();
+  schedule_->PropagateDeferredMark();
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
+    os << *schedule_;
+  }
   Scheduler::ComputeSpecialRPO(zone(), schedule_);
   // Invalidate RawMachineAssembler.
   Schedule* schedule = schedule_;
@@ -79,15 +90,17 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
   BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
   for (size_t index = 0; index < case_count; ++index) {
     int32_t case_value = case_values[index];
-    BasicBlock* case_block = Use(case_labels[index]);
+    BasicBlock* case_block = schedule()->NewBasicBlock();
     Node* case_node =
         graph()->NewNode(common()->IfValue(case_value), switch_node);
     schedule()->AddNode(case_block, case_node);
+    schedule()->AddGoto(case_block, Use(case_labels[index]));
     succ_blocks[index] = case_block;
   }
-  BasicBlock* default_block = Use(default_label);
+  BasicBlock* default_block = schedule()->NewBasicBlock();
   Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
   schedule()->AddNode(default_block, default_node);
+  schedule()->AddGoto(default_block, Use(default_label));
   succ_blocks[case_count] = default_block;
   schedule()->AddSwitch(CurrentBlock(), switch_node, succ_blocks, succ_count);
   current_block_ = nullptr;
@@ -298,6 +298,64 @@ void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
   SetControlInput(block, sw);
 }

+void Schedule::EnsureSplitEdgeForm() {
+  // Make a copy of all the blocks for the iteration, since adding the split
+  // edges will allocate new blocks.
+  BasicBlockVector all_blocks_copy(all_blocks_);
+
+  // Insert missing split edge blocks.
+  for (auto block : all_blocks_copy) {
+    if (block->PredecessorCount() > 1 && block != end_) {
+      for (auto current_pred = block->predecessors().begin();
+           current_pred != block->predecessors().end(); ++current_pred) {
+        BasicBlock* pred = *current_pred;
+        if (pred->SuccessorCount() > 1) {
+          // Found a predecessor block with multiple successors.
+          BasicBlock* split_edge_block = NewBasicBlock();
+          split_edge_block->set_control(BasicBlock::kGoto);
+          split_edge_block->successors().push_back(block);
+          split_edge_block->predecessors().push_back(pred);
+          split_edge_block->set_deferred(pred->deferred());
+          *current_pred = split_edge_block;
+          // Find the corresponding successor slot in the predecessor block
+          // and replace it with the split-edge block, but only replace it
+          // once, since each predecessor of the current block is rewritten
+          // in its own iteration of the outer loop.
+          for (auto successor = pred->successors().begin();
+               successor != pred->successors().end(); ++successor) {
+            if (*successor == block) {
+              *successor = split_edge_block;
+              break;
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+void Schedule::PropagateDeferredMark() {
+  // Push forward the deferred block marks through newly inserted blocks and
+  // other improperly marked blocks until a fixed point is reached.
+  // TODO(danno): optimize the propagation
+  bool done = false;
+  while (!done) {
+    done = true;
+    for (auto block : all_blocks_) {
+      if (!block->deferred()) {
+        bool deferred = block->PredecessorCount() > 0;
+        for (auto pred : block->predecessors()) {
+          if (!pred->deferred()) {
+            deferred = false;
+          }
+        }
+        if (deferred) {
+          block->set_deferred(true);
+          done = false;
+        }
+      }
+    }
+  }
+}
+
 void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
   block->AddSuccessor(succ);
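The fixed point that PropagateDeferredMark (above) computes can be modeled in a few lines: a block that is not yet deferred becomes deferred once it has at least one predecessor and every predecessor is deferred, repeated until a pass makes no changes. The following is a minimal standalone sketch using plain STL containers and illustrative names, not V8's API.

// Toy model of the deferred-mark fixed point on a predecessor-list graph.
#include <cstdio>
#include <vector>

int main() {
  // preds[b] lists the predecessor ids of block b. Toy chain 0 -> 1 -> 2,
  // where only block 0 starts out marked deferred.
  std::vector<std::vector<int>> preds = {{}, {0}, {1}};
  std::vector<bool> deferred = {true, false, false};

  bool done = false;
  while (!done) {
    done = true;
    for (size_t b = 0; b < preds.size(); ++b) {
      if (deferred[b]) continue;
      // Deferred iff the block has predecessors and all of them are deferred.
      bool all_deferred = !preds[b].empty();
      for (int p : preds[b]) {
        if (!deferred[p]) all_deferred = false;
      }
      if (all_deferred) {
        deferred[b] = true;  // the mark propagates forward
        done = false;        // keep iterating until a fixed point is reached
      }
    }
  }

  for (size_t b = 0; b < preds.size(); ++b) {
    std::printf("B%zu deferred: %s\n", b, deferred[b] ? "yes" : "no");
  }
  return 0;
}

Here blocks 1 and 2 end up deferred because everything that can reach them is deferred, which is the same criterion the pass applies to hand-assembled schedules.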
@@ -331,15 +389,24 @@ void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
 std::ostream& operator<<(std::ostream& os, const Schedule& s) {
-  for (BasicBlock* block : *s.rpo_order()) {
-    os << "--- BLOCK B" << block->rpo_number();
+  for (BasicBlock* block :
+       ((s.RpoBlockCount() == 0) ? *s.all_blocks() : *s.rpo_order())) {
+    if (block->rpo_number() == -1) {
+      os << "--- BLOCK B" << block->id().ToInt() << " (block id)";
+    } else {
+      os << "--- BLOCK B" << block->rpo_number();
+    }
     if (block->deferred()) os << " (deferred)";
     if (block->PredecessorCount() != 0) os << " <- ";
     bool comma = false;
     for (BasicBlock const* predecessor : block->predecessors()) {
       if (comma) os << ", ";
       comma = true;
-      os << "B" << predecessor->rpo_number();
+      if (predecessor->rpo_number() == -1) {
+        os << "B" << predecessor->id().ToInt();
+      } else {
+        os << "B" << predecessor->rpo_number();
+      }
     }
     os << " ---\n";
     for (Node* node : *block) {
@@ -364,7 +431,11 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
       for (BasicBlock const* successor : block->successors()) {
         if (comma) os << ", ";
         comma = true;
-        os << "B" << successor->rpo_number();
+        if (successor->rpo_number() == -1) {
+          os << "B" << successor->id().ToInt();
+        } else {
+          os << "B" << successor->rpo_number();
+        }
       }
       os << "\n";
     }
@@ -243,6 +243,7 @@ class Schedule final : public ZoneObject {
     return AddSuccessor(block, succ);
   }

+  const BasicBlockVector* all_blocks() const { return &all_blocks_; }
   BasicBlockVector* rpo_order() { return &rpo_order_; }
   const BasicBlockVector* rpo_order() const { return &rpo_order_; }
@@ -254,6 +255,12 @@
  private:
   friend class Scheduler;
   friend class BasicBlockInstrumentor;
+  friend class RawMachineAssembler;
+
+  // Ensure split-edge form for a hand-assembled schedule.
+  void EnsureSplitEdgeForm();
+  // Copy deferred block markers down as far as possible
+  void PropagateDeferredMark();

   void AddSuccessor(BasicBlock* block, BasicBlock* succ);
   void MoveSuccessors(BasicBlock* from, BasicBlock* to);
@@ -331,6 +331,36 @@ TEST(JSFunction) {
   CHECK_EQ(57, Handle<Smi>::cast(result.ToHandleChecked())->value());
 }

+TEST(SplitEdgeBranchMerge) {
+  Isolate* isolate(CcTest::InitIsolateOnce());
+  VoidDescriptor descriptor(isolate);
+  CodeStubAssemblerTester m(isolate, descriptor);
+  CodeStubAssembler::Label l1(&m), merge(&m);
+  m.Branch(m.Int32Constant(1), &l1, &merge);
+  m.Bind(&l1);
+  m.Goto(&merge);
+  m.Bind(&merge);
+  USE(m.GenerateCode());
+}
+
+TEST(SplitEdgeSwitchMerge) {
+  Isolate* isolate(CcTest::InitIsolateOnce());
+  VoidDescriptor descriptor(isolate);
+  CodeStubAssemblerTester m(isolate, descriptor);
+  CodeStubAssembler::Label l1(&m), l2(&m), l3(&m), default_label(&m);
+  CodeStubAssembler::Label* labels[] = {&l1, &l2};
+  int32_t values[] = {1, 2};
+  m.Branch(m.Int32Constant(1), &l3, &l1);
+  m.Bind(&l3);
+  m.Switch(m.Int32Constant(2), &default_label, values, labels, 2);
+  m.Bind(&l1);
+  m.Goto(&l2);
+  m.Bind(&l2);
+  m.Goto(&default_label);
+  m.Bind(&default_label);
+  USE(m.GenerateCode());
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8