Commit 78b55f2e authored by mtrofin's avatar mtrofin Committed by Commit bot

If all the predecessors of a node have, at the last gap, the exact same

moves, we move those to the node, and remove them from the
predecessors ("merge" them to the common node).

Previously, if only some of the moves were common to all predecessors, we
did not merge any of them. This change addresses that case by merging just
the common, conflict-free moves.

The bug linked below should be addressed by this change. The only
difference in codegen before/after the change that introduced the bug
was un-merged moves.

BUG=chromium:549262
LOG=N

Review URL: https://codereview.chromium.org/1527203002

Cr-Commit-Position: refs/heads/master@{#33481}
parent 825ece48
......@@ -211,6 +211,12 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
// things that would prevent moving gap moves across them.
for (RpoNumber& pred_index : block->predecessors()) {
const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
// If the predecessor has more than one successor, we shouldn't attempt to
// move down to this block (one of the successors) any of the gap moves,
// because their effect may be necessary to the other successors.
if (pred->SuccessorCount() > 1) return;
const Instruction* last_instr =
code()->instructions()[pred->last_instruction_index()];
if (last_instr->IsCall()) return;
......@@ -246,15 +252,58 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
}
}
}
if (move_map.empty() || correct_counts != move_map.size()) return;
if (move_map.empty() || correct_counts == 0) return;
// Find insertion point.
Instruction* instr = nullptr;
for (int i = block->first_instruction_index();
i <= block->last_instruction_index(); ++i) {
instr = code()->instructions()[i];
if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
if (correct_counts != move_map.size() ||
!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
break;
}
DCHECK_NOT_NULL(instr);
if (correct_counts != move_map.size()) {
// Moves that are unique to each predecessor won't be pushed to the common
// successor.
OperandSet conflicting_srcs(local_zone());
for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
auto current = iter;
++iter;
if (current->second != block->PredecessorCount()) {
InstructionOperand dest = current->first.second;
// Not all the moves in all the gaps are the same. Maybe some are. If
// there are such moves, we could move them, but the destination of the
// moves staying behind can't appear as a source of a common move,
// because the move staying behind will clobber this destination.
conflicting_srcs.insert(dest);
move_map.erase(current);
}
}
bool changed = false;
do {
// If a common move can't be pushed to the common successor, then its
// destination also can't appear as source to any move being pushed.
changed = false;
for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
auto current = iter;
++iter;
DCHECK_EQ(block->PredecessorCount(), current->second);
if (conflicting_srcs.find(current->first.first) !=
conflicting_srcs.end()) {
conflicting_srcs.insert(current->first.second);
move_map.erase(current);
changed = true;
}
}
} while (changed);
}
if (move_map.empty()) return;
DCHECK_NOT_NULL(instr);
bool gap_initialized = true;
if (instr->parallel_moves()[0] == nullptr ||
......@@ -275,13 +324,13 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
if (move->IsRedundant()) continue;
MoveKey key = {move->source(), move->destination()};
auto it = move_map.find(key);
USE(it);
DCHECK(it != move_map.end());
if (it != move_map.end()) {
if (first_iteration) {
moves->AddMove(move->source(), move->destination());
}
move->Eliminate();
}
}
first_iteration = false;
}
// Compress.
......
......@@ -246,6 +246,82 @@ TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) {
}
// Two predecessors share the move (0 -> 1); each also carries one move
// unique to it. The optimizer should push the shared move down into the
// common successor while leaving each unique move in its predecessor's gap.
TEST_F(MoveOptimizerTest, SubsetMovesMerge) {
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  // Predecessor b1: the common move plus one move of its own.
  StartBlock();
  EndBlock(Jump(2));
  Instruction* gap_b1 = LastInstruction();
  AddMove(gap_b1, Reg(0), Reg(1));  // common to both predecessors
  AddMove(gap_b1, Reg(2), Reg(3));  // unique to b1

  // Predecessor b2: the common move plus one move of its own.
  StartBlock();
  EndBlock(Jump(1));
  Instruction* gap_b2 = LastInstruction();
  AddMove(gap_b2, Reg(0), Reg(1));  // common to both predecessors
  AddMove(gap_b2, Reg(4), Reg(5));  // unique to b2

  StartBlock();
  EndBlock(Last());

  Instruction* merge_point = LastInstruction();

  Optimize();

  // The shared move was merged into the successor's first gap...
  ParallelMove* merged = merge_point->parallel_moves()[0];
  CHECK_EQ(1, NonRedundantSize(merged));
  CHECK(Contains(merged, Reg(0), Reg(1)));

  // ...and each predecessor retains only its unique move.
  ParallelMove* remaining_b1 = gap_b1->parallel_moves()[0];
  CHECK_EQ(1, NonRedundantSize(remaining_b1));
  CHECK(Contains(remaining_b1, Reg(2), Reg(3)));

  ParallelMove* remaining_b2 = gap_b2->parallel_moves()[0];
  CHECK_EQ(1, NonRedundantSize(remaining_b2));
  CHECK(Contains(remaining_b2, Reg(4), Reg(5)));
}
// Both predecessors share the moves (0 -> 1) and (4 -> 5), but b1 also has
// (2 -> 0), which clobbers the source of the shared move (0 -> 1). That
// shared move therefore must NOT be merged down; only the conflict-free
// common move (4 -> 5) may be pushed to the successor.
TEST_F(MoveOptimizerTest, GapConflictSubsetMovesDoNotMerge) {
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  // Predecessor b1: the two shared moves plus (2 -> 0), whose destination
  // conflicts with the source of the shared move (0 -> 1).
  StartBlock();
  EndBlock(Jump(2));
  Instruction* last_move_b1 = LastInstruction();
  AddMove(last_move_b1, Reg(0), Reg(1));
  AddMove(last_move_b1, Reg(2), Reg(0));
  AddMove(last_move_b1, Reg(4), Reg(5));

  // Predecessor b2: only the two shared moves.
  StartBlock();
  EndBlock(Jump(1));
  Instruction* last_move_b2 = LastInstruction();
  AddMove(last_move_b2, Reg(0), Reg(1));
  AddMove(last_move_b2, Reg(4), Reg(5));

  StartBlock();
  EndBlock(Last());

  Instruction* last = LastInstruction();

  Optimize();

  // Only the conflict-free common move was merged into the successor.
  ParallelMove* last_move = last->parallel_moves()[0];
  CHECK_EQ(1, NonRedundantSize(last_move));
  CHECK(Contains(last_move, Reg(4), Reg(5)));

  // b1 keeps both the conflicting shared move and its own move.
  ParallelMove* b1_move = last_move_b1->parallel_moves()[0];
  CHECK_EQ(2, NonRedundantSize(b1_move));
  CHECK(Contains(b1_move, Reg(0), Reg(1)));
  CHECK(Contains(b1_move, Reg(2), Reg(0)));

  // b2 keeps the conflicting shared move (0 -> 1).
  // Fixed: the original asserted against b1_move here, so b2's gap was
  // never actually checked.
  ParallelMove* b2_move = last_move_b2->parallel_moves()[0];
  CHECK_EQ(1, NonRedundantSize(b2_move));
  CHECK(Contains(b2_move, Reg(0), Reg(1)));
}
} // namespace compiler
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment