Commit 46878c1d authored by mtrofin's avatar mtrofin Committed by Commit bot

When we split above an instruction (for example because of splintering),

we may introduce moves that are redundant in the context of moves on
subsequent instructions. Currently, we only detect such redundancies by
allowing moves to skip over nop instructions (true nops, with no inputs
or outputs). We can also skip over other cases — for example, over a
constant definition (a nop with an output) — since whatever moves happen
above it do not influence the instruction's outcome.

We may be able to handle other cases, too - in subsequent CLs.

BUG=

Review URL: https://codereview.chromium.org/1422333003

Cr-Commit-Position: refs/heads/master@{#31662}
parent 8e35d8eb
......@@ -21,11 +21,48 @@ struct MoveKeyCompare {
}
};
struct OperandCompare {
bool operator()(const InstructionOperand& a,
const InstructionOperand& b) const {
return a.CompareCanonicalized(b);
}
};
typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
bool GapsCanMoveOver(Instruction* instr) { return instr->IsNop(); }
// Returns true when parallel moves may be hoisted across |instr| without
// changing observable behavior. True nops are always transparent. Otherwise
// the instruction must be an effect-free kArchNop (e.g. a constant
// definition) whose own gap moves do not read or write any operand the
// instruction itself touches. |zone| supplies temporary allocation for the
// operand set.
bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
  if (instr->IsNop()) return true;
  bool clobbers = instr->ClobbersTemps() || instr->ClobbersRegisters() ||
                  instr->ClobbersDoubleRegisters();
  if (clobbers) return false;
  if (instr->arch_opcode() != ArchOpcode::kArchNop) return false;

  // Gather every operand the instruction references (inputs, outputs and
  // temps) into one canonicalized set.
  ZoneSet<InstructionOperand, OperandCompare> touched(zone);
  for (size_t i = 0; i < instr->InputCount(); ++i) {
    touched.insert(*instr->InputAt(i));
  }
  for (size_t i = 0; i < instr->OutputCount(); ++i) {
    touched.insert(*instr->OutputAt(i));
  }
  for (size_t i = 0; i < instr->TempCount(); ++i) {
    touched.insert(*instr->TempAt(i));
  }

  // If any of the instruction's own gap moves sources from, or targets, a
  // touched operand, hoisting across it would be unsafe.
  for (int pos = Instruction::GapPosition::FIRST_GAP_POSITION;
       pos <= Instruction::GapPosition::LAST_GAP_POSITION; ++pos) {
    ParallelMove* moves = instr->parallel_moves()[pos];
    if (moves == nullptr) continue;
    for (MoveOperands* move : *moves) {
      if (touched.count(move->source()) > 0 ||
          touched.count(move->destination()) > 0) {
        return false;
      }
    }
  }
  return true;
}
int FindFirstNonEmptySlot(Instruction* instr) {
......@@ -135,7 +172,7 @@ void MoveOptimizer::CompressBlock(InstructionBlock* block) {
std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
}
prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
if (GapsCanMoveOver(instr)) continue;
if (GapsCanMoveOver(instr, local_zone())) continue;
if (prev_instr != nullptr) {
to_finalize_.push_back(prev_instr);
prev_instr = nullptr;
......@@ -198,7 +235,8 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
for (int i = block->first_instruction_index();
i <= block->last_instruction_index(); ++i) {
instr = code()->instructions()[i];
if (!GapsCanMoveOver(instr) || !instr->AreMovesRedundant()) break;
if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
break;
}
DCHECK(instr != nullptr);
bool gap_initialized = true;
......
......@@ -207,6 +207,45 @@ TEST_F(MoveOptimizerTest, SimpleMergeCycle) {
CHECK(Contains(move, Reg(1), Reg(0)));
}
// A constant definition (an arch nop with only an output) must not block the
// optimizer from pushing gap moves past it: moves added before the constant
// definition should end up merged into the START gap of the following
// instruction, with the duplicated pair collapsing to one real assignment
// plus one redundant move.
TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) {
  StartBlock();
  int const_index = 1;
  DefineConstant(const_index);
  Instruction* const_def = LastInstruction();
  AddMove(const_def, Reg(1), Reg(0));
  Instruction* nop = EmitNop();
  AddMove(nop, Const(const_index), Reg(0));
  AddMove(nop, Reg(0), Reg(1));
  EndBlock(Last());
  Optimize();

  // The gaps on the constant definition must have been drained.
  ParallelMove* def_start =
      const_def->GetParallelMove(Instruction::GapPosition::START);
  ParallelMove* def_end =
      const_def->GetParallelMove(Instruction::GapPosition::END);
  CHECK(def_start == nullptr || def_start->size() == 0);
  CHECK(def_end == nullptr || def_end->size() == 0);

  // Both moves landed on the nop's START gap.
  ParallelMove* nop_start =
      nop->GetParallelMove(Instruction::GapPosition::START);
  CHECK(nop_start->size() == 2);
  int redundant_count = 0;
  int assignment_count = 0;
  for (MoveOperands* move : *nop_start) {
    if (!move->IsRedundant()) {
      // The surviving move is the constant-to-register assignment.
      CHECK(move->destination().IsRegister());
      CHECK(move->source().IsConstant());
      ++assignment_count;
    } else {
      ++redundant_count;
    }
  }
  CHECK_EQ(1, redundant_count);
  CHECK_EQ(1, assignment_count);
}
} // namespace compiler
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment