Commit d33b8f20 authored by mtrofin, committed by Commit bot

[turbofan] Remove some dead code in move optimizer.

The second temp vector was unused, and the one we do use is only
needed in certain methods, so its scope can be further restricted.

I'm curious whether there is really any value in having the temp
vector at all instead of allocating a function-scoped local. Will
verify that separately.

Review URL: https://codereview.chromium.org/1533423003

Cr-Commit-Position: refs/heads/master@{#32998}
parent 98f819c3
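Before this change, callers had to thread a pre-allocated temp vector through CompressMoves as an extra parameter; afterwards the single surviving vector is a private member fetched through local_vector() exactly where it is needed, and every user must hand it back empty. A minimal stand-alone sketch of the resulting pattern, with hypothetical names (ScratchUser, Process, scratch) that are not V8 code:

#include <cassert>
#include <vector>

class ScratchUser {
 public:
  void Process(const std::vector<int>& input) {
    std::vector<int>& work = scratch();  // was: passed in as a parameter
    assert(work.empty());                // entry invariant
    for (int v : input) {
      if (v != 0) work.push_back(v);     // collect interesting items
    }
    // ... consume `work` ...
    work.clear();                        // exit invariant: leave it empty
  }

 private:
  std::vector<int>& scratch() { return scratch_; }
  std::vector<int> scratch_;             // reused across calls; avoids reallocation
};

Reusing one member vector keeps the allocation in the owner's zone and out of hot per-call paths, which is exactly the trade-off the commit message says will be re-examined separately.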
@@ -86,8 +86,7 @@ MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
     : local_zone_(local_zone),
       code_(code),
       to_finalize_(local_zone),
-      temp_vector_0_(local_zone),
-      temp_vector_1_(local_zone) {}
+      local_vector_(local_zone) {}
 
 void MoveOptimizer::Run() {
@@ -118,22 +117,23 @@ void MoveOptimizer::Run() {
 }
 
-void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
-                                  ParallelMove* right) {
-  DCHECK(eliminated->empty());
+void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
+  MoveOpVector& eliminated = local_vector();
+  DCHECK(eliminated.empty());
   if (!left->empty()) {
     // Modify the right moves in place and collect moves that will be killed by
     // merging the two gaps.
     for (MoveOperands* move : *right) {
       if (move->IsRedundant()) continue;
       MoveOperands* to_eliminate = left->PrepareInsertAfter(move);
-      if (to_eliminate != nullptr) eliminated->push_back(to_eliminate);
+      if (to_eliminate != nullptr) eliminated.push_back(to_eliminate);
     }
     // Eliminate dead moves.
-    for (MoveOperands* to_eliminate : *eliminated) {
+    for (MoveOperands* to_eliminate : eliminated) {
       to_eliminate->Eliminate();
     }
-    eliminated->clear();
+    eliminated.clear();
   }
   // Add all possibly modified moves from right side.
   for (MoveOperands* move : *right) {
@@ -142,14 +142,13 @@ void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
   }
   // Nuke right.
   right->clear();
+  DCHECK(eliminated.empty());
 }
 
 // Smash all consecutive moves into the left most move slot and accumulate them
 // as much as possible across instructions.
 void MoveOptimizer::CompressBlock(InstructionBlock* block) {
-  MoveOpVector& temp_vector = temp_vector_0();
-  DCHECK(temp_vector.empty());
   Instruction* prev_instr = nullptr;
   for (int index = block->code_start(); index < block->code_end(); ++index) {
     Instruction* instr = code()->instructions()[index];
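With the scratch vector now shared through local_vector(), CompressMoves asserts it is empty both on entry and on exit. One way such a borrow-and-return invariant can be packaged is an RAII guard; the ScratchScope below is purely a hypothetical illustration of the paired DCHECKs, not part of this change:

#include <cassert>
#include <vector>

// Hypothetical helper: borrow a shared scratch vector for one scope and
// assert it is taken empty and handed back empty.
template <typename T>
class ScratchScope {
 public:
  explicit ScratchScope(std::vector<T>& v) : v_(v) { assert(v_.empty()); }
  ~ScratchScope() { assert(v_.empty()); }
  std::vector<T>& get() { return v_; }

 private:
  std::vector<T>& v_;
};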
@@ -162,12 +161,12 @@ void MoveOptimizer::CompressBlock(InstructionBlock* block) {
       for (++i; i <= Instruction::LAST_GAP_POSITION; ++i) {
         ParallelMove* move = instr->parallel_moves()[i];
         if (move == nullptr) continue;
-        CompressMoves(&temp_vector, left, move);
+        CompressMoves(left, move);
       }
       if (prev_instr != nullptr) {
         // Smash left into prev_instr, killing left.
         ParallelMove* pred_moves = prev_instr->parallel_moves()[0];
-        CompressMoves(&temp_vector, pred_moves, left);
+        CompressMoves(pred_moves, left);
       }
     }
     if (prev_instr != nullptr) {
@@ -274,8 +273,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
   }
   // Compress.
   if (!gap_initialized) {
-    CompressMoves(&temp_vector_0(), instr->parallel_moves()[0],
-                  instr->parallel_moves()[1]);
+    CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
   }
 }
@@ -302,8 +300,9 @@ bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
 // Split multiple loads of the same constant or stack slot off into the second
 // slot and keep remaining moves in the first slot.
 void MoveOptimizer::FinalizeMoves(Instruction* instr) {
-  MoveOpVector& loads = temp_vector_0();
+  MoveOpVector& loads = local_vector();
   DCHECK(loads.empty());
+
   // Find all the loads.
   for (MoveOperands* move : *instr->parallel_moves()[0]) {
     if (move->IsRedundant()) continue;
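The comment above FinalizeMoves states the splitting rule: the first load of each constant or stack slot stays in the first parallel-move slot, and repeated loads of the same source go to the second. A rough stand-alone illustration of that rule on plain pairs (SplitDuplicateLoads and the string-keyed LoadMove are hypothetical, not V8's representation):

#include <set>
#include <string>
#include <utility>
#include <vector>

using LoadMove = std::pair<std::string, std::string>;  // {destination, source}

// First load of each source stays in slot0; later loads of the same source
// are split off into slot1.
void SplitDuplicateLoads(std::vector<LoadMove>& slot0,
                         std::vector<LoadMove>& slot1) {
  std::set<std::string> seen;
  std::vector<LoadMove> kept;
  for (const LoadMove& m : slot0) {
    if (seen.insert(m.second).second) {
      kept.push_back(m);   // first load of this source
    } else {
      slot1.push_back(m);  // duplicate: move to the second slot
    }
  }
  slot0.swap(kept);
}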
......
@@ -24,12 +24,10 @@ class MoveOptimizer final {
   InstructionSequence* code() const { return code_; }
   Zone* local_zone() const { return local_zone_; }
   Zone* code_zone() const { return code()->zone(); }
-  MoveOpVector& temp_vector_0() { return temp_vector_0_; }
-  MoveOpVector& temp_vector_1() { return temp_vector_1_; }
+  MoveOpVector& local_vector() { return local_vector_; }
 
   void CompressBlock(InstructionBlock* blocke);
-  void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
-                     ParallelMove* right);
+  void CompressMoves(ParallelMove* left, ParallelMove* right);
   const Instruction* LastInstruction(const InstructionBlock* block) const;
   void OptimizeMerge(InstructionBlock* block);
   void FinalizeMoves(Instruction* instr);
@@ -37,8 +35,7 @@ class MoveOptimizer final {
   Zone* const local_zone_;
   InstructionSequence* const code_;
   Instructions to_finalize_;
-  MoveOpVector temp_vector_0_;
-  MoveOpVector temp_vector_1_;
+  MoveOpVector local_vector_;
 
   DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
 };
......