Commit f8eb4e69 authored by mtrofin, committed by Commit bot

[turbofan] More "auto" keyword cleanup

BUG=

Review URL: https://codereview.chromium.org/1738973002

Cr-Commit-Position: refs/heads/master@{#34363}
parent dd6f62e6
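
The change is mechanical: range-based for loops (and a handful of local declarations) that used `auto` now spell out the concrete type, so the element type is visible at the loop header. A minimal standalone sketch of the before/after shape, using toy types rather than V8's classes:

#include <cassert>
#include <vector>

struct Block {
  bool deferred = false;
  bool IsDeferred() const { return deferred; }
};

// Before: the element type (a raw pointer) is hidden behind auto.
int CountLiveBefore(const std::vector<Block*>& blocks) {
  int live = 0;
  for (auto block : blocks) {
    if (block->IsDeferred()) continue;
    ++live;
  }
  return live;
}

// After: Block* makes it visible that each iteration copies a pointer,
// not the pointed-to object.
int CountLiveAfter(const std::vector<Block*>& blocks) {
  int live = 0;
  for (Block* block : blocks) {
    if (block->IsDeferred()) continue;
    ++live;
  }
  return live;
}

int main() {
  Block a;
  Block b;
  b.deferred = true;
  std::vector<Block*> blocks = {&a, &b};
  assert(CountLiveBefore(blocks) == CountLiveAfter(blocks));
  return 0;
}
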
......@@ -87,7 +87,8 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
for (auto& inlined : info->inlined_functions()) {
for (const CompilationInfo::InlinedFunctionHolder& inlined :
info->inlined_functions()) {
if (!inlined.shared_info.is_identical_to(info->shared_info())) {
DefineDeoptimizationLiteral(inlined.shared_info);
}
......@@ -96,7 +97,8 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Define deoptimization literals for all unoptimized code objects of inlined
// functions. This ensures unoptimized code is kept alive by optimized code.
for (auto& inlined : info->inlined_functions()) {
for (const CompilationInfo::InlinedFunctionHolder& inlined :
info->inlined_functions()) {
if (!inlined.shared_info.is_identical_to(info->shared_info())) {
DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
}
......@@ -104,7 +106,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Assemble all non-deferred blocks, followed by deferred ones.
for (int deferred = 0; deferred < 2; ++deferred) {
for (auto const block : code()->instruction_blocks()) {
for (const InstructionBlock* block : code()->instruction_blocks()) {
if (block->IsDeferred() == (deferred == 0)) {
continue;
}
......@@ -239,7 +241,7 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
int stackSlotToSpillSlotDelta =
frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
for (auto& operand : references->reference_operands()) {
for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
int index = LocationOperand::cast(operand).index();
DCHECK(index >= 0);
......
......@@ -19,9 +19,9 @@ void FrameElider::Run() {
void FrameElider::MarkBlocks() {
for (auto block : instruction_blocks()) {
for (InstructionBlock* block : instruction_blocks()) {
if (block->needs_frame()) continue;
for (auto i = block->code_start(); i < block->code_end(); ++i) {
for (int i = block->code_start(); i < block->code_end(); ++i) {
if (InstructionAt(i)->IsCall() ||
InstructionAt(i)->opcode() == ArchOpcode::kArchDeoptimize) {
block->mark_needs_frame();
......@@ -39,7 +39,7 @@ void FrameElider::PropagateMarks() {
void FrameElider::MarkDeConstruction() {
for (auto block : instruction_blocks()) {
for (InstructionBlock* block : instruction_blocks()) {
if (block->needs_frame()) {
// Special case: The start block needs a frame.
if (block->predecessors().empty()) {
......@@ -47,7 +47,7 @@ void FrameElider::MarkDeConstruction() {
}
// Find "frame -> no frame" transitions, inserting frame
// deconstructions.
for (auto succ : block->successors()) {
for (RpoNumber& succ : block->successors()) {
if (!InstructionBlockAt(succ)->needs_frame()) {
DCHECK_EQ(1U, block->SuccessorCount());
block->mark_must_deconstruct_frame();
......@@ -55,7 +55,7 @@ void FrameElider::MarkDeConstruction() {
}
} else {
// Find "no frame -> frame" transitions, inserting frame constructions.
for (auto succ : block->successors()) {
for (RpoNumber& succ : block->successors()) {
if (InstructionBlockAt(succ)->needs_frame()) {
DCHECK_NE(1U, block->SuccessorCount());
InstructionBlockAt(succ)->mark_must_construct_frame();
......@@ -68,7 +68,7 @@ void FrameElider::MarkDeConstruction() {
bool FrameElider::PropagateInOrder() {
bool changed = false;
for (auto block : instruction_blocks()) {
for (InstructionBlock* block : instruction_blocks()) {
changed |= PropagateIntoBlock(block);
}
return changed;
......@@ -77,7 +77,7 @@ bool FrameElider::PropagateInOrder() {
bool FrameElider::PropagateReversed() {
bool changed = false;
for (auto block : base::Reversed(instruction_blocks())) {
for (InstructionBlock* block : base::Reversed(instruction_blocks())) {
changed |= PropagateIntoBlock(block);
}
return changed;
......@@ -94,7 +94,7 @@ bool FrameElider::PropagateIntoBlock(InstructionBlock* block) {
// Propagate towards the end ("downwards") if there is a predecessor needing
// a frame, but don't "bleed" from deferred code to non-deferred code.
for (auto pred : block->predecessors()) {
for (RpoNumber& pred : block->predecessors()) {
if (InstructionBlockAt(pred)->needs_frame() &&
(!InstructionBlockAt(pred)->IsDeferred() || block->IsDeferred())) {
block->mark_needs_frame();
......@@ -104,7 +104,7 @@ bool FrameElider::PropagateIntoBlock(InstructionBlock* block) {
// Propagate towards start ("upwards") if there are successors and all of
// them need a frame.
for (auto succ : block->successors()) {
for (RpoNumber& succ : block->successors()) {
if (!InstructionBlockAt(succ)->needs_frame()) return false;
}
block->mark_needs_frame();
......
......@@ -29,7 +29,7 @@ void GapResolver::Resolve(ParallelMove* moves) const {
auto it =
std::remove_if(moves->begin(), moves->end(), std::ptr_fun(IsRedundant));
moves->erase(it, moves->end());
for (auto move : *moves) {
for (MoveOperands* move : *moves) {
if (!move->IsEliminated()) PerformMove(moves, move);
}
}
......@@ -53,7 +53,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, unpending move with a source the same as this one's
// destination blocks this one so recursively perform all such moves.
for (auto other : *moves) {
for (MoveOperands* other : *moves) {
if (other->Blocks(destination) && !other->IsPending()) {
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does not
......@@ -103,7 +103,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// Any unperformed (including pending) move with a source of either this
// move's source or destination needs to have their source changed to
// reflect the state of affairs after the swap.
for (auto other : *moves) {
for (MoveOperands* other : *moves) {
if (other->Blocks(source)) {
other->set_source(destination);
} else if (other->Blocks(destination)) {
......
......@@ -231,8 +231,8 @@ class GraphC1Visualizer {
void PrintInputs(InputIterator* i, int count, const char* prefix);
void PrintType(Node* node);
void PrintLiveRange(LiveRange* range, const char* type, int vreg);
void PrintLiveRangeChain(TopLevelLiveRange* range, const char* type);
void PrintLiveRange(const LiveRange* range, const char* type, int vreg);
void PrintLiveRangeChain(const TopLevelLiveRange* range, const char* type);
class Tag final BASE_EMBEDDED {
public:
......@@ -505,31 +505,30 @@ void GraphC1Visualizer::PrintLiveRanges(const char* phase,
Tag tag(this, "intervals");
PrintStringProperty("name", phase);
for (auto range : data->fixed_double_live_ranges()) {
for (const TopLevelLiveRange* range : data->fixed_double_live_ranges()) {
PrintLiveRangeChain(range, "fixed");
}
for (auto range : data->fixed_live_ranges()) {
for (const TopLevelLiveRange* range : data->fixed_live_ranges()) {
PrintLiveRangeChain(range, "fixed");
}
for (auto range : data->live_ranges()) {
for (const TopLevelLiveRange* range : data->live_ranges()) {
PrintLiveRangeChain(range, "object");
}
}
void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
void GraphC1Visualizer::PrintLiveRangeChain(const TopLevelLiveRange* range,
const char* type) {
if (range == nullptr || range->IsEmpty()) return;
int vreg = range->vreg();
for (LiveRange* child = range; child != nullptr; child = child->next()) {
for (const LiveRange* child = range; child != nullptr;
child = child->next()) {
PrintLiveRange(child, type, vreg);
}
}
void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
int vreg) {
if (range != nullptr && !range->IsEmpty()) {
PrintIndent();
......@@ -545,7 +544,7 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
os_ << " \"" << assigned_reg.ToString() << "\"";
}
} else if (range->spilled()) {
auto top = range->TopLevel();
const TopLevelLiveRange* top = range->TopLevel();
int index = -1;
if (top->HasSpillRange()) {
index = kMaxInt; // This hasn't been set yet.
......@@ -564,8 +563,8 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
}
os_ << " " << vreg;
for (auto interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
for (const UseInterval* interval = range->first_interval();
interval != nullptr; interval = interval->next()) {
os_ << " [" << interval->start().value() << ", "
<< interval->end().value() << "[";
}
......
......@@ -25,7 +25,7 @@ Graph::Graph(Zone* zone)
void Graph::Decorate(Node* node) {
for (auto const decorator : decorators_) {
for (GraphDecorator* const decorator : decorators_) {
decorator->Decorate(node);
}
}
......
......@@ -115,7 +115,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
if (IsBlockTerminator(instr)) {
// Make sure that basic block terminators are not moved by adding them
// as successor of every instruction.
for (auto node : graph_) {
for (ScheduleGraphNode* node : graph_) {
node->AddSuccessor(new_node);
}
} else if (IsFixedRegisterParameter(instr)) {
......@@ -134,7 +134,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
if (last_side_effect_instr_ != nullptr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
for (auto load : pending_loads_) {
for (ScheduleGraphNode* load : pending_loads_) {
load->AddSuccessor(new_node);
}
pending_loads_.clear();
......@@ -149,7 +149,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
}
// Look for operand dependencies.
for (auto node : graph_) {
for (ScheduleGraphNode* node : graph_) {
if (HasOperandDependency(node->instruction(), instr)) {
node->AddSuccessor(new_node);
}
......@@ -168,7 +168,7 @@ void InstructionScheduler::ScheduleBlock() {
ComputeTotalLatencies();
// Add nodes which don't have dependencies to the ready list.
for (auto node : graph_) {
for (ScheduleGraphNode* node : graph_) {
if (!node->HasUnscheduledPredecessor()) {
ready_list.AddNode(node);
}
......@@ -177,12 +177,12 @@ void InstructionScheduler::ScheduleBlock() {
// Go through the ready list and schedule the instructions.
int cycle = 0;
while (!ready_list.IsEmpty()) {
auto candidate = ready_list.PopBestCandidate(cycle);
ScheduleGraphNode* candidate = ready_list.PopBestCandidate(cycle);
if (candidate != nullptr) {
sequence()->AddInstruction(candidate->instruction());
for (auto successor : candidate->successors()) {
for (ScheduleGraphNode* successor : candidate->successors()) {
successor->DropUnscheduledPredecessor();
successor->set_start_cycle(
std::max(successor->start_cycle(),
......@@ -296,10 +296,10 @@ bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
void InstructionScheduler::ComputeTotalLatencies() {
for (auto node : base::Reversed(graph_)) {
for (ScheduleGraphNode* node : base::Reversed(graph_)) {
int max_latency = 0;
for (auto successor : node->successors()) {
for (ScheduleGraphNode* successor : node->successors()) {
DCHECK(successor->total_latency() != -1);
if (successor->total_latency() > max_latency) {
max_latency = successor->total_latency();
......
......@@ -114,7 +114,7 @@ std::ostream& operator<<(std::ostream& os,
return os << "[constant:" << ConstantOperand::cast(op).virtual_register()
<< "]";
case InstructionOperand::IMMEDIATE: {
auto imm = ImmediateOperand::cast(op);
ImmediateOperand imm = ImmediateOperand::cast(op);
switch (imm.type()) {
case ImmediateOperand::INLINE:
return os << "#" << imm.inline_value();
......@@ -124,7 +124,7 @@ std::ostream& operator<<(std::ostream& os,
}
case InstructionOperand::EXPLICIT:
case InstructionOperand::ALLOCATED: {
auto allocated = LocationOperand::cast(op);
LocationOperand allocated = LocationOperand::cast(op);
if (op.IsStackSlot()) {
os << "[stack:" << LocationOperand::cast(op).index();
} else if (op.IsDoubleStackSlot()) {
......@@ -214,7 +214,7 @@ std::ostream& operator<<(std::ostream& os,
bool ParallelMove::IsRedundant() const {
for (auto move : *this) {
for (MoveOperands* move : *this) {
if (!move->IsRedundant()) return false;
}
return true;
......@@ -224,7 +224,7 @@ bool ParallelMove::IsRedundant() const {
MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
MoveOperands* replacement = nullptr;
MoveOperands* to_eliminate = nullptr;
for (auto curr : *this) {
for (MoveOperands* curr : *this) {
if (curr->IsEliminated()) continue;
if (curr->destination().EqualsCanonicalized(move->source())) {
DCHECK(!replacement);
......@@ -321,7 +321,7 @@ std::ostream& operator<<(std::ostream& os,
const PrintableParallelMove& printable) {
const ParallelMove& pm = *printable.parallel_move_;
bool first = true;
for (auto move : pm) {
for (MoveOperands* move : pm) {
if (move->IsEliminated()) continue;
if (!first) os << " ";
first = false;
......@@ -346,7 +346,7 @@ std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
PrintableInstructionOperand poi = {
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
InstructionOperand()};
for (auto& op : pm.reference_operands_) {
for (const InstructionOperand& op : pm.reference_operands_) {
if (!first) {
os << ";";
} else {
......@@ -637,12 +637,12 @@ void InstructionSequence::Validate() {
void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
int ao = 0;
for (auto const block : *blocks) {
for (InstructionBlock* const block : *blocks) {
if (!block->IsDeferred()) {
block->set_ao_number(RpoNumber::FromInt(ao++));
}
}
for (auto const block : *blocks) {
for (InstructionBlock* const block : *blocks) {
if (block->IsDeferred()) {
block->set_ao_number(RpoNumber::FromInt(ao++));
}
......@@ -732,7 +732,7 @@ InstructionBlock* InstructionSequence::GetInstructionBlock(
if (end == block_starts_.end() || *end > instruction_index) --end;
DCHECK(*end <= instruction_index);
size_t index = std::distance(begin, end);
auto block = instruction_blocks_->at(index);
InstructionBlock* block = instruction_blocks_->at(index);
DCHECK(block->code_start() <= instruction_index &&
instruction_index < block->code_end());
return block;
......@@ -863,15 +863,15 @@ void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
os << " instructions: [" << block->code_start() << ", " << block->code_end()
<< ")\n predecessors:";
for (auto pred : block->predecessors()) {
for (RpoNumber pred : block->predecessors()) {
os << " B" << pred.ToInt();
}
os << "\n";
for (auto phi : block->phis()) {
for (const PhiInstruction* phi : block->phis()) {
PrintableInstructionOperand printable_op = {config, phi->output()};
os << " phi: " << printable_op << " =";
for (auto input : phi->operands()) {
for (int input : phi->operands()) {
os << " v" << input;
}
os << "\n";
......@@ -888,7 +888,7 @@ void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
os << " " << buf.start() << ": " << printable_instr << "\n";
}
for (auto succ : block->successors()) {
for (RpoNumber succ : block->successors()) {
os << " B" << succ.ToInt();
}
os << "\n";
......
......@@ -29,13 +29,13 @@ namespace compiler {
static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
auto const uses = def->uses();
const Node::Uses uses = def->uses();
return std::find(uses.begin(), uses.end(), use) != uses.end();
}
static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
auto const inputs = use->inputs();
const Node::Inputs inputs = use->inputs();
return std::find(inputs.begin(), inputs.end(), def) != inputs.end();
}
......@@ -194,7 +194,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kBranch: {
// Branch uses are IfTrue and IfFalse.
int count_true = 0, count_false = 0;
for (auto use : node->uses()) {
for (const Node* use : node->uses()) {
CHECK(use->opcode() == IrOpcode::kIfTrue ||
use->opcode() == IrOpcode::kIfFalse);
if (use->opcode() == IrOpcode::kIfTrue) ++count_true;
......@@ -232,10 +232,10 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kSwitch: {
// Switch uses are Case and Default.
int count_case = 0, count_default = 0;
for (auto use : node->uses()) {
for (const Node* use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfValue: {
for (auto user : node->uses()) {
for (const Node* user : node->uses()) {
if (user != use && user->opcode() == IrOpcode::kIfValue) {
CHECK_NE(OpParameter<int32_t>(use->op()),
OpParameter<int32_t>(user->op()));
......@@ -283,7 +283,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kReturn:
case IrOpcode::kThrow:
// Deoptimize, Return and Throw uses are End.
for (auto use : node->uses()) {
for (const Node* use : node->uses()) {
CHECK_EQ(IrOpcode::kEnd, use->opcode());
}
// Type is empty.
......@@ -297,7 +297,7 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(IrOpcode::kLoop,
NodeProperties::GetControlInput(node)->opcode());
// Terminate uses are End.
for (auto use : node->uses()) {
for (const Node* use : node->uses()) {
CHECK_EQ(IrOpcode::kEnd, use->opcode());
}
// Type is empty.
......
......@@ -13,7 +13,7 @@ ZonePool::StatsScope::StatsScope(ZonePool* zone_pool)
total_allocated_bytes_at_start_(zone_pool->GetTotalAllocatedBytes()),
max_allocated_bytes_(0) {
zone_pool_->stats_.push_back(this);
for (auto zone : zone_pool_->used_) {
for (Zone* zone : zone_pool_->used_) {
size_t size = static_cast<size_t>(zone->allocation_size());
std::pair<InitialValues::iterator, bool> res =
initial_values_.insert(std::make_pair(zone, size));
......@@ -116,7 +116,7 @@ void ZonePool::ReturnZone(Zone* zone) {
// Update max.
max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
// Update stats.
for (auto stat_scope : stats_) {
for (StatsScope* stat_scope : stats_) {
stat_scope->ZoneReturned(zone);
}
// Remove from used.
......
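
A related detail visible in the instruction.cc and graph-visualizer.cc hunks: plain `auto` never deduces a reference, so a declaration such as `auto imm = ImmediateOperand::cast(op);` already copy-initializes a value, and the new explicit `ImmediateOperand imm = ...` spells out that same copy rather than changing behavior. A toy illustration of the deduction rule, with hypothetical types rather than V8 code:

#include <cassert>

struct Operand {
  int index = 0;
};

// Returns a reference to an object owned by the caller.
Operand& Pick(Operand& op) { return op; }

int main() {
  Operand original{1};

  auto copy = Pick(original);        // auto deduces Operand, not Operand&: a copy.
  Operand by_value = Pick(original); // The explicit type documents the same copy.
  auto& alias = Pick(original);      // Only auto& (or Operand&) keeps a reference.

  copy.index = 2;
  by_value.index = 3;
  alias.index = 4;

  assert(original.index == 4);  // Only the reference touched the original.
  return 0;
}
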