// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <deque>
#include <queue>

#include "src/compiler/scheduler.h"

#include "src/compiler/graph.h"
#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node-properties-inl.h"
#include "src/data-flow.h"

namespace v8 {
namespace internal {
namespace compiler {

static inline void Trace(const char* msg, ...) {
  if (FLAG_trace_turbo_scheduler) {
    va_list arguments;
    va_start(arguments, msg);
    base::OS::VPrint(msg, arguments);
    va_end(arguments);
  }
}


Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
    : zone_(zone),
      graph_(graph),
      schedule_(schedule),
      scheduled_nodes_(zone),
      schedule_root_nodes_(zone),
      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
      has_floating_control_(false) {}


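// Runs the scheduling pipeline: build the CFG from the node graph, compute
// the special RPO and immediate dominators, then place the remaining nodes
// between their earliest and latest legal positions. The whole pipeline is
// re-run if floating control had to be connected into the graph.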
Schedule* Scheduler::ComputeSchedule(Graph* graph) {
  Schedule* schedule;
  bool had_floating_control = false;
  do {
    Zone tmp_zone(graph->zone()->isolate());
    schedule = new (graph->zone())
        Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
    Scheduler scheduler(&tmp_zone, graph, schedule);

    scheduler.BuildCFG();
    Scheduler::ComputeSpecialRPO(schedule);
    scheduler.GenerateImmediateDominatorTree();

    scheduler.PrepareUses();
    scheduler.ScheduleEarly();
    scheduler.ScheduleLate();

    had_floating_control = scheduler.ConnectFloatingControl();
  } while (had_floating_control);

  return schedule;
}


Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
  SchedulerData def = {NULL, 0, false, false, kUnknown};
  return def;
}


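// Lazily computes and caches the placement of a node: parameters are fixed to
// the start block, phis inherit the placement of their control input, and
// control nodes not already pinned by the CFG builder are marked as floating
// control.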
Scheduler::Placement Scheduler::GetPlacement(Node* node) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
    switch (node->opcode()) {
      case IrOpcode::kParameter:
        // Parameters are always fixed to the start node.
        data->placement_ = kFixed;
        break;
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi: {
        // Phis and effect phis are fixed if their control inputs are.
        data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
        break;
      }
#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
        CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
#undef DEFINE_FLOATING_CONTROL_CASE
        {
          // Control nodes that were not control-reachable from end may float.
          data->placement_ = kSchedulable;
          if (!data->is_connected_control_) {
            data->is_floating_control_ = true;
            has_floating_control_ = true;
            Trace("Floating control found: #%d:%s\n", node->id(),
                  node->op()->mnemonic());
          }
          break;
        }
      default:
        data->placement_ = kSchedulable;
        break;
    }
  }
  return data->placement_;
}


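// Walks both blocks up the dominator tree until they meet. A dominator always
// has a smaller special RPO number than the blocks it dominates, so the block
// with the larger RPO number is repeatedly replaced by its dominator.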
BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
  while (b1 != b2) {
    int b1_rpo = GetRPONumber(b1);
    int b2_rpo = GetRPONumber(b2);
    DCHECK(b1_rpo != b2_rpo);
    if (b1_rpo < b2_rpo) {
      b2 = b2->dominator();
    } else {
      b1 = b1->dominator();
    }
  }
  return b1;
}


// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph and dominator tree.


// Internal class to build a control flow graph (i.e. the basic blocks and
// edges between them within a Schedule) from the node graph.
// Visits the control edges of the graph backwards from end in order to find
// the connected control subgraph, needed for scheduling.
class CFGBuilder {
 public:
  Scheduler* scheduler_;
  Schedule* schedule_;
  ZoneQueue<Node*> queue_;
  NodeVector control_;

  CFGBuilder(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler),
        schedule_(scheduler->schedule_),
        queue_(zone),
        control_(zone) {}

  // Run the control flow graph construction algorithm by walking the graph
  // backwards from end through control edges, building and connecting the
  // basic blocks for control nodes.
  void Run() {
    Graph* graph = scheduler_->graph_;
    FixNode(schedule_->start(), graph->start());
    Queue(graph->end());

    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();
      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }

    FixNode(schedule_->end(), graph->end());
  }

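  // Add the node to the given block and pin it there as fixed, connected
  // control.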
  void FixNode(BasicBlock* block, Node* node) {
    schedule_->AddNode(block, node);
    scheduler_->GetData(node)->is_connected_control_ = true;
    scheduler_->GetData(node)->placement_ = Scheduler::kFixed;
  }

  void Queue(Node* node) {
    // Mark the connected control nodes as they are queued.
    Scheduler::SchedulerData* data = scheduler_->GetData(node);
    if (!data->is_connected_control_) {
      BuildBlocks(node);
      queue_.push(node);
      control_.push_back(node);
      data->is_connected_control_ = true;
    }
  }

  void BuildBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        BuildBlockForNode(node);
        break;
      case IrOpcode::kBranch:
        BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
        break;
      default:
        break;
    }
  }

  void ConnectBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        ConnectMerge(node);
        break;
      case IrOpcode::kBranch:
        scheduler_->schedule_root_nodes_.push_back(node);
        ConnectBranch(node);
        break;
      case IrOpcode::kReturn:
        scheduler_->schedule_root_nodes_.push_back(node);
        ConnectReturn(node);
        break;
      default:
        break;
    }
  }

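  // Create a new basic block for the given control node, unless it already
  // has one.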
  void BuildBlockForNode(Node* node) {
    if (schedule_->block(node) == NULL) {
      BasicBlock* block = schedule_->NewBasicBlock();
      Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(),
            node->op()->mnemonic());
      FixNode(block, node);
    }
  }

  void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
                                IrOpcode::Value b) {
    Node* successors[2];
    CollectSuccessorProjections(node, successors, a, b);
    BuildBlockForNode(successors[0]);
    BuildBlockForNode(successors[1]);
  }

  // Collect the branch-related projections from a node, such as IfTrue,
  // IfFalse.
  // TODO(titzer): consider moving this to node.h
  void CollectSuccessorProjections(Node* node, Node** buffer,
                                   IrOpcode::Value true_opcode,
                                   IrOpcode::Value false_opcode) {
    buffer[0] = NULL;
    buffer[1] = NULL;
    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
      if ((*i)->opcode() == true_opcode) {
        DCHECK_EQ(NULL, buffer[0]);
        buffer[0] = *i;
      }
      if ((*i)->opcode() == false_opcode) {
        DCHECK_EQ(NULL, buffer[1]);
        buffer[1] = *i;
      }
    }
    DCHECK_NE(NULL, buffer[0]);
    DCHECK_NE(NULL, buffer[1]);
  }

  void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
                              IrOpcode::Value true_opcode,
                              IrOpcode::Value false_opcode) {
    Node* successors[2];
    CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
    buffer[0] = schedule_->block(successors[0]);
    buffer[1] = schedule_->block(successors[1]);
  }

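  // Add an outgoing branch from the branch node's block to the blocks of its
  // IfTrue and IfFalse projections.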
  void ConnectBranch(Node* branch) {
    Node* branch_block_node = NodeProperties::GetControlInput(branch);
    BasicBlock* branch_block = schedule_->block(branch_block_node);
    DCHECK(branch_block != NULL);

    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
                           IrOpcode::kIfFalse);

    TraceConnect(branch, branch_block, successor_blocks[0]);
    TraceConnect(branch, branch_block, successor_blocks[1]);

    schedule_->AddBranch(branch_block, branch, successor_blocks[0],
                         successor_blocks[1]);
  }

  void ConnectMerge(Node* merge) {
    BasicBlock* block = schedule_->block(merge);
    DCHECK(block != NULL);
    // For all of the merge's control inputs, add a goto at the end of the
    // input's block to the merge's basic block.
    for (InputIter j = merge->inputs().begin(); j != merge->inputs().end();
         ++j) {
      BasicBlock* predecessor_block = schedule_->block(*j);
      if ((*j)->opcode() != IrOpcode::kReturn) {
        TraceConnect(merge, predecessor_block, block);
        schedule_->AddGoto(predecessor_block, block);
      }
    }
  }

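  // Terminate the block containing the return's control input with the
  // return node.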
  void ConnectReturn(Node* ret) {
    Node* return_block_node = NodeProperties::GetControlInput(ret);
    BasicBlock* return_block = schedule_->block(return_block_node);
    TraceConnect(ret, return_block, NULL);
    schedule_->AddReturn(return_block, ret);
  }

  void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
    DCHECK_NE(NULL, block);
    if (succ == NULL) {
      Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
            block->id().ToInt());
    } else {
      Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
            block->id().ToInt(), succ->id().ToInt());
    }
  }
};


void Scheduler::BuildCFG() {
  Trace("--- CREATING CFG -------------------------------------------\n");
  CFGBuilder cfg_builder(zone_, this);
  cfg_builder.Run();
  // Initialize per-block data.
  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
}


void Scheduler::GenerateImmediateDominatorTree() {
  // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's
  // algorithm if this becomes really slow.
  Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
  for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
    BasicBlock* current_rpo = schedule_->rpo_order_[i];
    if (current_rpo != schedule_->start()) {
      BasicBlock::Predecessors::iterator current_pred =
          current_rpo->predecessors_begin();
      BasicBlock::Predecessors::iterator end = current_rpo->predecessors_end();
      DCHECK(current_pred != end);
      BasicBlock* dominator = *current_pred;
      ++current_pred;
      // For multiple predecessors, walk up the RPO ordering until a common
      // dominator is found.
      int current_rpo_pos = GetRPONumber(current_rpo);
      while (current_pred != end) {
        // Don't examine backwards edges.
        BasicBlock* pred = *current_pred;
        if (GetRPONumber(pred) < current_rpo_pos) {
          dominator = GetCommonDominator(dominator, *current_pred);
        }
        ++current_pred;
      }
      current_rpo->set_dominator(dominator);
      Trace("Block %d's idom is %d\n", current_rpo->id().ToInt(),
            dominator->id().ToInt());
    }
  }
}


// -----------------------------------------------------------------------------
// Phase 2: Prepare use counts for nodes.


class PrepareUsesVisitor : public NullNodeVisitor {
 public:
  explicit PrepareUsesVisitor(Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}

  GenericGraphVisit::Control Pre(Node* node) {
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      // Fixed nodes are always roots for schedule late.
      scheduler_->schedule_root_nodes_.push_back(node);
      if (!schedule_->IsScheduled(node)) {
        // Make sure root nodes are scheduled in their respective blocks.
        Trace("  Scheduling fixed position node #%d:%s\n", node->id(),
              node->op()->mnemonic());
        IrOpcode::Value opcode = node->opcode();
        BasicBlock* block =
            opcode == IrOpcode::kParameter
                ? schedule_->start()
                : schedule_->block(NodeProperties::GetControlInput(node));
        DCHECK(block != NULL);
        schedule_->AddNode(block, node);
      }
    }

    return GenericGraphVisit::CONTINUE;
  }

  void PostEdge(Node* from, int index, Node* to) {
    // If the edge is from an unscheduled node, then tally it in the use count
    // for all of its inputs. The same criterion will be used in ScheduleLate
    // for decrementing use counts.
    if (!schedule_->IsScheduled(from)) {
      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
      ++(scheduler_->GetData(to)->unscheduled_count_);
      Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
            to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
            scheduler_->GetData(to)->unscheduled_count_);
      if (OperatorProperties::IsBasicBlockBegin(to->op()) &&
          (from->opcode() == IrOpcode::kEffectPhi ||
           from->opcode() == IrOpcode::kPhi) &&
          scheduler_->GetData(to)->is_floating_control_ &&
          !scheduler_->GetData(to)->is_connected_control_) {
        for (InputIter i = from->inputs().begin(); i != from->inputs().end();
             ++i) {
          if (!NodeProperties::IsControlEdge(i.edge())) {
            ++(scheduler_->GetData(*i)->unscheduled_count_);
            Trace(
                "  Use count of #%d:%s (additional dependency of #%d:%s)++ = "
                "%d\n",
                (*i)->id(), (*i)->op()->mnemonic(), to->id(),
                to->op()->mnemonic(),
                scheduler_->GetData(*i)->unscheduled_count_);
          }
        }
      }
    }
  }

 private:
  Scheduler* scheduler_;
  Schedule* schedule_;
};


void Scheduler::PrepareUses() {
  Trace("--- PREPARE USES -------------------------------------------\n");

  // Count the uses of every node; this count is used to ensure that all of a
  // node's uses are scheduled before the node itself.
  PrepareUsesVisitor prepare_uses(this);
  graph_->VisitNodeInputsFromEnd(&prepare_uses);
}


// -----------------------------------------------------------------------------
// Phase 3: Schedule nodes early.


class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
 public:
  explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}

  GenericGraphVisit::Control Pre(Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      // Fixed nodes already know their schedule early position.
      if (data->minimum_block_ == NULL) {
        data->minimum_block_ = schedule_->block(node);
        Trace("Preschedule #%d:%s minimum_rpo = %d (fixed)\n", node->id(),
              node->op()->mnemonic(), data->minimum_block_->rpo_number());
      }
    } else {
      // For unfixed nodes the minimum RPO is the max of all of the inputs.
      if (data->minimum_block_ == NULL) {
        data->minimum_block_ = ComputeMaximumInputRPO(node);
        if (data->minimum_block_ == NULL) return GenericGraphVisit::REENTER;
        Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
              node->op()->mnemonic(), data->minimum_block_->rpo_number());
      }
    }
    DCHECK_NE(data->minimum_block_, NULL);
    return GenericGraphVisit::CONTINUE;
  }

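  // By the time of the post-order visit all inputs have been visited, so the
  // maximum input RPO can now be computed for nodes whose placement could not
  // be resolved in Pre().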
  GenericGraphVisit::Control Post(Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);
    if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
      // For unfixed nodes the minimum RPO is the max of all of the inputs.
      if (data->minimum_block_ == NULL) {
        data->minimum_block_ = ComputeMaximumInputRPO(node);
        Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
              node->op()->mnemonic(), data->minimum_block_->rpo_number());
      }
    }
    DCHECK_NE(data->minimum_block_, NULL);
    return GenericGraphVisit::CONTINUE;
  }

  // Computes the maximum of the minimum RPOs for all inputs. If the maximum
  // cannot be determined (i.e. minimum RPO for at least one input is {NULL}),
  // then {NULL} is returned.
  BasicBlock* ComputeMaximumInputRPO(Node* node) {
    BasicBlock* max_block = schedule_->start();
    for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
      DCHECK_NE(node, *i);  // Loops only exist for fixed nodes.
      BasicBlock* block = scheduler_->GetData(*i)->minimum_block_;
      if (block == NULL) return NULL;
      if (block->rpo_number() > max_block->rpo_number()) {
        max_block = block;
      }
    }
    return max_block;
  }

 private:
  Scheduler* scheduler_;
  Schedule* schedule_;
};


void Scheduler::ScheduleEarly() {
  Trace("--- SCHEDULE EARLY -----------------------------------------\n");

  // Compute the minimum RPO for each node, thereby determining the earliest
  // position each node could be placed within a valid schedule.
  ScheduleEarlyNodeVisitor visitor(this);
  graph_->VisitNodeInputsFromEnd(&visitor);
}


// -----------------------------------------------------------------------------
// Phase 4: Schedule nodes late.


class ScheduleLateNodeVisitor : public NullNodeVisitor {
 public:
  explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}

  GenericGraphVisit::Control Pre(Node* node) {
    // Don't schedule nodes that are already scheduled.
    if (schedule_->IsScheduled(node)) {
      return GenericGraphVisit::CONTINUE;
    }

    Scheduler::SchedulerData* data = scheduler_->GetData(node);
    DCHECK_EQ(Scheduler::kSchedulable, data->placement_);

    // If all the uses of a node have been scheduled, then the node itself can
    // be scheduled.
    bool eligible = data->unscheduled_count_ == 0;
    Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
          node->op()->mnemonic(), eligible ? "true" : "false");
    if (!eligible) return GenericGraphVisit::DEFER;

    // Determine the dominating block for all of the uses of this node. It is
    // the latest block that this node can be scheduled in.
    BasicBlock* block = NULL;
    for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
         ++i) {
      BasicBlock* use_block = GetBlockForUse(i.edge());
      if (use_block == NULL) continue;
      block = (block == NULL)
                  ? use_block
                  : scheduler_->GetCommonDominator(block, use_block);
    }
    DCHECK(block != NULL);

    int min_rpo = data->minimum_block_->rpo_number();
    Trace(
        "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
        "minimum_rpo = %d\n",
        node->id(), node->op()->mnemonic(), block->id().ToInt(),
        block->loop_depth(), min_rpo);
    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    // into enclosing loop pre-headers until they would precede their
    // ScheduleEarly position.
    BasicBlock* hoist_block = block;
    while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) {
      if (hoist_block->loop_depth() < block->loop_depth()) {
        block = hoist_block;
        Trace("  hoisting #%d:%s to block %d\n", node->id(),
              node->op()->mnemonic(), block->id().ToInt());
      }
      // Try to hoist to the pre-header of the loop header.
      hoist_block = hoist_block->loop_header();
      if (hoist_block != NULL) {
        BasicBlock* pre_header = hoist_block->dominator();
        DCHECK(pre_header == NULL ||
               *hoist_block->predecessors_begin() == pre_header);
        Trace(
            "  hoist to pre-header B%d of loop header B%d, depth would be %d\n",
            pre_header->id().ToInt(), hoist_block->id().ToInt(),
            pre_header->loop_depth());
        hoist_block = pre_header;
      }
    }

    ScheduleNode(block, node);

    return GenericGraphVisit::CONTINUE;
  }

 private:
  BasicBlock* GetBlockForUse(Node::Edge edge) {
    Node* use = edge.from();
    IrOpcode::Value opcode = use->opcode();
    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
      // If the use is from a fixed (i.e. non-floating) phi, use the block
      // of the corresponding control input to the merge.
      int index = edge.index();
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        Trace("  input@%d into a fixed phi #%d:%s\n", index, use->id(),
              use->op()->mnemonic());
        Node* merge = NodeProperties::GetControlInput(use, 0);
        opcode = merge->opcode();
        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
        use = NodeProperties::GetControlInput(merge, index);
      }
    }
    BasicBlock* result = schedule_->block(use);
    if (result == NULL) return NULL;
    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
          use->op()->mnemonic(), result->id().ToInt());
    return result;
  }

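  // Plan the node into the given block and decrement the use counts of its
  // inputs (and of floating control nodes feeding its phi uses), which may
  // make them newly eligible for scheduling.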
  void ScheduleNode(BasicBlock* block, Node* node) {
    schedule_->PlanNode(block, node);
    scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);

    // Reduce the use count of the node's inputs to potentially make them
    // schedulable.
    for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
      DCHECK(data->unscheduled_count_ > 0);
      --data->unscheduled_count_;
      if (FLAG_trace_turbo_scheduler) {
        Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
              (*i)->op()->mnemonic(), i.edge().from()->id(),
              i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
        if (data->unscheduled_count_ == 0) {
          Trace("  newly eligible #%d:%s\n", (*i)->id(),
                (*i)->op()->mnemonic());
        }
      }
    }

    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
      Node* use = *i;
      if (use->opcode() == IrOpcode::kPhi ||
          use->opcode() == IrOpcode::kEffectPhi) {
        Node* control = NodeProperties::GetControlInput(use);
        Scheduler::SchedulerData* data = scheduler_->GetData(control);
        if (data->is_floating_control_ && !data->is_connected_control_) {
          --data->unscheduled_count_;
          if (FLAG_trace_turbo_scheduler) {
            Trace(
                "  Use count for #%d:%s (additional dependency of #%d:%s)-- = "
                "%d\n",
                (*i)->id(), (*i)->op()->mnemonic(), node->id(),
                node->op()->mnemonic(), data->unscheduled_count_);
            if (data->unscheduled_count_ == 0) {
              Trace("  newly eligible #%d:%s\n", (*i)->id(),
                    (*i)->op()->mnemonic());
            }
          }
        }
      }
    }
  }

  Scheduler* scheduler_;
  Schedule* schedule_;
};


void Scheduler::ScheduleLate() {
  Trace("--- SCHEDULE LATE ------------------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    Trace("roots: ");
    for (NodeVectorIter i = schedule_root_nodes_.begin();
         i != schedule_root_nodes_.end(); ++i) {
      Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
    }
    Trace("\n");
  }

  // Schedule late: place nodes in the dominator block of all of their uses.
  ScheduleLateNodeVisitor schedule_late_visitor(this);

  {
    Zone zone(zone_->isolate());
    GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
                             NodeInputIterationTraits<Node> >(
        graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
        &schedule_late_visitor);
  }

  // Add collected nodes for basic blocks to their blocks in the right order.
  int block_num = 0;
  for (NodeVectorVectorIter i = scheduled_nodes_.begin();
       i != scheduled_nodes_.end(); ++i) {
    for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
      schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
    }
    block_num++;
  }
}


// -----------------------------------------------------------------------------


bool Scheduler::ConnectFloatingControl() {
  if (!has_floating_control_) return false;

  Trace("Connecting floating control...\n");

  // Process blocks and instructions backwards to find and connect floating
  // control nodes into the control graph according to the block they were
  // scheduled into.
  int max = static_cast<int>(schedule_->rpo_order()->size());
  for (int i = max - 1; i >= 0; i--) {
    BasicBlock* block = schedule_->rpo_order()->at(i);
    // TODO(titzer): we place at most one floating control structure per
    // basic block because scheduling currently can interleave phis from
    // one subgraph with the merges from another subgraph.
    for (size_t j = 0; j < block->NodeCount(); j++) {
      Node* node = block->NodeAt(block->NodeCount() - 1 - j);
      SchedulerData* data = GetData(node);
      if (data->is_floating_control_ && !data->is_connected_control_) {
        Trace("  Floating control #%d:%s was scheduled in B%d\n", node->id(),
              node->op()->mnemonic(), block->id().ToInt());
        ConnectFloatingControlSubgraph(block, node);
        break;
      }
    }
  }

  return true;
}


void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
  Node* block_start = block->NodeAt(0);
  DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
  // Find the current "control successor" of the node that starts the block
  // by searching the control uses for a control input edge from a connected
  // control node.
  Node* control_succ = NULL;
  for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
       ++i) {
    Node::Edge edge = i.edge();
    if (NodeProperties::IsControlEdge(edge) &&
        GetData(edge.from())->is_connected_control_) {
      DCHECK_EQ(NULL, control_succ);
      control_succ = edge.from();
      control_succ->ReplaceInput(edge.index(), end);
    }
  }
  DCHECK_NE(NULL, control_succ);
  Trace("  Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
        end->id(), end->op()->mnemonic(), control_succ->id(),
        control_succ->op()->mnemonic(), block_start->id(),
        block_start->op()->mnemonic());

  // Find the "start" node of the control subgraph, which should be the
  // unique node that is itself floating control but has a control input that
  // is not floating.
  Node* start = NULL;
  ZoneQueue<Node*> queue(zone_);
  queue.push(end);
  GetData(end)->is_connected_control_ = true;
  while (!queue.empty()) {
    Node* node = queue.front();
    queue.pop();
    Trace("  Search #%d:%s for control subgraph start\n", node->id(),
          node->op()->mnemonic());
    int max = NodeProperties::PastControlIndex(node);
    for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
      Node* input = node->InputAt(i);
      SchedulerData* data = GetData(input);
      if (data->is_floating_control_) {
        // {input} is floating control.
        if (!data->is_connected_control_) {
          // First time seeing {input} during this traversal, queue it.
          queue.push(input);
          data->is_connected_control_ = true;
        }
      } else {
        // Otherwise, {node} is the start node, because it is floating control
        // but is connected to {input} that is not floating control.
        DCHECK_EQ(NULL, start);  // There can be only one.
        start = node;
      }
    }
  }

  DCHECK_NE(NULL, start);
  start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);

  Trace("  Connecting floating control start %d:%s to %d:%s\n", start->id(),
        start->op()->mnemonic(), block_start->id(),
        block_start->op()->mnemonic());
}


// Numbering for BasicBlockData.rpo_number_ for this block traversal:
static const int kBlockOnStack = -2;
static const int kBlockVisited1 = -3;
static const int kBlockVisited2 = -4;
static const int kBlockUnvisited1 = -1;
static const int kBlockUnvisited2 = kBlockVisited1;

struct SpecialRPOStackFrame {
  BasicBlock* block;
  size_t index;
};

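// A singly-linked list of blocks, built in reverse order, from which the
// final RPO order is serialized.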
struct BlockList {
  BasicBlock* block;
  BlockList* next;

  BlockList* Add(Zone* zone, BasicBlock* b) {
    BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
    list->block = b;
    list->next = this;
    return list;
  }

  void Serialize(BasicBlockVector* final_order) {
    for (BlockList* l = this; l != NULL; l = l->next) {
      l->block->set_rpo_number(static_cast<int>(final_order->size()));
      final_order->push_back(l->block);
    }
  }
};

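// Per-loop bookkeeping for the special RPO computation: the loop header, its
// member blocks, the edges leaving the loop, and the list section spanning
// the loop body.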
struct LoopInfo {
  BasicBlock* header;
  ZoneList<BasicBlock*>* outgoing;
  BitVector* members;
  LoopInfo* prev;
  BlockList* end;
  BlockList* start;

  void AddOutgoing(Zone* zone, BasicBlock* block) {
    if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
    outgoing->Add(block, zone);
  }
};


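// Pushes a block onto the traversal stack if it is still unvisited, marking
// it as on-stack; returns the new stack depth.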
static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
                int unvisited) {
  if (child->rpo_number() == unvisited) {
    stack[depth].block = child;
    stack[depth].index = 0;
    child->set_rpo_number(kBlockOnStack);
    return depth + 1;
  }
  return depth;
}


// Computes loop membership from the backedges of the control flow graph.
static LoopInfo* ComputeLoopInfo(
    Zone* zone, SpecialRPOStackFrame* queue, int num_loops, size_t num_blocks,
    ZoneList<std::pair<BasicBlock*, size_t> >* backedges) {
  LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
  memset(loops, 0, num_loops * sizeof(LoopInfo));

  // Compute loop membership starting from backedges.
  // O(max(loop_depth) * max(|loop|)).
  for (int i = 0; i < backedges->length(); i++) {
    BasicBlock* member = backedges->at(i).first;
    BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
    int loop_num = header->loop_end();
    if (loops[loop_num].header == NULL) {
      loops[loop_num].header = header;
      loops[loop_num].members =
          new (zone) BitVector(static_cast<int>(num_blocks), zone);
    }

    int queue_length = 0;
    if (member != header) {
      // As long as the header doesn't have a backedge to itself,
      // push the member onto the queue and process its predecessors.
      if (!loops[loop_num].members->Contains(member->id().ToInt())) {
        loops[loop_num].members->Add(member->id().ToInt());
      }
      queue[queue_length++].block = member;
    }

    // Propagate loop membership backwards. All predecessors of M up to the
    // loop header H are members of the loop too. O(|blocks between M and H|).
    while (queue_length > 0) {
      BasicBlock* block = queue[--queue_length].block;
      for (size_t i = 0; i < block->PredecessorCount(); i++) {
        BasicBlock* pred = block->PredecessorAt(i);
        if (pred != header) {
          if (!loops[loop_num].members->Contains(pred->id().ToInt())) {
            loops[loop_num].members->Add(pred->id().ToInt());
            queue[queue_length++].block = pred;
          }
        }
      }
    }
  }
  return loops;
}


#if DEBUG
static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
  OFStream os(stdout);
  os << "-- RPO with " << num_loops << " loops ";
  if (num_loops > 0) {
    os << "(";
    for (int i = 0; i < num_loops; i++) {
      if (i > 0) os << " ";
      os << "B" << loops[i].header->id();
    }
    os << ") ";
  }
  os << "-- \n";

  for (size_t i = 0; i < order->size(); i++) {
    BasicBlock* block = (*order)[i];
    BasicBlock::Id bid = block->id();
    // TODO(jarin,svenpanne): Add formatting here once we have support for that
    // in streams (we want an equivalent of PrintF("%5d:", i) here).
    os << i << ":";
    for (int j = 0; j < num_loops; j++) {
      bool membership = loops[j].members->Contains(bid.ToInt());
      bool range = loops[j].header->LoopContains(block);
      os << (membership ? " |" : "  ");
      os << (range ? "x" : " ");
    }
    os << "  B" << bid << ": ";
    if (block->loop_end() >= 0) {
      os << " range: [" << block->rpo_number() << ", " << block->loop_end()
         << ")";
    }
    os << "\n";
  }
}


static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
                             BasicBlockVector* order) {
  DCHECK(order->size() > 0);
  DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.

  for (int i = 0; i < num_loops; i++) {
    LoopInfo* loop = &loops[i];
    BasicBlock* header = loop->header;

    DCHECK(header != NULL);
    DCHECK(header->rpo_number() >= 0);
    DCHECK(header->rpo_number() < static_cast<int>(order->size()));
    DCHECK(header->loop_end() >= 0);
    DCHECK(header->loop_end() <= static_cast<int>(order->size()));
    DCHECK(header->loop_end() > header->rpo_number());

    // Verify the start ... end list relationship.
    int links = 0;
    BlockList* l = loop->start;
    DCHECK(l != NULL && l->block == header);
    bool end_found;
    while (true) {
      if (l == NULL || l == loop->end) {
        end_found = (loop->end == l);
        break;
      }
      // The list should be in the same order as the final result.
      DCHECK(l->block->rpo_number() == links + loop->header->rpo_number());
      links++;
      l = l->next;
      DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
    }
    DCHECK(links > 0);
    DCHECK(links == (header->loop_end() - header->rpo_number()));
    DCHECK(end_found);

    // Check the contiguousness of loops.
    int count = 0;
    for (int j = 0; j < static_cast<int>(order->size()); j++) {
      BasicBlock* block = order->at(j);
      DCHECK(block->rpo_number() == j);
      if (j < header->rpo_number() || j >= header->loop_end()) {
        DCHECK(!loop->members->Contains(block->id().ToInt()));
      } else {
        if (block == header) {
          DCHECK(!loop->members->Contains(block->id().ToInt()));
        } else {
          DCHECK(loop->members->Contains(block->id().ToInt()));
        }
        count++;
      }
    }
    DCHECK(links == count);
  }
}
#endif  // DEBUG


// Compute the special reverse-post-order block ordering, which is essentially
// a RPO of the graph where loop bodies are contiguous. Properties:
// 1. If block A is a predecessor of B, then A appears before B in the order,
//    unless B is a loop header and A is in the loop headed at B
//    (i.e. A -> B is a backedge).
// => If block A dominates block B, then A appears before B in the order.
// => If block A is a loop header, A appears before all blocks in the loop
//    headed at A.
// 2. All loops are contiguous in the order (i.e. no intervening blocks that
//    do not belong to the loop.)
// Note a simple RPO traversal satisfies (1) but not (2).
BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
  Zone tmp_zone(schedule->zone()->isolate());
  Zone* zone = &tmp_zone;
  Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");
  // RPO should not have been computed for this schedule yet.
  CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number());
  CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));

  // Perform an iterative RPO traversal using an explicit stack,
  // recording backedges that form cycles. O(|B|).
  ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone);
  SpecialRPOStackFrame* stack = zone->NewArray<SpecialRPOStackFrame>(
      static_cast<int>(schedule->BasicBlockCount()));
  BasicBlock* entry = schedule->start();
  BlockList* order = NULL;
  int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
  int num_loops = 0;

  while (stack_depth > 0) {
    int current = stack_depth - 1;
    SpecialRPOStackFrame* frame = stack + current;

    if (frame->index < frame->block->SuccessorCount()) {
      // Process the next successor.
      BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
      if (succ->rpo_number() == kBlockVisited1) continue;
      if (succ->rpo_number() == kBlockOnStack) {
        // The successor is on the stack, so this is a backedge (cycle).
        backedges.Add(
            std::pair<BasicBlock*, size_t>(frame->block, frame->index - 1),
            zone);
        if (succ->loop_end() < 0) {
          // Assign a new loop number to the header if it doesn't have one.
          succ->set_loop_end(num_loops++);
        }
      } else {
        // Push the successor onto the stack.
        DCHECK(succ->rpo_number() == kBlockUnvisited1);
        stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
      }
    } else {
      // Finished with all successors; pop the stack and add the block.
      order = order->Add(zone, frame->block);
      frame->block->set_rpo_number(kBlockVisited1);
      stack_depth--;
    }
  }

  // If no loops were encountered, then the order we computed was correct.
  LoopInfo* loops = NULL;
  if (num_loops != 0) {
    // Otherwise, compute the loop information from the backedges in order
    // to perform a traversal that groups loop bodies together.
    loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(),
                            &backedges);

    // Initialize the "loop stack". Note the entry could be a loop header.
    LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end()] : NULL;
    order = NULL;

    // Perform an iterative post-order traversal, visiting loop bodies before
    // edges that lead out of loops. Visits each block once, but linking loop
    // sections together is linear in the loop size, so overall is
    // O(|B| + max(loop_depth) * max(|loop|))
    stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
    while (stack_depth > 0) {
      SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
      BasicBlock* block = frame->block;
      BasicBlock* succ = NULL;

      if (frame->index < block->SuccessorCount()) {
        // Process the next normal successor.
        succ = block->SuccessorAt(frame->index++);
      } else if (block->IsLoopHeader()) {
        // Process additional outgoing edges from the loop header.
        if (block->rpo_number() == kBlockOnStack) {
          // Finish the loop body the first time the header is left on the
          // stack.
          DCHECK(loop != NULL && loop->header == block);
          loop->start = order->Add(zone, block);
          order = loop->end;
          block->set_rpo_number(kBlockVisited2);
          // Pop the loop stack and continue visiting outgoing edges within
          // the context of the outer loop, if any.
          loop = loop->prev;
          // We leave the loop header on the stack; the rest of this iteration
          // and later iterations will go through its outgoing edges list.
        }

        // Use the next outgoing edge if there are any.
        int outgoing_index =
            static_cast<int>(frame->index - block->SuccessorCount());
        LoopInfo* info = &loops[block->loop_end()];
        DCHECK(loop != info);
        if (info->outgoing != NULL &&
            outgoing_index < info->outgoing->length()) {
          succ = info->outgoing->at(outgoing_index);
          frame->index++;
        }
      }

      if (succ != NULL) {
        // Process the next successor.
        if (succ->rpo_number() == kBlockOnStack) continue;
        if (succ->rpo_number() == kBlockVisited2) continue;
        DCHECK(succ->rpo_number() == kBlockUnvisited2);
        if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
          // The successor is not in the current loop or any nested loop.
          // Add it to the outgoing edges of this loop and visit it later.
          loop->AddOutgoing(zone, succ);
        } else {
          // Push the successor onto the stack.
          stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
          if (succ->IsLoopHeader()) {
            // Push the inner loop onto the loop stack.
            DCHECK(succ->loop_end() >= 0 && succ->loop_end() < num_loops);
            LoopInfo* next = &loops[succ->loop_end()];
            next->end = order;
            next->prev = loop;
            loop = next;
          }
        }
      } else {
        // Finished with all successors of the current block.
        if (block->IsLoopHeader()) {
          // If we are going to pop a loop header, then add its entire body.
          LoopInfo* info = &loops[block->loop_end()];
          for (BlockList* l = info->start; true; l = l->next) {
            if (l->next == info->end) {
              l->next = order;
              info->end = order;
              break;
            }
          }
          order = info->start;
        } else {
          // Pop a single node off the stack and add it to the order.
          order = order->Add(zone, block);
          block->set_rpo_number(kBlockVisited2);
        }
        stack_depth--;
      }
    }
  }

  // Construct the final order from the list.
  BasicBlockVector* final_order = &schedule->rpo_order_;
  order->Serialize(final_order);

  // Compute the correct loop header for every block and set the correct loop
  // ends.
  LoopInfo* current_loop = NULL;
  BasicBlock* current_header = NULL;
  int loop_depth = 0;
  for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
       ++i) {
    BasicBlock* current = *i;
    current->set_loop_header(current_header);
    if (current->IsLoopHeader()) {
      loop_depth++;
      current_loop = &loops[current->loop_end()];
      BlockList* end = current_loop->end;
      current->set_loop_end(end == NULL ? static_cast<int>(final_order->size())
                                        : end->block->rpo_number());
      current_header = current_loop->header;
      Trace("B%d is a loop header, increment loop depth to %d\n",
            current->id().ToInt(), loop_depth);
    } else {
      while (current_header != NULL &&
             current->rpo_number() >= current_header->loop_end()) {
        DCHECK(current_header->IsLoopHeader());
        DCHECK(current_loop != NULL);
        current_loop = current_loop->prev;
        current_header = current_loop == NULL ? NULL : current_loop->header;
        --loop_depth;
      }
    }
    current->set_loop_depth(loop_depth);
    if (current->loop_header() == NULL) {
      Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(),
            current->loop_depth());
    } else {
      Trace("B%d has loop header B%d (depth == %d)\n", current->id().ToInt(),
            current->loop_header()->id().ToInt(), current->loop_depth());
    }
  }

#if DEBUG
  if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
  VerifySpecialRPO(num_loops, loops, final_order);
#endif
  return final_order;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8