greedy-allocator.cc
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/greedy-allocator.h"
#include "src/compiler/register-allocator.h"

namespace v8 {
namespace internal {
namespace compiler {


#define TRACE(...)                             \
  do {                                         \
    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
  } while (false)


const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;


namespace {

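// Propagates the register assigned to |range| to its use position hints and,
// for phi ranges, to the corresponding phi map entry.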
void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
  int reg_id = range->assigned_register();
  range->SetUseHints(reg_id);
  if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
    data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id);
  }
}


void UnsetOperands(LiveRange* range, RegisterAllocationData* data) {
  range->UnsetUseHints();
  if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
    data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister();
  }
}


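// Splits |range| at |pos| and returns the tail part starting at |pos|. The
// position must lie strictly inside the range; the DCHECK below further
// restricts where within an instruction block the split may occur.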
LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
                 LifetimePosition pos) {
  DCHECK(range->Start() < pos && pos < range->End());
  DCHECK(pos.IsStart() || pos.IsGapPosition() ||
         (data->code()
              ->GetInstructionBlock(pos.ToInstructionIndex())
              ->last_instruction_index() != pos.ToInstructionIndex()));
  LiveRange* result = range->SplitAt(pos, data->allocation_zone());
  return result;
}


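// Returns the gap position at |instruction_index| if it falls strictly inside
// |range|, and an invalid position otherwise.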
// TODO(mtrofin): explain why splitting in gap START is always OK.
LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
                                                int instruction_index) {
  LifetimePosition ret = LifetimePosition::Invalid();

  ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
  if (range->Start() >= ret || ret >= range->End()) {
    return LifetimePosition::Invalid();
  }
  return ret;
}


int GetFirstGapIndex(const UseInterval* interval) {
  LifetimePosition start = interval->start();
  int ret = start.ToInstructionIndex();
  return ret;
}


int GetLastGapIndex(const UseInterval* interval) {
  LifetimePosition end = interval->end();
  return end.ToInstructionIndex();
}


// Basic heuristic for advancing the algorithm when every other splitting
// heuristic has failed.
LifetimePosition GetLastResortSplitPosition(const LiveRange* range,
                                            const InstructionSequence* code) {
  if (range->first_interval()->next() != nullptr) {
    return range->first_interval()->next()->start();
  }

  UseInterval* interval = range->first_interval();
  int first = GetFirstGapIndex(interval);
  int last = GetLastGapIndex(interval);
  if (first == last) return LifetimePosition::Invalid();

  // TODO(mtrofin): determine why we can't just split at an arbitrary position
  // within the range, e.g. its middle.
  for (UsePosition* pos = range->first_pos(); pos != nullptr;
       pos = pos->next()) {
    if (pos->type() != UsePositionType::kRequiresRegister) continue;
    LifetimePosition before =
        GetSplitPositionForInstruction(range, pos->pos().ToInstructionIndex());
    if (before.IsValid()) return before;
    LifetimePosition after = GetSplitPositionForInstruction(
        range, pos->pos().ToInstructionIndex() + 1);
    if (after.IsValid()) return after;
  }
  return LifetimePosition::Invalid();
}


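// The allocator can make progress on a blocked range if the range can either
// be spilled at its start or split at some last-resort position.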
bool IsProgressPossible(const LiveRange* range,
                        const InstructionSequence* code) {
  return range->CanBeSpilled(range->Start()) ||
         GetLastResortSplitPosition(range, code).IsValid();
}
}  // namespace


AllocationCandidate AllocationScheduler::GetNext() {
  DCHECK(!queue_.empty());
  AllocationCandidate ret = queue_.top();
  queue_.pop();
  return ret;
}


void AllocationScheduler::Schedule(LiveRange* range) {
  TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
        range->relative_id());
  queue_.push(AllocationCandidate(range));
}


void AllocationScheduler::Schedule(LiveRangeGroup* group) {
  queue_.push(AllocationCandidate(group));
}

GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
                                 RegisterKind kind, Zone* local_zone)
    : RegisterAllocator(data, kind),
      local_zone_(local_zone),
      allocations_(local_zone),
      scheduler_(local_zone),
      groups_(local_zone) {}


void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
  TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
        range->TopLevel()->vreg(), range->relative_id());

  DCHECK(!range->HasRegisterAssigned());

  AllocateRegisterToRange(reg_id, range);

  TRACE("Assigning %s to range %d%d.\n", RegisterName(reg_id),
        range->TopLevel()->vreg(), range->relative_id());
154
  range->set_assigned_register(reg_id);
  UpdateOperands(range, data());
}


void GreedyAllocator::PreallocateFixedRanges() {
  allocations_.resize(num_registers());
  for (int i = 0; i < num_registers(); i++) {
    allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
  }

  for (LiveRange* fixed_range : GetFixedRegisters()) {
    if (fixed_range != nullptr) {
      DCHECK_EQ(mode(), fixed_range->kind());
      DCHECK(fixed_range->TopLevel()->IsFixed());

      int reg_nr = fixed_range->assigned_register();
      EnsureValidRangeWeight(fixed_range);
      AllocateRegisterToRange(reg_nr, fixed_range);
    }
  }
}


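// Groups each phi output range with its non-conflicting phi input ranges, so
// that the whole group can later be allocated to the same register and the
// moves between phi inputs and outputs can be avoided where possible.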
void GreedyAllocator::GroupLiveRanges() {
  CoalescedLiveRanges grouper(local_zone());
  for (TopLevelLiveRange* range : data()->live_ranges()) {
    grouper.clear();
    // Skip splinters, because we do not want to optimize for them, and moves
    // due to assigning them to different registers occur in deferred blocks.
    if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
      continue;
    }

    // A phi can't be a memory operand, so it couldn't have been split.
    DCHECK(!range->spilled());

    // Maybe this phi range is itself an input to another phi which was already
    // processed.
    LiveRangeGroup* latest_grp = range->group() != nullptr
                                     ? range->group()
                                     : new (local_zone())
                                           LiveRangeGroup(local_zone());

    // Populate the grouper.
    if (range->group() == nullptr) {
      grouper.AllocateRange(range);
    } else {
      for (LiveRange* member : range->group()->ranges()) {
        grouper.AllocateRange(member);
      }
    }
    for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
      // Skip the phi output when it also appears among the inputs, which may
      // happen for loops.
      if (j == range->vreg()) continue;

      TopLevelLiveRange* other_top = data()->live_ranges()[j];

      if (other_top->IsSplinter()) continue;
      // If the other was a memory operand, it might have been split.
      // So get the unsplit part.
      LiveRange* other =
          other_top->next() == nullptr ? other_top : other_top->next();

      if (other->spilled()) continue;

      LiveRangeGroup* other_group = other->group();
      if (other_group != nullptr) {
        bool can_merge = true;
        for (LiveRange* member : other_group->ranges()) {
          if (grouper.GetConflicts(member).Current() != nullptr) {
            can_merge = false;
            break;
          }
        }
        // If no member conflicts with the current group then, since the
        // members don't conflict with each other either, we can merge them.
        if (can_merge) {
          latest_grp->ranges().insert(latest_grp->ranges().end(),
                                      other_group->ranges().begin(),
                                      other_group->ranges().end());
          for (LiveRange* member : other_group->ranges()) {
            grouper.AllocateRange(member);
            member->set_group(latest_grp);
          }
          // Clear the other group's ranges, so we avoid scheduling it.
          other_group->ranges().clear();
        }
      } else if (grouper.GetConflicts(other).Current() == nullptr) {
        grouper.AllocateRange(other);
        latest_grp->ranges().push_back(other);
        other->set_group(latest_grp);
      }
    }

    if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
      latest_grp->ranges().push_back(range);
      DCHECK(latest_grp->ranges().size() > 1);
      groups().push_back(latest_grp);
      range->set_group(latest_grp);
    }
  }
}


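// Feeds the scheduler: every non-empty group, plus every unspilled, ungrouped
// child range that this allocator can process.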
void GreedyAllocator::ScheduleAllocationCandidates() {
  for (LiveRangeGroup* group : groups()) {
    if (group->ranges().size() > 0) {
      // We shouldn't have added single-range groups.
      DCHECK(group->ranges().size() != 1);
      scheduler().Schedule(group);
    }
  }
  for (LiveRange* range : data()->live_ranges()) {
    if (CanProcessRange(range)) {
      for (LiveRange* child = range; child != nullptr; child = child->next()) {
        if (!child->spilled() && child->group() == nullptr) {
          scheduler().Schedule(child);
        }
      }
    }
  }
}


void GreedyAllocator::TryAllocateCandidate(
    const AllocationCandidate& candidate) {
  if (candidate.is_group()) {
    TryAllocateGroup(candidate.group());
  } else {
    TryAllocateLiveRange(candidate.live_range());
  }
}


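// Tries to find a single register for all members of |group|: the first free
// register if one exists, otherwise a register whose conflicting ranges all
// weigh less than the group and can therefore be evicted. If neither is found,
// the members are rescheduled individually.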
void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
  float group_weight = 0.0;
  for (LiveRange* member : group->ranges()) {
    EnsureValidRangeWeight(member);
    group_weight = Max(group_weight, member->weight());
  }

  float eviction_weight = group_weight;
  int eviction_reg = -1;
  int free_reg = -1;
  for (int reg = 0; reg < num_registers(); ++reg) {
    float weight = GetMaximumConflictingWeight(reg, group, group_weight);
    if (weight == LiveRange::kInvalidWeight) {
      free_reg = reg;
      break;
    }
    if (weight < eviction_weight) {
      eviction_weight = weight;
      eviction_reg = reg;
    }
  }
  if (eviction_reg < 0 && free_reg < 0) {
    for (LiveRange* member : group->ranges()) {
      scheduler().Schedule(member);
    }
    return;
  }
  if (free_reg < 0) {
    DCHECK(eviction_reg >= 0);
    for (LiveRange* member : group->ranges()) {
      EvictAndRescheduleConflicts(eviction_reg, member);
    }
    free_reg = eviction_reg;
  }

  DCHECK(free_reg >= 0);
  for (LiveRange* member : group->ranges()) {
    AssignRangeToRegister(free_reg, member);
  }
}


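// Tries to allocate a single range: first at its hinted register, then at the
// first free register, then at the register whose conflicting ranges are the
// cheapest to evict; if all of that fails, the range is split or spilled.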
void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
  // TODO(mtrofin): once we introduce groups, we'll want to first try and
  // allocate at the preferred register.
  TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
        range->relative_id());
  int free_reg = -1;
  int evictable_reg = -1;
  int hinted_reg = -1;

  EnsureValidRangeWeight(range);
  float competing_weight = range->weight();
  DCHECK(competing_weight != LiveRange::kInvalidWeight);

  // Can we allocate at the hinted register?
  if (range->FirstHintPosition(&hinted_reg) != nullptr) {
    DCHECK(hinted_reg >= 0);
    float max_conflict_weight =
        GetMaximumConflictingWeight(hinted_reg, range, competing_weight);
    if (max_conflict_weight == LiveRange::kInvalidWeight) {
      free_reg = hinted_reg;
    } else if (max_conflict_weight < range->weight()) {
      evictable_reg = hinted_reg;
    }
  }

  if (free_reg < 0 && evictable_reg < 0) {
    // There was no hinted reg, or we cannot allocate there.
    float smallest_weight = LiveRange::kMaxWeight;

    // Seek either the first free register, or, from the set of registers
    // where the maximum conflict is lower than the candidate's weight, the one
    // with the smallest such weight.
    for (int i = 0; i < num_registers(); i++) {
      // Skip unnecessarily re-visiting the hinted register, if any.
      if (i == hinted_reg) continue;
      float max_conflict_weight =
          GetMaximumConflictingWeight(i, range, competing_weight);
      if (max_conflict_weight == LiveRange::kInvalidWeight) {
        free_reg = i;
        break;
      }
      if (max_conflict_weight < range->weight() &&
          max_conflict_weight < smallest_weight) {
        smallest_weight = max_conflict_weight;
        evictable_reg = i;
      }
    }
  }

  // We have a free register, so we use it.
  if (free_reg >= 0) {
    TRACE("Found free register %s for live range %d:%d.\n",
          RegisterName(free_reg), range->TopLevel()->vreg(),
          range->relative_id());
    AssignRangeToRegister(free_reg, range);
    return;
  }

  // We found a register to perform evictions, so we evict and allocate our
  // candidate.
  if (evictable_reg >= 0) {
    TRACE("Found evictable register %s for live range %d:%d.\n",
          RegisterName(evictable_reg), range->TopLevel()->vreg(),
          range->relative_id());
    EvictAndRescheduleConflicts(evictable_reg, range);
    AssignRangeToRegister(evictable_reg, range);
    return;
  }

  // The range needs to be split or spilled.
  SplitOrSpillBlockedRange(range);
}


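// Evicts every range currently allocated to |reg_id| that conflicts with
// |range|, updates its weight, and puts it back on the scheduler.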
void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
                                                  const LiveRange* range) {
  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
       conflict = conflicts.RemoveCurrentAndGetNext()) {
    DCHECK(conflict->HasRegisterAssigned());
    CHECK(!conflict->TopLevel()->IsFixed());
    conflict->UnsetAssignedRegister();
    UnsetOperands(conflict, data());
    UpdateWeightAtEviction(conflict);
    scheduler().Schedule(conflict);
    TRACE("Evicted range %d:%d.\n", conflict->TopLevel()->vreg(),
          conflict->relative_id());
  }
}


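// Ranges defined by a spill operand already live in memory at their
// definition. Spill them entirely when no use benefits from a register, or
// split them and spill the head up to the first register-beneficial use,
// unless that use is too close to the range's start.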
void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
  size_t initial_range_count = data()->live_ranges().size();
  for (size_t i = 0; i < initial_range_count; ++i) {
    TopLevelLiveRange* range = data()->live_ranges()[i];
    if (!CanProcessRange(range)) continue;
    if (!range->HasSpillOperand()) continue;

    LifetimePosition start = range->Start();
    TRACE("Live range %d:%d is defined by a spill operand.\n",
          range->TopLevel()->vreg(), range->relative_id());
    auto next_pos = start;
    if (next_pos.IsGapPosition()) {
      next_pos = next_pos.NextStart();
    }
    auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
    // If the range already has a spill operand and it doesn't need a
    // register immediately, split it and spill the first part of the range.
    if (pos == nullptr) {
      Spill(range);
    } else if (pos->pos() > range->Start().NextStart()) {
      // Do not spill the live range eagerly if the use position that can
      // benefit from the register is too close to the start of the live range.
      auto split_pos = GetSplitPositionForInstruction(
          range, pos->pos().ToInstructionIndex());
      // There is no place to split, so we can't split and spill.
      if (!split_pos.IsValid()) continue;

      split_pos =
          FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);

      Split(range, data(), split_pos);
      Spill(range);
    }
  }
}


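// Top-level driver: pre-process ranges defined by memory operands, build phi
// groups, schedule candidates, pre-allocate the fixed ranges, then drain the
// scheduler until every candidate is either allocated, split, or spilled.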
void GreedyAllocator::AllocateRegisters() {
  CHECK(scheduler().empty());
  CHECK(allocations_.empty());

  TRACE("Begin allocating function %s with the Greedy Allocator\n",
        data()->debug_name());

  SplitAndSpillRangesDefinedByMemoryOperand();
  GroupLiveRanges();
  ScheduleAllocationCandidates();
  PreallocateFixedRanges();
  while (!scheduler().empty()) {
    AllocationCandidate candidate = scheduler().GetNext();
    TryAllocateCandidate(candidate);
  }

  for (size_t i = 0; i < allocations_.size(); ++i) {
    if (!allocations_[i]->empty()) {
      data()->MarkAllocated(mode(), static_cast<int>(i));
    }
  }
  allocations_.clear();

  TryReuseSpillRangesForGroups();

  TRACE("End allocating function %s with the Greedy Allocator\n",
        data()->debug_name());
}


void GreedyAllocator::TryReuseSpillRangesForGroups() {
  for (TopLevelLiveRange* top : data()->live_ranges()) {
    if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
      continue;
    }

    SpillRange* spill_range = nullptr;
    for (LiveRange* member : top->group()->ranges()) {
      if (!member->TopLevel()->HasSpillRange()) continue;
      SpillRange* member_range = member->TopLevel()->GetSpillRange();
      if (spill_range == nullptr) {
        spill_range = member_range;
      } else {
        // This may not always succeed, because we group non-conflicting ranges
        // that may have been splintered, and the splinters may cause conflicts
        // in the spill ranges.
        // TODO(mtrofin): should the splinters own their own spill ranges?
        spill_range->TryMerge(member_range);
      }
    }
  }
}


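// Returns the heaviest weight among the ranges already allocated to |reg_id|
// that conflict with |range|: kInvalidWeight if there is no conflict, and
// kMaxWeight as soon as a conflict at least as heavy as |competing_weight| is
// found, meaning eviction would not pay off.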
float GreedyAllocator::GetMaximumConflictingWeight(
    unsigned reg_id, const LiveRange* range, float competing_weight) const {
  float ret = LiveRange::kInvalidWeight;

  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
       conflict = conflicts.GetNext()) {
    DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
    if (competing_weight <= conflict->weight()) return LiveRange::kMaxWeight;
    ret = Max(ret, conflict->weight());
    DCHECK(ret < LiveRange::kMaxWeight);
  }

  return ret;
}


float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
                                                   const LiveRangeGroup* group,
                                                   float group_weight) const {
  float ret = LiveRange::kInvalidWeight;

  for (LiveRange* member : group->ranges()) {
    float member_conflict_weight =
        GetMaximumConflictingWeight(reg_id, member, group_weight);
    if (member_conflict_weight == LiveRange::kMaxWeight) {
      return LiveRange::kMaxWeight;
    }
    if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
    ret = Max(member_conflict_weight, ret);
  }

  return ret;
}


void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
  // The live range weight will be invalidated when ranges are created or split.
  // Otherwise, it is consistently updated when the range is allocated or
  // unallocated.
  if (range->weight() != LiveRange::kInvalidWeight) return;

  if (range->TopLevel()->IsFixed()) {
    range->set_weight(LiveRange::kMaxWeight);
    return;
  }
  if (!IsProgressPossible(range, code())) {
    range->set_weight(LiveRange::kMaxWeight);
    return;
  }

  float use_count = 0.0;
  for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
    ++use_count;
  }
  range->set_weight(use_count / static_cast<float>(range->GetSize()));
}


void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
  LifetimePosition start = range->Start();
  CHECK(range->CanBeSpilled(start));

  DCHECK(range->NextRegisterPosition(start) == nullptr);
  Spill(range);
}


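// Scans |range| for the first call instruction it covers. If one is found, the
// range is split before the call (the head is rescheduled), the part crossing
// the call is spilled, and the remainder after the call, if any, is returned.
// If the range covers no call, it is returned unchanged.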
LiveRange* GreedyAllocator::GetRemainderAfterSplittingAroundFirstCall(
    LiveRange* range) {
  LiveRange* ret = range;
  for (UseInterval* interval = range->first_interval(); interval != nullptr;
       interval = interval->next()) {
    LifetimePosition start = interval->start();
    LifetimePosition end = interval->end();
    // If the interval starts at instruction end, then the first instruction
    // in the interval is the next one.
    int first_full_instruction = (start.IsGapPosition() || start.IsStart())
                                     ? start.ToInstructionIndex()
                                     : start.ToInstructionIndex() + 1;
    // If the interval ends in a gap or at instruction start, then the last
    // instruction is the previous one.
    int last_full_instruction = (end.IsGapPosition() || end.IsStart())
                                    ? end.ToInstructionIndex() - 1
                                    : end.ToInstructionIndex();

    for (int instruction_index = first_full_instruction;
         instruction_index <= last_full_instruction; ++instruction_index) {
      if (!code()->InstructionAt(instruction_index)->IsCall()) continue;

      LifetimePosition before =
          GetSplitPositionForInstruction(range, instruction_index);
      LiveRange* second_part =
          before.IsValid() ? Split(range, data(), before) : range;

      if (range != second_part) scheduler().Schedule(range);

      LifetimePosition after =
          FindSplitPositionAfterCall(second_part, instruction_index);

      if (after.IsValid()) {
        ret = Split(second_part, data(), after);
      } else {
        ret = nullptr;
      }
      Spill(second_part);
      return ret;
    }
  }
  return ret;
}


bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
  bool modified = false;

  while (range != nullptr) {
    LiveRange* remainder = GetRemainderAfterSplittingAroundFirstCall(range);
    // If we performed no modification, we're done.
    if (remainder == range) {
      break;
    }
    // We performed a modification.
    modified = true;
    range = remainder;
  }
  // If we have a remainder and we made modifications, it means the remainder
  // has no calls and we should schedule it for further processing. If we made
  // no modifications, we will just return false, because we want the algorithm
  // to make progress by trying some other heuristic.
  if (modified && range != nullptr) {
    DCHECK(!range->spilled());
    DCHECK(!range->HasRegisterAssigned());
    scheduler().Schedule(range);
  }
  return modified;
}


LifetimePosition GreedyAllocator::FindSplitPositionAfterCall(
    const LiveRange* range, int call_index) {
  LifetimePosition after_call =
      Max(range->Start(),
          LifetimePosition::GapFromInstructionIndex(call_index + 1));
  UsePosition* next_use = range->NextRegisterPosition(after_call);
  if (!next_use) return LifetimePosition::Invalid();

  LifetimePosition split_pos = FindOptimalSplitPos(after_call, next_use->pos());
  split_pos =
      GetSplitPositionForInstruction(range, split_pos.ToInstructionIndex());
  return split_pos;
}


LifetimePosition GreedyAllocator::FindSplitPositionBeforeLoops(
    LiveRange* range) {
  LifetimePosition end = range->End();
  if (end.ToInstructionIndex() >= code()->LastInstructionIndex()) {
    end =
        LifetimePosition::GapFromInstructionIndex(end.ToInstructionIndex() - 1);
  }
  LifetimePosition pos = FindOptimalSplitPos(range->Start(), end);
  pos = GetSplitPositionForInstruction(range, pos.ToInstructionIndex());
  return pos;
}


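// Handles a range that could not get a register: first try splitting around
// calls, then splitting before loops or at a last-resort position; if no valid
// split exists, spill the range.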
void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
  if (TrySplitAroundCalls(range)) return;

  LifetimePosition pos = FindSplitPositionBeforeLoops(range);

  if (!pos.IsValid()) pos = GetLastResortSplitPosition(range, code());
  if (pos.IsValid()) {
    LiveRange* tail = Split(range, data(), pos);
    DCHECK(tail != range);
    scheduler().Schedule(tail);
    scheduler().Schedule(range);
    return;
  }
  SpillRangeAsLastResort(range);
}


}  // namespace compiler
}  // namespace internal
}  // namespace v8