// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/greedy-allocator.h"
#include "src/compiler/register-allocator.h"

namespace v8 {
namespace internal {
namespace compiler {


#define TRACE(...)                             \
  do {                                         \
    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
  } while (false)


const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;


namespace {
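// Helpers for keeping hint information consistent with allocation decisions:
// UpdateOperands propagates a newly assigned register to the range's use
// hints and, for phis, to the phi map; UnsetOperands reverts both on eviction.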

void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
  int reg_id = range->assigned_register();
  range->SetUseHints(reg_id);
  if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
    data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id);
  }
}


void UnsetOperands(LiveRange* range, RegisterAllocationData* data) {
  range->UnsetUseHints();
  if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
    data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister();
  }
}


LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
                 LifetimePosition pos) {
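  // The split position must lie strictly inside the range, and may only name
  // the last instruction of a block if it is a gap or instruction-start
  // position (see the DCHECKs below).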
  DCHECK(range->Start() < pos && pos < range->End());
  DCHECK(pos.IsStart() || pos.IsGapPosition() ||
         (data->code()
              ->GetInstructionBlock(pos.ToInstructionIndex())
              ->last_instruction_index() != pos.ToInstructionIndex()));
  LiveRange* result = range->SplitAt(pos, data->allocation_zone());
  return result;
}


}  // namespace


AllocationCandidate AllocationScheduler::GetNext() {
  DCHECK(!queue_.empty());
  AllocationCandidate ret = queue_.top();
  queue_.pop();
  return ret;
}


void AllocationScheduler::Schedule(LiveRange* range) {
  TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
        range->relative_id());
  queue_.push(AllocationCandidate(range));
}


void AllocationScheduler::Schedule(LiveRangeGroup* group) {
  queue_.push(AllocationCandidate(group));
}

GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
                                 RegisterKind kind, Zone* local_zone)
    : RegisterAllocator(data, kind),
      local_zone_(local_zone),
      allocations_(local_zone),
      scheduler_(local_zone),
      groups_(local_zone) {}


void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
  TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
        range->TopLevel()->vreg(), range->relative_id());

  DCHECK(!range->HasRegisterAssigned());

  AllocateRegisterToRange(reg_id, range);

  TRACE("Assigning %s to range %d:%d.\n", RegisterName(reg_id),
        range->TopLevel()->vreg(), range->relative_id());
  range->set_assigned_register(reg_id);
  UpdateOperands(range, data());
}


void GreedyAllocator::PreallocateFixedRanges() {
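  // Place fixed ranges in their pre-assigned registers up front, so they show
  // up as conflicts for all regular ranges processed later.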
  allocations_.resize(num_registers());
  for (int i = 0; i < num_registers(); i++) {
    allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
  }

  for (LiveRange* fixed_range : GetFixedRegisters()) {
    if (fixed_range != nullptr) {
      DCHECK_EQ(mode(), fixed_range->kind());
      DCHECK(fixed_range->TopLevel()->IsFixed());

      int reg_nr = fixed_range->assigned_register();
      EnsureValidRangeWeight(fixed_range);
      AllocateRegisterToRange(reg_nr, fixed_range);
    }
  }
}


void GreedyAllocator::GroupLiveRanges() {
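  // Attempt to group each phi with its non-conflicting inputs, so the whole
  // group can later be allocated to a single register and the moves implied
  // by the phi become redundant.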
  CoalescedLiveRanges grouper(local_zone());
  for (TopLevelLiveRange* range : data()->live_ranges()) {
    grouper.clear();
    // Skip splinters: we do not want to optimize for them, and any moves
    // caused by assigning them different registers occur in deferred blocks.
    if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
      continue;
    }

    // A phi can't be a memory operand, so it couldn't have been split.
    DCHECK(!range->spilled());

    // Maybe this phi range is itself an input to another phi which was already
    // processed.
    LiveRangeGroup* latest_grp = range->group() != nullptr
                                     ? range->group()
                                     : new (local_zone())
                                           LiveRangeGroup(local_zone());

    // Populate the grouper.
    if (range->group() == nullptr) {
      grouper.AllocateRange(range);
    } else {
      for (LiveRange* member : range->group()->ranges()) {
        grouper.AllocateRange(member);
      }
    }
    for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
      // Skip an output that is also an input, which may happen for loops.
      if (j == range->vreg()) continue;

      TopLevelLiveRange* other_top = data()->live_ranges()[j];

      if (other_top->IsSplinter()) continue;
      // If the other was a memory operand, it might have been split.
      // So get the unsplit part.
      LiveRange* other =
          other_top->next() == nullptr ? other_top : other_top->next();

      if (other->spilled()) continue;

      LiveRangeGroup* other_group = other->group();
      if (other_group != nullptr) {
        bool can_merge = true;
        for (LiveRange* member : other_group->ranges()) {
          if (grouper.GetConflicts(member).Current() != nullptr) {
            can_merge = false;
            break;
          }
        }
        // If no member conflicts with the current group, then since the
        // members don't conflict with each other either, we can merge them.
        if (can_merge) {
          latest_grp->ranges().insert(latest_grp->ranges().end(),
                                      other_group->ranges().begin(),
                                      other_group->ranges().end());
          for (LiveRange* member : other_group->ranges()) {
            grouper.AllocateRange(member);
            member->set_group(latest_grp);
          }
          // Clear the other group, so we avoid scheduling it.
          other_group->ranges().clear();
        }
      } else if (grouper.GetConflicts(other).Current() == nullptr) {
        grouper.AllocateRange(other);
        latest_grp->ranges().push_back(other);
        other->set_group(latest_grp);
      }
    }

    if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
      latest_grp->ranges().push_back(range);
      DCHECK(latest_grp->ranges().size() > 1);
      groups().push_back(latest_grp);
      range->set_group(latest_grp);
    }
  }
}


void GreedyAllocator::ScheduleAllocationCandidates() {
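  // Enqueue all multi-range groups, then every unspilled, ungrouped child
  // range; the scheduler's priority queue decides the allocation order.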
  for (LiveRangeGroup* group : groups()) {
    if (group->ranges().size() > 0) {
      // We shouldn't have added single-range groups.
      DCHECK(group->ranges().size() != 1);
      scheduler().Schedule(group);
    }
  }
  for (LiveRange* range : data()->live_ranges()) {
    if (CanProcessRange(range)) {
      for (LiveRange* child = range; child != nullptr; child = child->next()) {
        if (!child->spilled() && child->group() == nullptr) {
          scheduler().Schedule(child);
        }
      }
    }
  }
}


void GreedyAllocator::TryAllocateCandidate(
    const AllocationCandidate& candidate) {
  if (candidate.is_group()) {
    TryAllocateGroup(candidate.group());
  } else {
    TryAllocateLiveRange(candidate.live_range());
  }
}


void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
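  // A group is allocated as a unit: find a register that is either free for
  // every member, or whose conflicts all weigh less than the group. If
  // neither exists, fall back to scheduling the members individually.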
  float group_weight = 0.0;
  for (LiveRange* member : group->ranges()) {
    EnsureValidRangeWeight(member);
    group_weight = Max(group_weight, member->weight());
  }

  float eviction_weight = group_weight;
  int eviction_reg = -1;
  int free_reg = -1;
  for (int i = 0; i < num_allocatable_registers(); ++i) {
    int reg = allocatable_register_code(i);
    float weight = GetMaximumConflictingWeight(reg, group, group_weight);
    if (weight == LiveRange::kInvalidWeight) {
      free_reg = reg;
      break;
    }
    if (weight < eviction_weight) {
      eviction_weight = weight;
      eviction_reg = reg;
    }
  }
  if (eviction_reg < 0 && free_reg < 0) {
    for (LiveRange* member : group->ranges()) {
      scheduler().Schedule(member);
    }
    return;
  }
  if (free_reg < 0) {
    DCHECK(eviction_reg >= 0);
    for (LiveRange* member : group->ranges()) {
      EvictAndRescheduleConflicts(eviction_reg, member);
    }
    free_reg = eviction_reg;
  }

  DCHECK(free_reg >= 0);
  for (LiveRange* member : group->ranges()) {
    AssignRangeToRegister(free_reg, member);
  }
}


void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
  // TODO(mtrofin): once we introduce groups, we'll want to first try and
  // allocate at the preferred register.
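  // Preference order: the hinted register, then the first free register, then
  // the register whose conflicts are cheapest to evict; otherwise the range
  // must be split or spilled.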
  TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
        range->relative_id());
  int free_reg = -1;
  int evictable_reg = -1;
  int hinted_reg = -1;

  EnsureValidRangeWeight(range);
  float competing_weight = range->weight();
  DCHECK(competing_weight != LiveRange::kInvalidWeight);

  // Can we allocate at the hinted register?
  if (range->FirstHintPosition(&hinted_reg) != nullptr) {
    DCHECK(hinted_reg >= 0);
    float max_conflict_weight =
        GetMaximumConflictingWeight(hinted_reg, range, competing_weight);
    if (max_conflict_weight == LiveRange::kInvalidWeight) {
      free_reg = hinted_reg;
    } else if (max_conflict_weight < range->weight()) {
      evictable_reg = hinted_reg;
    }
  }

  if (free_reg < 0 && evictable_reg < 0) {
    // There was no hinted reg, or we cannot allocate there.
    float smallest_weight = LiveRange::kMaxWeight;

    // Seek either the first free register, or, from the set of registers
    // where the maximum conflict is lower than the candidate's weight, the one
    // with the smallest such weight.
    for (int i = 0; i < num_allocatable_registers(); i++) {
      int reg = allocatable_register_code(i);
      // Skip unnecessarily re-visiting the hinted register, if any.
      if (reg == hinted_reg) continue;
      float max_conflict_weight =
          GetMaximumConflictingWeight(reg, range, competing_weight);
      if (max_conflict_weight == LiveRange::kInvalidWeight) {
        free_reg = reg;
        break;
      }
      if (max_conflict_weight < range->weight() &&
          max_conflict_weight < smallest_weight) {
        smallest_weight = max_conflict_weight;
        evictable_reg = reg;
      }
    }
  }

  // We have a free register, so we use it.
  if (free_reg >= 0) {
    TRACE("Found free register %s for live range %d:%d.\n",
          RegisterName(free_reg), range->TopLevel()->vreg(),
          range->relative_id());
    AssignRangeToRegister(free_reg, range);
    return;
  }

  // We found a register to perform evictions, so we evict and allocate our
  // candidate.
  if (evictable_reg >= 0) {
    TRACE("Found evictable register %s for live range %d:%d.\n",
          RegisterName(evictable_reg), range->TopLevel()->vreg(),
          range->relative_id());
    EvictAndRescheduleConflicts(evictable_reg, range);
    AssignRangeToRegister(evictable_reg, range);
    return;
  }

  // The range needs to be split or spilled.
  SplitOrSpillBlockedRange(range);
}


void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
                                                  const LiveRange* range) {
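  // Remove every allocated range on reg_id that conflicts with the candidate,
  // and put the evicted ranges back on the scheduler for another attempt.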
  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
       conflict = conflicts.RemoveCurrentAndGetNext()) {
    DCHECK(conflict->HasRegisterAssigned());
    CHECK(!conflict->TopLevel()->IsFixed());
    conflict->UnsetAssignedRegister();
    UnsetOperands(conflict, data());
    UpdateWeightAtEviction(conflict);
    scheduler().Schedule(conflict);
    TRACE("Evicted range %d:%d.\n", conflict->TopLevel()->vreg(),
          conflict->relative_id());
  }
}


void GreedyAllocator::AllocateRegisters() {
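  // Allocation pipeline: split or spill ranges defined by memory operands,
  // group phis with their inputs, schedule the candidates, pre-place the
  // fixed ranges, then drain the scheduler until every candidate is handled.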
  CHECK(scheduler().empty());
  CHECK(allocations_.empty());

  TRACE("Begin allocating function %s with the Greedy Allocator\n",
        data()->debug_name());

  SplitAndSpillRangesDefinedByMemoryOperand(true);
  GroupLiveRanges();
  ScheduleAllocationCandidates();
  PreallocateFixedRanges();
  while (!scheduler().empty()) {
    AllocationCandidate candidate = scheduler().GetNext();
    TryAllocateCandidate(candidate);
  }

  for (size_t i = 0; i < allocations_.size(); ++i) {
    if (!allocations_[i]->empty()) {
      data()->MarkAllocated(mode(), static_cast<int>(i));
    }
  }
  allocations_.clear();

  TryReuseSpillRangesForGroups();

  TRACE("End allocating function %s with the Greedy Allocator\n",
        data()->debug_name());
}


void GreedyAllocator::TryReuseSpillRangesForGroups() {
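  // Try to merge the spill ranges of each phi group's members, so members
  // that end up spilled can share a single slot.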
  for (TopLevelLiveRange* top : data()->live_ranges()) {
    if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
      continue;
    }

    SpillRange* spill_range = nullptr;
    for (LiveRange* member : top->group()->ranges()) {
      if (!member->TopLevel()->HasSpillRange()) continue;
      SpillRange* member_range = member->TopLevel()->GetSpillRange();
      if (spill_range == nullptr) {
        spill_range = member_range;
      } else {
        // This may not always succeed, because we group non-conflicting ranges
        // that may have been splintered, and the splinters may cause conflicts
        // in the spill ranges.
        // TODO(mtrofin): should the splinters own their own spill ranges?
        spill_range->TryMerge(member_range);
      }
    }
  }
}


float GreedyAllocator::GetMaximumConflictingWeight(
    unsigned reg_id, const LiveRange* range, float competing_weight) const {
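  // Returns kInvalidWeight if reg_id is free for this range, kMaxWeight as
  // soon as any conflict is at least as heavy as competing_weight, and the
  // heaviest conflicting weight otherwise.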
  float ret = LiveRange::kInvalidWeight;

  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
       conflict = conflicts.GetNext()) {
    DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
    if (competing_weight <= conflict->weight()) return LiveRange::kMaxWeight;
    ret = Max(ret, conflict->weight());
    DCHECK(ret < LiveRange::kMaxWeight);
  }

  return ret;
}


float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
                                                   const LiveRangeGroup* group,
                                                   float group_weight) const {
  float ret = LiveRange::kInvalidWeight;

  for (LiveRange* member : group->ranges()) {
    float member_conflict_weight =
        GetMaximumConflictingWeight(reg_id, member, group_weight);
    if (member_conflict_weight == LiveRange::kMaxWeight) {
      return LiveRange::kMaxWeight;
    }
    if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
    ret = Max(member_conflict_weight, ret);
  }

  return ret;
}


void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
  // The live range weight will be invalidated when ranges are created or split.
  // Otherwise, it is consistently updated when the range is allocated or
  // unallocated.
  if (range->weight() != LiveRange::kInvalidWeight) return;
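
  // Fixed ranges, and ranges that can make no further progress by spilling or
  // splitting, get the maximum weight so they are never evicted; all other
  // ranges are weighted by static use density (uses per unit of range size).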
  if (range->TopLevel()->IsFixed()) {
    range->set_weight(LiveRange::kMaxWeight);
    return;
  }
  if (!IsProgressPossible(range)) {
    range->set_weight(LiveRange::kMaxWeight);
    return;
  }

  float use_count = 0.0;
  for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
    ++use_count;
  }
  range->set_weight(use_count / static_cast<float>(range->GetSize()));
}


void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
  LifetimePosition start = range->Start();
  CHECK(range->CanBeSpilled(start));

  DCHECK(range->NextRegisterPosition(start) == nullptr);
  Spill(range);
}


LiveRange* GreedyAllocator::GetRemainderAfterSplittingAroundFirstCall(
    LiveRange* range) {
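  // Find the first call instruction fully covered by this range. Split off
  // the piece containing the call and spill it; the part before the call is
  // rescheduled, and the part after it (nullptr if none) is returned.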
  LiveRange* ret = range;
  for (UseInterval* interval = range->first_interval(); interval != nullptr;
       interval = interval->next()) {
    LifetimePosition start = interval->start();
    LifetimePosition end = interval->end();
    // If the interval starts at instruction end, then the first instruction
    // in the interval is the next one.
    int first_full_instruction = (start.IsGapPosition() || start.IsStart())
                                     ? start.ToInstructionIndex()
                                     : start.ToInstructionIndex() + 1;
    // If the interval ends in a gap or at instruction start, then the last
    // instruction is the previous one.
    int last_full_instruction = (end.IsGapPosition() || end.IsStart())
                                    ? end.ToInstructionIndex() - 1
                                    : end.ToInstructionIndex();

    for (int instruction_index = first_full_instruction;
         instruction_index <= last_full_instruction; ++instruction_index) {
      if (!code()->InstructionAt(instruction_index)->IsCall()) continue;

      LifetimePosition before =
          GetSplitPositionForInstruction(range, instruction_index);
      LiveRange* second_part =
          before.IsValid() ? Split(range, data(), before) : range;

      if (range != second_part) scheduler().Schedule(range);

      LifetimePosition after =
          FindSplitPositionAfterCall(second_part, instruction_index);

      if (after.IsValid()) {
        ret = Split(second_part, data(), after);
      } else {
        ret = nullptr;
      }
      Spill(second_part);
      return ret;
    }
  }
  return ret;
}


bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
  bool modified = false;

  while (range != nullptr) {
    LiveRange* remainder = GetRemainderAfterSplittingAroundFirstCall(range);
    // If we performed no modification, we're done.
    if (remainder == range) {
      break;
    }
    // We performed a modification.
    modified = true;
    range = remainder;
  }
  // If we have a remainder and we made modifications, it means the remainder
  // has no calls and we should schedule it for further processing. If we made
  // no modifications, we will just return false, because we want the algorithm
  // to make progress by trying some other heuristic.
  if (modified && range != nullptr) {
    DCHECK(!range->spilled());
    DCHECK(!range->HasRegisterAssigned());
    scheduler().Schedule(range);
  }
  return modified;
}


LifetimePosition GreedyAllocator::FindSplitPositionAfterCall(
    const LiveRange* range, int call_index) {
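  // Find a position between the call and the next register use where the
  // range can be split; invalid if no register use follows the call.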
  LifetimePosition after_call =
      Max(range->Start(),
          LifetimePosition::GapFromInstructionIndex(call_index + 1));
  UsePosition* next_use = range->NextRegisterPosition(after_call);
  if (!next_use) return LifetimePosition::Invalid();

  LifetimePosition split_pos = FindOptimalSplitPos(after_call, next_use->pos());
  split_pos =
      GetSplitPositionForInstruction(range, split_pos.ToInstructionIndex());
  return split_pos;
}


LifetimePosition GreedyAllocator::FindSplitPositionBeforeLoops(
    LiveRange* range) {
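  // Pick a split position between the range's start and (clamped) end,
  // letting FindOptimalSplitPos hoist the split outside loops when possible.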
  LifetimePosition end = range->End();
  if (end.ToInstructionIndex() >= code()->LastInstructionIndex()) {
    end =
        LifetimePosition::GapFromInstructionIndex(end.ToInstructionIndex() - 1);
  }
  LifetimePosition pos = FindOptimalSplitPos(range->Start(), end);
  pos = GetSplitPositionForInstruction(range, pos.ToInstructionIndex());
  return pos;
}


void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
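  // Escalating heuristics for a range that found no register: split around
  // calls, split before loops, take a last-resort split, and finally spill.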
  if (TrySplitAroundCalls(range)) return;

  LifetimePosition pos = FindSplitPositionBeforeLoops(range);

  if (!pos.IsValid()) pos = GetLastResortSplitPosition(range);
  if (pos.IsValid()) {
    LiveRange* tail = Split(range, data(), pos);
    DCHECK(tail != range);
    scheduler().Schedule(tail);
    scheduler().Schedule(range);
    return;
  }
  SpillRangeAsLastResort(range);
}


// Basic heuristic for advancing the algorithm, if any other splitting heuristic
// failed.
LifetimePosition GreedyAllocator::GetLastResortSplitPosition(
    const LiveRange* range) {
  LifetimePosition previous = range->Start();
  for (UsePosition *pos = range->NextRegisterPosition(previous); pos != nullptr;
       previous = previous.NextFullStart(),
                   pos = range->NextRegisterPosition(previous)) {
    LifetimePosition optimal = FindOptimalSplitPos(previous, pos->pos());
    LifetimePosition before =
        GetSplitPositionForInstruction(range, optimal.ToInstructionIndex());
    if (before.IsValid()) return before;
    LifetimePosition after = GetSplitPositionForInstruction(
        range, pos->pos().ToInstructionIndex() + 1);
    if (after.IsValid()) return after;
  }
  return LifetimePosition::Invalid();
}


bool GreedyAllocator::IsProgressPossible(const LiveRange* range) {
  return range->CanBeSpilled(range->Start()) ||
         GetLastResortSplitPosition(range).IsValid();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8