// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include <unordered_map>

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/local-allocator.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/worklist.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/v8.h"
#include "src/v8threads.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";

// The following has to hold in order for {MarkingState::MarkBitFrom} to not
// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
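// Informally: the marking bitmap keeps one mark bit per word, and an object's
// color is read from the two bits covering its first two words (white "00",
// grey "10", black "11"). A minimum object size of two words therefore ensures
// that an object's color bits never overlap with those of a neighboring
// object.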

// =============================================================================
// Verifiers
// =============================================================================

#ifdef VERIFY_HEAP
namespace {

class MarkingVerifier : public ObjectVisitor, public RootVisitor {
 public:
  virtual void Run() = 0;

 protected:
  explicit MarkingVerifier(Heap* heap) : heap_(heap) {}

  virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;

  virtual void VerifyPointers(Object** start, Object** end) = 0;

  virtual bool IsMarked(HeapObject* object) = 0;

  virtual bool IsBlackOrGrey(HeapObject* object) = 0;

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

  void VerifyRoots(VisitMode mode);
  void VerifyMarkingOnPage(const Page* page, Address start, Address end);
  void VerifyMarking(NewSpace* new_space);
  void VerifyMarking(PagedSpace* paged_space);

  Heap* heap_;
};

void MarkingVerifier::VerifyRoots(VisitMode mode) {
  heap_->IterateStrongRoots(this, mode);
}

void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
                                          Address end) {
  HeapObject* object;
  Address next_object_must_be_here_or_later = start;
  for (Address current = start; current < end;) {
    object = HeapObject::FromAddress(current);
    // One word fillers at the end of a black area can be grey.
    if (IsBlackOrGrey(object) &&
        object->map() != heap_->one_pointer_filler_map()) {
      CHECK(IsMarked(object));
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(this);
      next_object_must_be_here_or_later = current + object->Size();
      // The object is either part of a black area created by black allocation
      // or a regular black object.
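      // (The CHECK below verifies this: either every mark bit covering the
      // object is set, or only the first two bits are set and the remaining
      // bits up to the next object are clear.)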
      CHECK(
          bitmap(page)->AllBitsSetInRange(
              page->AddressToMarkbitIndex(current),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
          bitmap(page)->AllBitsClearInRange(
              page->AddressToMarkbitIndex(current + kPointerSize * 2),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
      current = next_object_must_be_here_or_later;
    } else {
      current += kPointerSize;
    }
  }
}

void MarkingVerifier::VerifyMarking(NewSpace* space) {
  Address end = space->top();
  // The bottom position is at the start of its page. This allows us to use
  // page->area_start() as the start of the range on all pages.
  CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());

  PageRange range(space->bottom(), end);
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address limit = it != range.end() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarkingOnPage(page, page->area_start(), limit);
  }
}

void MarkingVerifier::VerifyMarking(PagedSpace* space) {
  for (Page* p : *space) {
    VerifyMarkingOnPage(p, p->area_start(), p->area_end());
  }
}

class FullMarkingVerifier : public MarkingVerifier {
 public:
  explicit FullMarkingVerifier(Heap* heap)
      : MarkingVerifier(heap),
        marking_state_(
            heap->mark_compact_collector()->non_atomic_marking_state()) {}

  void Run() override {
    VerifyRoots(VISIT_ONLY_STRONG);
    VerifyMarking(heap_->new_space());
    VerifyMarking(heap_->old_space());
    VerifyMarking(heap_->code_space());
    VerifyMarking(heap_->map_space());

    LargeObjectIterator it(heap_->lo_space());
    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
      if (marking_state_->IsBlackOrGrey(obj)) {
        obj->Iterate(this);
      }
    }
  }

 protected:
  Bitmap* bitmap(const MemoryChunk* chunk) override {
    return marking_state_->bitmap(chunk);
  }

  bool IsMarked(HeapObject* object) override {
    return marking_state_->IsBlack(object);
  }

  bool IsBlackOrGrey(HeapObject* object) override {
    return marking_state_->IsBlackOrGrey(object);
  }

  void VerifyPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(marking_state_->IsBlackOrGrey(object));
      }
    }
  }

  void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!host->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(host, &p);
    }
  }

 private:
  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

class YoungGenerationMarkingVerifier : public MarkingVerifier {
 public:
  explicit YoungGenerationMarkingVerifier(Heap* heap)
      : MarkingVerifier(heap),
        marking_state_(
            heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}

  Bitmap* bitmap(const MemoryChunk* chunk) override {
    return marking_state_->bitmap(chunk);
  }

  bool IsMarked(HeapObject* object) override {
    return marking_state_->IsGrey(object);
  }

  bool IsBlackOrGrey(HeapObject* object) override {
    return marking_state_->IsBlackOrGrey(object);
  }

  void Run() override {
    VerifyRoots(VISIT_ALL_IN_SCAVENGE);
    VerifyMarking(heap_->new_space());
  }

  void VerifyPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (!heap_->InNewSpace(object)) return;
        CHECK(IsMarked(object));
      }
    }
  }

 private:
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
 public:
  virtual void Run() = 0;

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    VerifyPointers(start, end);
  }

 protected:
  explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}

  inline Heap* heap() { return heap_; }

  virtual void VerifyPointers(Object** start, Object** end) = 0;

  void VerifyRoots(VisitMode mode);
  void VerifyEvacuationOnPage(Address start, Address end);
  void VerifyEvacuation(NewSpace* new_space);
  void VerifyEvacuation(PagedSpace* paged_space);

  Heap* heap_;
};

void EvacuationVerifier::VerifyRoots(VisitMode mode) {
  heap_->IterateStrongRoots(this, mode);
}

void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
  Address current = start;
  while (current < end) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (!object->IsFiller()) object->Iterate(this);
    current += object->Size();
  }
}

void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
  PageRange range(space->bottom(), space->top());
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address current = page->area_start();
    Address limit = it != range.end() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    VerifyEvacuationOnPage(current, limit);
  }
}

void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
  for (Page* p : *space) {
    if (p->IsEvacuationCandidate()) continue;
    if (p->Contains(space->top()))
      heap_->CreateFillerObjectAt(
          space->top(), static_cast<int>(space->limit() - space->top()),
          ClearRecordedSlots::kNo);

    VerifyEvacuationOnPage(p->area_start(), p->area_end());
  }
}

class FullEvacuationVerifier : public EvacuationVerifier {
 public:
  explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}

  void Run() override {
    VerifyRoots(VISIT_ALL);
    VerifyEvacuation(heap_->new_space());
    VerifyEvacuation(heap_->old_space());
    VerifyEvacuation(heap_->code_space());
    VerifyEvacuation(heap_->map_space());
  }

 protected:
  void VerifyPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (heap()->InNewSpace(object)) {
          CHECK(heap()->InToSpace(object));
        }
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};

class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
 public:
  explicit YoungGenerationEvacuationVerifier(Heap* heap)
      : EvacuationVerifier(heap) {}

  void Run() override {
    VerifyRoots(VISIT_ALL_IN_SCAVENGE);
    VerifyEvacuation(heap_->new_space());
    VerifyEvacuation(heap_->old_space());
    VerifyEvacuation(heap_->code_space());
    VerifyEvacuation(heap_->map_space());
  }

 protected:
  void VerifyPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
      }
    }
  }
};

}  // namespace
#endif  // VERIFY_HEAP

// =============================================================================
// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// =============================================================================

namespace {

// This root visitor walks all roots and creates items bundling objects that
// are then processed later on. Slots have to be dereferenced as they could
// live on the native (C++) stack, which requires filtering out the indirection.
template <class BatchedItem>
class RootMarkingVisitorSeedOnly : public RootVisitor {
 public:
  explicit RootMarkingVisitorSeedOnly(ItemParallelJob* job) : job_(job) {
    buffered_objects_.reserve(kBufferSize);
  }

  void VisitRootPointer(Root root, Object** p) override {
    if (!(*p)->IsHeapObject()) return;
    AddObject(*p);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      AddObject(*p);
    }
  }

  void FlushObjects() {
    job_->AddItem(new BatchedItem(std::move(buffered_objects_)));
    // Moving leaves the container in a valid but unspecified state. Reusing the
    // container requires a call without precondition that resets the state.
    buffered_objects_.clear();
    buffered_objects_.reserve(kBufferSize);
  }

 private:
  // Bundling several objects together in items avoids issues with allocating
  // and deallocating items; both are operations that are performed on the main
  // thread.
  static const int kBufferSize = 128;

  void AddObject(Object* object) {
    buffered_objects_.push_back(object);
    if (buffered_objects_.size() == kBufferSize) FlushObjects();
  }

  ItemParallelJob* job_;
  std::vector<Object*> buffered_objects_;
};

}  // namespace

static int NumberOfAvailableCores() {
  return Max(
      1, static_cast<int>(
             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
}

int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
  DCHECK_GT(pages, 0);
  return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
}

int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
                                                                 int slots) {
  DCHECK_GT(pages, 0);
  // Limit the number of update tasks as task creation often dominates the
  // actual work that is being done.
  const int kMaxPointerUpdateTasks = 8;
  const int kSlotsPerTask = 600;
  const int wanted_tasks =
      (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
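  // For example: with pages == 4 and slots == 3000, wanted_tasks is
  // Min(4, 3000 / 600) == 4; the result below is additionally capped by the
  // number of available cores and by kMaxPointerUpdateTasks.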
  return FLAG_parallel_pointer_update
             ? Min(kMaxPointerUpdateTasks,
                   Min(NumberOfAvailableCores(), wanted_tasks))
             : 1;
}

int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
    int pages) {
  DCHECK_GT(pages, 0);
  // No cap needed because all pages we need to process are fully filled with
  // interesting objects.
  return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
                                      : 1;
}

int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
  DCHECK_GT(pages, 0);
  if (!FLAG_minor_mc_parallel_marking) return 1;
  // Pages are not private to markers but we can still use them to estimate the
  // amount of marking that is required.
  const int kPagesPerTask = 2;
  const int wanted_tasks = Max(1, pages / kPagesPerTask);
  return Min(NumberOfAvailableCores(), Min(wanted_tasks, kNumMarkers));
}

MarkCompactCollector::MarkCompactCollector(Heap* heap)
    : MarkCompactCollectorBase(heap),
      page_parallel_job_semaphore_(0),
#ifdef DEBUG
      state_(IDLE),
#endif
      was_marked_incrementally_(false),
      evacuation_(false),
      compacting_(false),
      black_allocation_(false),
      have_code_to_deoptimize_(false),
      marking_worklist_(heap),
      sweeper_(heap, non_atomic_marking_state()) {
  old_to_new_slots_ = -1;
}

void MarkCompactCollector::SetUp() {
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
  marking_worklist()->SetUp();
}

void MinorMarkCompactCollector::SetUp() {}

void MarkCompactCollector::TearDown() {
  AbortCompaction();
  AbortWeakObjects();
  marking_worklist()->TearDown();
}

void MinorMarkCompactCollector::TearDown() {}

void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());
  p->MarkEvacuationCandidate();
  evacuation_candidates_.push_back(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()), number_of_pages,
         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}

bool MarkCompactCollector::StartCompaction() {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.empty());

    CollectEvacuationCandidates(heap()->old_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
    }

    compacting_ = !evacuation_candidates_.empty();
  }

  return compacting_;
}

void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();

  MarkLiveObjects();

  DCHECK(heap_->incremental_marking()->IsStopped());

  ClearNonLiveReferences();

  RecordObjectStats();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    FullMarkingVerifier verifier(heap());
    verifier.Run();
  }
#endif

  StartSweepSpaces();

  Evacuate();

  Finish();
}

#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  for (Page* p : *space) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  for (Page* p : PageRange(space->bottom(), space->top())) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    CHECK(non_atomic_marking_state()->IsWhite(obj));
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
                    MemoryChunk::FromAddress(obj->address())));
  }
}

void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (!code->is_optimized_code()) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP

void MarkCompactCollector::ClearMarkbitsInPagedSpace(PagedSpace* space) {
  for (Page* p : *space) {
    non_atomic_marking_state()->ClearLiveness(p);
  }
}

void MarkCompactCollector::ClearMarkbitsInNewSpace(NewSpace* space) {
  for (Page* p : *space) {
    non_atomic_marking_state()->ClearLiveness(p);
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_space());
  ClearMarkbitsInNewSpace(heap_->new_space());
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
}

class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
 public:
  SweeperTask(Isolate* isolate, Sweeper* sweeper,
              base::Semaphore* pending_sweeper_tasks,
              base::AtomicNumber<intptr_t>* num_sweeping_tasks,
              AllocationSpace space_to_start)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_sweeper_tasks_(pending_sweeper_tasks),
        num_sweeping_tasks_(num_sweeping_tasks),
        space_to_start_(space_to_start) {}

  virtual ~SweeperTask() {}

 private:
  void RunInternal() final {
    DCHECK_GE(space_to_start_, FIRST_SPACE);
    DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
    const int offset = space_to_start_ - FIRST_SPACE;
    const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
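    // Each task starts with its own space and then round-robins through the
    // remaining paged spaces, so concurrently running tasks tend to begin
    // sweeping different spaces.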
    for (int i = 0; i < num_spaces; i++) {
      const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
      DCHECK_GE(space_id, FIRST_SPACE);
      DCHECK_LE(space_id, LAST_PAGED_SPACE);
      sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
    }
    num_sweeping_tasks_->Decrement(1);
    pending_sweeper_tasks_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_sweeper_tasks_;
  base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
  AllocationSpace space_to_start_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};

void MarkCompactCollector::Sweeper::StartSweeping() {
  sweeping_in_progress_ = true;
  NonAtomicMarkingState* marking_state =
      heap_->mark_compact_collector()->non_atomic_marking_state();
  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
    std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
              [marking_state](Page* a, Page* b) {
                return marking_state->live_bytes(a) <
                       marking_state->live_bytes(b);
              });
  });
}

void MarkCompactCollector::Sweeper::StartSweeperTasks() {
  DCHECK_EQ(0, num_tasks_);
  DCHECK_EQ(0, num_sweeping_tasks_.Value());
  if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
    ForAllSweepingSpaces([this](AllocationSpace space) {
      if (space == NEW_SPACE) return;
      num_sweeping_tasks_.Increment(1);
      SweeperTask* task = new SweeperTask(heap_->isolate(), this,
                                          &pending_sweeper_tasks_semaphore_,
                                          &num_sweeping_tasks_, space);
      DCHECK_LT(num_tasks_, kMaxSweeperTasks);
      task_ids_[num_tasks_++] = task->id();
      V8::GetCurrentPlatform()->CallOnBackgroundThread(
          task, v8::Platform::kShortRunningTask);
    });
  }
}

void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
    Page* page) {
  if (!page->SweepingDone()) {
    ParallelSweepPage(page, page->owner()->identity());
    if (!page->SweepingDone()) {
      // We were not able to sweep that page, i.e., a concurrent
      // sweeper thread currently owns this page. Wait for the sweeper
      // thread to be done with this page.
      page->WaitUntilSweepingCompleted();
    }
  }
}

void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
  if (FLAG_concurrent_sweeping && sweeper().sweeping_in_progress()) {
    sweeper().ParallelSweepSpace(space->identity(), 0);
    space->RefillFreeList();
  }
}

Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  SweptList& list = swept_list_[space->identity()];
  if (!list.empty()) {
    auto last_page = list.back();
    list.pop_back();
    return last_page;
  }
  return nullptr;
}

void MarkCompactCollector::Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  ForAllSweepingSpaces(
      [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });

  if (FLAG_concurrent_sweeping) {
    for (int i = 0; i < num_tasks_; i++) {
      if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
          CancelableTaskManager::kTaskAborted) {
        pending_sweeper_tasks_semaphore_.Wait();
      }
    }
    num_tasks_ = 0;
    num_sweeping_tasks_.SetValue(0);
  }

  ForAllSweepingSpaces([this](AllocationSpace space) {
    if (space == NEW_SPACE) {
      swept_list_[NEW_SPACE].clear();
    }
    DCHECK(sweeping_list_[space].empty());
  });
  sweeping_in_progress_ = false;
}

void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
  if (!sweeping_in_progress_) return;
  if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
    for (Page* p : *heap_->new_space()) {
      SweepOrWaitUntilSweepingCompleted(p);
    }
  }
}

void MarkCompactCollector::EnsureSweepingCompleted() {
  if (!sweeper().sweeping_in_progress()) return;

  sweeper().EnsureCompleted();
  heap()->old_space()->RefillFreeList();
  heap()->code_space()->RefillFreeList();
  heap()->map_space()->RefillFreeList();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !evacuation()) {
    FullEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif

  if (heap()->memory_allocator()->unmapper()->has_delayed_chunks())
    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}

bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
  return num_sweeping_tasks_.Value() != 0;
}

void MarkCompactCollector::ComputeEvacuationHeuristics(
    size_t area_size, int* target_fragmentation_percent,
    size_t* max_evacuated_bytes) {
  // For the memory-reducing and optimize-for-memory modes we directly define
  // both constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
  const int kTargetFragmentationPercentForOptimizeMemory = 20;
  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start and switch to a trace-based (using compaction speed)
  // approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const size_t kMaxEvacuatedBytes = 4 * MB;
  // Time to take for a single area (=payload of page). Used as soon as there
  // exist enough compaction speed samples.
  const float kTargetMsPerArea = .5;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else if (heap()->ShouldOptimizeForMemoryUsage()) {
    *target_fragmentation_percent =
        kTargetFragmentationPercentForOptimizeMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
  } else {
    const double estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
      const double estimated_ms_per_area =
          1 + area_size / estimated_compaction_speed;
      *target_fragmentation_percent = static_cast<int>(
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
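      // Rough example (illustrative numbers only): with an area size of
      // ~500 KB and a measured compaction speed of ~1 MB/ms,
      // estimated_ms_per_area is about 1.5, so the target fragmentation
      // percent comes out near 100 - 100 * 0.5 / 1.5, i.e. roughly 67.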
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}


void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();
  size_t area_size = space->AreaSize();

  // Pairs of (live_bytes_in_page, page).
  typedef std::pair<size_t, Page*> LiveBytesPagePair;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

  DCHECK(!sweeping_in_progress());
  Page* owner_of_linear_allocation_area =
      space->top() == space->limit()
          ? nullptr
          : Page::FromAllocationAreaAddress(space->top());
  for (Page* p : *space) {
    if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
    // Invariant: Evacuation candidates are just created when marking is
    // started. This means that sweeping has finished. Furthermore, at the end
    // of a GC all evacuation candidates are cleared and their slot buffers are
    // released.
    CHECK(!p->IsEvacuationCandidate());
    CHECK_NULL(p->slot_set<OLD_TO_OLD>());
    CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
    CHECK(p->SweepingDone());
    DCHECK(p->area_size() == area_size);
    pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
  }

  int candidate_count = 0;
  size_t total_live_bytes = 0;

  const bool reduce_memory = heap()->ShouldReduceMemory();
  if (FLAG_manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
        AddEvacuationCandidate(p);
      }
    }
  } else if (FLAG_stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (i % 2 == 0) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        AddEvacuationCandidate(p);
      }
    }
  } else {
    // The following approach determines the pages that should be evacuated.
    //
    // We use two conditions to decide whether a page qualifies as an evacuation
    // candidate, or not:
    // * Target fragmentation: How fragmented is a page, i.e., the ratio
    //   between live bytes and the capacity of this page (= area).
    // * Evacuation quota: A global quota determining how many bytes should be
    //   compacted.
    //
    // The algorithm sorts all pages by live bytes and then iterates through
    // them starting with the page with the most free memory, adding them to the
    // set of evacuation candidates as long as both conditions (fragmentation
    // and quota) hold.
    size_t max_evacuated_bytes;
    int target_fragmentation_percent;
    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                &max_evacuated_bytes);

    const size_t free_bytes_threshold =
        target_fragmentation_percent * (area_size / 100);
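    // For instance (illustrative numbers only): with
    // target_fragmentation_percent == 70 and an area size of ~500 KB, only
    // pages with at least ~350 KB of free space can become candidates.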

    // Sort pages from the most free to the least free, then select
    // the first n pages for evacuation such that:
    // - the total size of evacuated objects does not exceed the specified
    // limit.
    // - fragmentation of (n+1)-th page does not exceed the specified limit.
    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;
              });
    for (size_t i = 0; i < pages.size(); i++) {
      size_t live_bytes = pages[i].first;
      DCHECK_GE(area_size, live_bytes);
      size_t free_bytes = area_size - live_bytes;
      if (FLAG_always_compact ||
          ((free_bytes >= free_bytes_threshold) &&
           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
        candidate_count++;
        total_live_bytes += live_bytes;
      }
      if (FLAG_trace_fragmentation_verbose) {
        PrintIsolate(isolate(),
                     "compaction-selection-page: space=%s free_bytes_page=%zu "
                     "fragmentation_limit_kb=%" PRIuS
                     " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
                     "compaction_limit_kb=%zu\n",
                     AllocationSpaceName(space->identity()), free_bytes / KB,
                     free_bytes_threshold / KB, target_fragmentation_percent,
                     total_live_bytes / KB, max_evacuated_bytes / KB);
      }
    }
    // How many pages we will allocate for the evacuated objects
    // in the worst case: ceil(total_live_bytes / area_size)
    int estimated_new_pages =
        static_cast<int>((total_live_bytes + area_size - 1) / area_size);
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    // Avoid (compact -> expand) cycles.
    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
      candidate_count = 0;
    }
    for (int i = 0; i < candidate_count; i++) {
      AddEvacuationCandidate(pages[i].second);
    }
  }

  if (FLAG_trace_fragmentation) {
    PrintIsolate(isolate(),
                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
                 "total_live_bytes=%zu\n",
                 AllocationSpaceName(space->identity()), reduce_memory,
                 candidate_count, total_live_bytes / KB);
  }
}


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    RememberedSet<OLD_TO_OLD>::ClearAll(heap());
    for (Page* p : evacuation_candidates_) {
      p->ClearEvacuationCandidate();
    }
    compacting_ = false;
    evacuation_candidates_.clear();
  }
  DCHECK(evacuation_candidates_.empty());
}


void MarkCompactCollector::Prepare() {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

#ifdef DEBUG
  DCHECK(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  DCHECK(!FLAG_never_compact || !FLAG_always_compact);

  // Instead of waiting we could also abort the sweeper threads here.
  EnsureSweepingCompleted();

  if (heap()->incremental_marking()->IsSweeping()) {
    heap()->incremental_marking()->Stop();
  }

  // If concurrent unmapping tasks are still running, we should wait for
  // them here.
  heap()->memory_allocator()->unmapper()->WaitUntilCompleted();

  heap()->concurrent_marking()->EnsureCompleted();

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
    heap()->incremental_marking()->Stop();
    heap()->incremental_marking()->AbortBlackAllocation();
    ClearMarkbits();
    AbortWeakCollections();
    AbortWeakObjects();
    AbortCompaction();
    heap_->local_embedder_heap_tracer()->AbortTracing();
    marking_worklist()->Clear();
    was_marked_incrementally_ = false;
  }

  if (!was_marked_incrementally_) {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue();
  }

  // Don't start compaction if we are in the middle of incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction();
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next(); space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }
  heap()->account_external_memory_concurrently_freed();

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);

  if (!heap()->delay_sweeper_tasks_for_testing_) {
    sweeper().StartSweeperTasks();
  }

  // The hashing of weak_object_to_code_table is no longer valid.
  heap()->weak_object_to_code_table()->Rehash();

  // Clear the marking state of live large objects.
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();

#ifdef DEBUG
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  // The stub caches are not traversed during GC; clear them to force
  // their lazy re-initialization. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->load_stub_cache()->Clear();
  isolate()->store_stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }

  heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection.  Before marking, all objects are in their normal state.  After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots.  It
// uses an explicit stack of pointers rather than recursion.  The young
// generation's inactive ('from') space is used as a marking stack.  The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal.  In that case, we set an
// overflow flag.  When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack.  Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking.  This process repeats until all reachable
// objects have been marked.

class MarkCompactMarkingVisitor final
    : public MarkingVisitor<MarkCompactMarkingVisitor> {
 public:
  explicit MarkCompactMarkingVisitor(MarkCompactCollector* collector)
      : MarkingVisitor<MarkCompactMarkingVisitor>(collector->heap(),
                                                  collector) {}

  V8_INLINE void VisitPointer(HeapObject* host, Object** p) final {
    MarkObjectByPointer(host, p);
  }

  V8_INLINE void VisitPointers(HeapObject* host, Object** start,
                               Object** end) final {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion &&
        V8_LIKELY(!FLAG_track_retaining_path)) {
      if (VisitUnmarkedObjects(host, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(host, p);
    }
  }

  // Marks the object black and pushes it on the marking stack.
  V8_INLINE void MarkObject(HeapObject* host, HeapObject* object) {
    collector_->MarkObject(host, object);
  }

  // Marks the object black without pushing it on the marking stack. Returns
  // true if object needed marking and false otherwise.
  V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object) {
    if (collector_->non_atomic_marking_state()->WhiteToBlack(object)) {
      if (V8_UNLIKELY(FLAG_track_retaining_path)) {
        heap_->AddRetainer(host, object);
      }
      return true;
    }
    return false;
  }

  V8_INLINE void MarkObjectByPointer(HeapObject* host, Object** p) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* target_object = HeapObject::cast(*p);
    collector_->RecordSlot(host, p, target_object);
    collector_->MarkObject(host, target_object);
  }

 protected:
  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  inline bool VisitUnmarkedObjects(HeapObject* host, Object** start,
                                   Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap_->isolate());
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector_->RecordSlot(host, p, o);
      HeapObject* obj = HeapObject::cast(o);
      VisitUnmarkedObject(obj);
    }
    return true;
  }

  // Visit an unmarked object.
  V8_INLINE void VisitUnmarkedObject(HeapObject* obj) {
    DCHECK(heap_->Contains(obj));
    if (collector_->non_atomic_marking_state()->WhiteToBlack(obj)) {
      Map* map = obj->map();
      // Mark the map pointer and the body.
      collector_->MarkObject(obj, map);
      Visit(map, obj);
    }
  }
};

void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
  for (Page* p : sweep_to_iterate_pages_) {
    if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
      p->ClearFlag(Page::SWEEP_TO_ITERATE);
      non_atomic_marking_state()->ClearLiveness(p);
    }
  }
  sweep_to_iterate_pages_.clear();
}

class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
 public:
  explicit RootMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitRootPointer(Root root, Object** p) final {
    MarkObjectByPointer(root, p);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) final {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
  }

 private:
  V8_INLINE void MarkObjectByPointer(Root root, Object** p) {
    if (!(*p)->IsHeapObject()) return;

    collector_->MarkRootObject(root, HeapObject::cast(*p));
    collector_->EmptyMarkingWorklist();
  }

  MarkCompactCollector* const collector_;
};

// This visitor is used to visit the body of special objects held alive by
// other roots.
//
// It is currently used for
// - Code held alive by the top optimized frame. This code cannot be deoptimized
// and thus has to be kept alive in an isolated way, i.e., it should not keep
// alive other code objects reachable through the weak list, but it should
// keep alive its embedded pointers (which would otherwise be dropped).
// - Prefix of the string table.
class MarkCompactCollector::CustomRootBodyMarkingVisitor final
    : public ObjectVisitor {
 public:
  explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointer(HeapObject* host, Object** p) final {
    MarkObject(host, *p);
  }

  void VisitPointers(HeapObject* host, Object** start, Object** end) final {
    for (Object** p = start; p < end; p++) MarkObject(host, *p);
  }

  // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.

  // Skip the weak next code link in a code object.
  void VisitNextCodeLink(Code* host, Object** p) override {}

 private:
  void MarkObject(HeapObject* host, Object* object) {
    if (!object->IsHeapObject()) return;
    collector_->MarkObject(host, HeapObject::cast(object));
  }

  MarkCompactCollector* const collector_;
};

class InternalizedStringTableCleaner : public ObjectVisitor {
 public:
  InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
      : heap_(heap), pointers_removed_(0), table_(table) {}

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    // Visit all HeapObject pointers in [start, end).
    Object* the_hole = heap_->the_hole_value();
    MarkCompactCollector::NonAtomicMarkingState* marking_state =
        heap_->mark_compact_collector()->non_atomic_marking_state();
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (o->IsHeapObject()) {
        HeapObject* heap_object = HeapObject::cast(o);
        if (marking_state->IsWhite(heap_object)) {
          pointers_removed_++;
          // Set the entry to the_hole_value (as deleted).
          *p = the_hole;
        } else {
          // StringTable contains only old space strings.
          DCHECK(!heap_->InNewSpace(o));
          MarkCompactCollector::RecordSlot(table_, p, o);
        }
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }

 private:
  Heap* heap_;
  int pointers_removed_;
  HeapObject* table_;
};

class ExternalStringTableCleaner : public RootVisitor {
 public:
  explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    // Visit all HeapObject pointers in [start, end).
    MarkCompactCollector::NonAtomicMarkingState* marking_state =
        heap_->mark_compact_collector()->non_atomic_marking_state();
    Object* the_hole = heap_->the_hole_value();
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (o->IsHeapObject()) {
        HeapObject* heap_object = HeapObject::cast(o);
        if (marking_state->IsWhite(heap_object)) {
          if (o->IsExternalString()) {
            heap_->FinalizeExternalString(String::cast(*p));
          } else {
            // The original external string may have been internalized.
            DCHECK(o->IsThinString());
          }
          // Set the entry to the_hole_value (as deleted).
          *p = the_hole;
        }
      }
    }
  }

 private:
  Heap* heap_;
};

// Helper class for pruning the string table.
class YoungGenerationExternalStringTableCleaner : public RootVisitor {
 public:
  YoungGenerationExternalStringTableCleaner(
      MinorMarkCompactCollector* collector)
      : heap_(collector->heap()),
        marking_state_(collector->non_atomic_marking_state()) {}

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    DCHECK_EQ(static_cast<int>(root),
              static_cast<int>(Root::kExternalStringsTable));
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (o->IsHeapObject()) {
        HeapObject* heap_object = HeapObject::cast(o);
        if (marking_state_->IsWhite(heap_object)) {
          if (o->IsExternalString()) {
            heap_->FinalizeExternalString(String::cast(*p));
          } else {
            // The original external string may have been internalized.
            DCHECK(o->IsThinString());
          }
          // Set the entry to the_hole_value (as deleted).
          *p = heap_->the_hole_value();
        }
      }
    }
  }

 private:
  Heap* heap_;
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

// Marked young generation objects and all old generation objects will be
// retained.
class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit MinorMarkCompactWeakObjectRetainer(
      MinorMarkCompactCollector* collector)
      : heap_(collector->heap()),
        marking_state_(collector->non_atomic_marking_state()) {}

  virtual Object* RetainAs(Object* object) {
    HeapObject* heap_object = HeapObject::cast(object);
    if (!heap_->InNewSpace(heap_object)) return object;

    // Young generation marking only marks to grey instead of black.
    DCHECK(!marking_state_->IsBlack(heap_object));
    if (marking_state_->IsGrey(heap_object)) {
      return object;
    }
    return nullptr;
  }

 private:
  Heap* heap_;
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit MarkCompactWeakObjectRetainer(
      MarkCompactCollector::NonAtomicMarkingState* marking_state)
      : marking_state_(marking_state) {}

  virtual Object* RetainAs(Object* object) {
    HeapObject* heap_object = HeapObject::cast(object);
    DCHECK(!marking_state_->IsGrey(heap_object));
    if (marking_state_->IsBlack(heap_object)) {
      return object;
    } else if (object->IsAllocationSite() &&
               !(AllocationSite::cast(object)->IsZombie())) {
      // "dead" AllocationSites need to live long enough for a traversal of new
      // space. These sites get a one-time reprieve.
      AllocationSite* site = AllocationSite::cast(object);
      site->MarkZombie();
      marking_state_->WhiteToBlack(site);
      return object;
    } else {
      return NULL;
    }
  }

 private:
  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};


// Fill the marking stack with overflowed objects returned by the given
// iterator.  Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template <class T>
void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  DCHECK(!marking_worklist()->IsFull());

  Map* filler_map = heap()->one_pointer_filler_map();
  for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
    if ((object->map() != filler_map) &&
        non_atomic_marking_state()->GreyToBlack(object)) {
      PushBlack(object);
      if (marking_worklist()->IsFull()) return;
    }
  }
}

void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
  DCHECK(!marking_worklist()->IsFull());
  for (auto object_and_size : LiveObjectRange<kGreyObjects>(
           p, non_atomic_marking_state()->bitmap(p))) {
    HeapObject* const object = object_and_size.first;
    bool success = non_atomic_marking_state()->GreyToBlack(object);
    DCHECK(success);
    USE(success);
    PushBlack(object);
    if (marking_worklist()->IsFull()) return;
  }
}

class RecordMigratedSlotVisitor : public ObjectVisitor {
 public:
  explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  inline void VisitPointer(HeapObject* host, Object** p) final {
    RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
  }

  inline void VisitPointers(HeapObject* host, Object** start,
                            Object** end) final {
    while (start < end) {
      RecordMigratedSlot(host, *start, reinterpret_cast<Address>(start));
      ++start;
    }
  }

  inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
    DCHECK_EQ(host, rinfo->host());
    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    // The target is always in old space, so we don't have to record the slot in
    // the old-to-new remembered set.
    DCHECK(!collector_->heap()->InNewSpace(target));
    collector_->RecordRelocSlot(host, rinfo, target);
  }

  inline void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    DCHECK_EQ(host, rinfo->host());
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    HeapObject* object = HeapObject::cast(rinfo->target_object());
    collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
    collector_->RecordRelocSlot(host, rinfo, object);
  }

  inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override {
    DCHECK_EQ(host, rinfo->host());
    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
    Code* stub = rinfo->code_age_stub();
    USE(stub);
    DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
  }

  // Entries that are skipped for recording.
  inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
  inline void VisitExternalReference(Foreign* host, Address* p) final {}
  inline void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) final {}
  inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}

 protected:
  inline virtual void RecordMigratedSlot(HeapObject* host, Object* value,
                                         Address slot) {
    if (value->IsHeapObject()) {
      Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
      if (p->InNewSpace()) {
        DCHECK_IMPLIES(p->InToSpace(),
                       p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
        RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      } else if (p->IsEvacuationCandidate()) {
        RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      }
    }
  }

  MarkCompactCollector* collector_;
};

class MigrationObserver {
 public:
  explicit MigrationObserver(Heap* heap) : heap_(heap) {}

  virtual ~MigrationObserver() {}
  virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                    int size) = 0;

 protected:
  Heap* heap_;
};

class ProfilingMigrationObserver final : public MigrationObserver {
 public:
  explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}

  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                   int size) final {
    if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
      PROFILE(heap_->isolate(),
              CodeMoveEvent(AbstractCode::cast(src), dst->address()));
    }
    heap_->OnMoveEvent(dst, src, size);
  }
};

class YoungGenerationMigrationObserver final : public MigrationObserver {
 public:
  YoungGenerationMigrationObserver(Heap* heap,
                                   MarkCompactCollector* mark_compact_collector)
      : MigrationObserver(heap),
        mark_compact_collector_(mark_compact_collector) {}

  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                   int size) final {
    // Migrate color to old generation marking in case the object survived young
    // generation garbage collection.
    if (heap_->incremental_marking()->IsMarking()) {
      DCHECK(
          heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
      heap_->incremental_marking()->TransferColor(src, dst);
    }
  }

 protected:
  base::Mutex mutex_;
  MarkCompactCollector* mark_compact_collector_;
};

class YoungGenerationRecordMigratedSlotVisitor final
    : public RecordMigratedSlotVisitor {
 public:
  explicit YoungGenerationRecordMigratedSlotVisitor(
      MarkCompactCollector* collector)
      : RecordMigratedSlotVisitor(collector) {}

  void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
  void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
    UNREACHABLE();
  }
  void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final {
    UNREACHABLE();
  }

 private:
  // Only record slots for host objects that are considered as live by the full
  // collector.
  inline bool IsLive(HeapObject* object) {
    return collector_->non_atomic_marking_state()->IsBlack(object);
  }

  inline void RecordMigratedSlot(HeapObject* host, Object* value,
                                 Address slot) final {
    if (value->IsHeapObject()) {
      Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
      if (p->InNewSpace()) {
        DCHECK_IMPLIES(p->InToSpace(),
                       p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
        RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      } else if (p->IsEvacuationCandidate() && IsLive(host)) {
        RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      }
    }
  }
};

class HeapObjectVisitor {
 public:
  virtual ~HeapObjectVisitor() {}
  virtual bool Visit(HeapObject* object, int size) = 0;
};

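// Base class for the evacuation visitors. It copies an object into a newly
// allocated slot of the target space, installs a forwarding address in the
// evacuated object, re-records slots of objects moved into the old generation
// through the given RecordMigratedSlotVisitor, and notifies any registered
// MigrationObservers.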
class EvacuateVisitorBase : public HeapObjectVisitor {
 public:
  void AddObserver(MigrationObserver* observer) {
    migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
    observers_.push_back(observer);
  }

 protected:
  enum MigrationMode { kFast, kObserved };

  typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
                                  HeapObject* src, int size,
                                  AllocationSpace dest);

  template <MigrationMode mode>
  static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
                               HeapObject* src, int size,
                               AllocationSpace dest) {
    Address dst_addr = dst->address();
    Address src_addr = src->address();
    DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
    DCHECK(dest != LO_SPACE);
    if (dest == OLD_SPACE) {
      DCHECK_OBJECT_SIZE(size);
      DCHECK(IsAligned(size, kPointerSize));
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      if (mode != MigrationMode::kFast)
        base->ExecuteMigrationObservers(dest, src, dst, size);
      dst->IterateBodyFast(dst->map()->instance_type(), size,
                           base->record_visitor_);
    } else if (dest == CODE_SPACE) {
      DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      Code::cast(dst)->Relocate(dst_addr - src_addr);
      if (mode != MigrationMode::kFast)
        base->ExecuteMigrationObservers(dest, src, dst, size);
      dst->IterateBodyFast(dst->map()->instance_type(), size,
                           base->record_visitor_);
    } else {
      DCHECK_OBJECT_SIZE(size);
      DCHECK(dest == NEW_SPACE);
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      if (mode != MigrationMode::kFast)
        base->ExecuteMigrationObservers(dest, src, dst, size);
    }
    base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
                        reinterpret_cast<base::AtomicWord>(dst_addr));
  }

  EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
                      RecordMigratedSlotVisitor* record_visitor)
      : heap_(heap),
        local_allocator_(local_allocator),
        record_visitor_(record_visitor) {
    migration_function_ = RawMigrateObject<MigrationMode::kFast>;
  }

  inline bool TryEvacuateObject(AllocationSpace target_space,
                                HeapObject* object, int size,
                                HeapObject** target_object) {
#ifdef VERIFY_HEAP
    if (AbortCompactionForTesting(object)) return false;
#endif  // VERIFY_HEAP
    AllocationAlignment alignment = object->RequiredAlignment();
    AllocationResult allocation =
        local_allocator_->Allocate(target_space, size, alignment);
    if (allocation.To(target_object)) {
      MigrateObject(*target_object, object, size, target_space);
      return true;
    }
    return false;
  }

  inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
                                        HeapObject* dst, int size) {
    for (MigrationObserver* obs : observers_) {
      obs->Move(dest, src, dst, size);
    }
  }

  inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                            AllocationSpace dest) {
    migration_function_(this, dst, src, size, dest);
  }

#ifdef VERIFY_HEAP
  bool AbortCompactionForTesting(HeapObject* object) {
    if (FLAG_stress_compaction) {
      const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
                             Page::kPageAlignmentMask & ~kPointerAlignmentMask;
      if ((reinterpret_cast<uintptr_t>(object->address()) &
           Page::kPageAlignmentMask) == mask) {
        Page* page = Page::FromAddress(object->address());
        if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
          page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
        } else {
          page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
          return true;
        }
      }
    }
    return false;
  }
#endif  // VERIFY_HEAP

  Heap* heap_;
  LocalAllocator* local_allocator_;
  RecordMigratedSlotVisitor* record_visitor_;
  std::vector<MigrationObserver*> observers_;
  MigrateFunction migration_function_;
};

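// Evacuates live new-space objects: objects past the age mark are promoted
// into old space, the remaining objects are copied within new space (falling
// back to old space if the new-space allocation fails). Pretenuring feedback
// is collected for objects that are not promoted.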
class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
 public:
  explicit EvacuateNewSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
                                   RecordMigratedSlotVisitor* record_visitor,
                                   base::HashMap* local_pretenuring_feedback)
      : EvacuateVisitorBase(heap, local_allocator, record_visitor),
        buffer_(LocalAllocationBuffer::InvalidBuffer()),
        promoted_size_(0),
        semispace_copied_size_(0),
        local_pretenuring_feedback_(local_pretenuring_feedback) {}

  inline bool Visit(HeapObject* object, int size) override {
    HeapObject* target_object = nullptr;
    if (heap_->ShouldBePromoted(object->address()) &&
        TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
      promoted_size_ += size;
      return true;
    }
    heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
                                               local_pretenuring_feedback_);
    HeapObject* target = nullptr;
    AllocationSpace space = AllocateTargetObject(object, size, &target);
    MigrateObject(HeapObject::cast(target), object, size, space);
    semispace_copied_size_ += size;
    return true;
  }

  intptr_t promoted_size() { return promoted_size_; }
  intptr_t semispace_copied_size() { return semispace_copied_size_; }

 private:
  inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
                                              HeapObject** target_object) {
    AllocationAlignment alignment = old_object->RequiredAlignment();
    AllocationSpace space_allocated_in = NEW_SPACE;
    AllocationResult allocation =
        local_allocator_->Allocate(NEW_SPACE, size, alignment);
    if (allocation.IsRetry()) {
      allocation = AllocateInOldSpace(size, alignment);
      space_allocated_in = OLD_SPACE;
    }
    bool ok = allocation.To(target_object);
    DCHECK(ok);
    USE(ok);
    return space_allocated_in;
  }

  inline AllocationResult AllocateInOldSpace(int size_in_bytes,
                                             AllocationAlignment alignment) {
    AllocationResult allocation =
        local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
    if (allocation.IsRetry()) {
      v8::internal::Heap::FatalProcessOutOfMemory(
          "MarkCompactCollector: semi-space copy, fallback in old gen", true);
    }
    return allocation;
  }

  LocalAllocationBuffer buffer_;
  intptr_t promoted_size_;
  intptr_t semispace_copied_size_;
  base::HashMap* local_pretenuring_feedback_;
};

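// Visitor for new-space pages that are promoted as a whole page, either
// within new space (NEW_TO_NEW) or into old space (NEW_TO_OLD). Objects stay
// in place: NEW_TO_NEW only updates pretenuring feedback, while NEW_TO_OLD
// re-records the outgoing slots of every live object.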
template <PageEvacuationMode mode>
class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
 public:
  explicit EvacuateNewSpacePageVisitor(
      Heap* heap, RecordMigratedSlotVisitor* record_visitor,
      base::HashMap* local_pretenuring_feedback)
      : heap_(heap),
        record_visitor_(record_visitor),
        moved_bytes_(0),
        local_pretenuring_feedback_(local_pretenuring_feedback) {}

  static void Move(Page* page) {
    switch (mode) {
      case NEW_TO_NEW:
        page->heap()->new_space()->MovePageFromSpaceToSpace(page);
        page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
        break;
      case NEW_TO_OLD: {
        page->Unlink();
        Page* new_page = Page::ConvertNewToOld(page);
        DCHECK(!new_page->InNewSpace());
        new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
        break;
      }
    }
  }

  inline bool Visit(HeapObject* object, int size) {
    if (mode == NEW_TO_NEW) {
      heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
                                                 local_pretenuring_feedback_);
    } else if (mode == NEW_TO_OLD) {
      object->IterateBodyFast(record_visitor_);
    }
    return true;
  }

  intptr_t moved_bytes() { return moved_bytes_; }
  void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }

 private:
  Heap* heap_;
  RecordMigratedSlotVisitor* record_visitor_;
  intptr_t moved_bytes_;
  base::HashMap* local_pretenuring_feedback_;
};

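// Evacuates live objects from evacuation candidate pages into the space the
// page belongs to; Visit() returns false if no target slot could be
// allocated.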
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
 public:
  EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
                          RecordMigratedSlotVisitor* record_visitor)
      : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}

  inline bool Visit(HeapObject* object, int size) override {
    HeapObject* target_object = nullptr;
    if (TryEvacuateObject(
            Page::FromAddress(object->address())->owner()->identity(), object,
            size, &target_object)) {
      DCHECK(object->map_word().IsForwardingAddress());
      return true;
    }
    return false;
  }
};

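// Visitor for pages whose evacuation was aborted: objects are left in place
// and only their outgoing slots are re-recorded in the remembered sets.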
class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
 public:
  explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}

  inline bool Visit(HeapObject* object, int size) {
    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
    object->IterateBody(&visitor);
    return true;
  }

 private:
  Heap* heap_;
};

void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
  for (Page* p : *space) {
    DiscoverGreyObjectsOnPage(p);
    if (marking_worklist()->IsFull()) return;
  }
}


void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
  NewSpace* space = heap()->new_space();
  for (Page* page : PageRange(space->bottom(), space->top())) {
    DiscoverGreyObjectsOnPage(page);
    if (marking_worklist()->IsFull()) return;
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  Object* o = *p;
  if (!o->IsHeapObject()) return false;
  HeapObject* heap_object = HeapObject::cast(o);
  return heap_object->GetHeap()
      ->mark_compact_collector()
      ->non_atomic_marking_state()
      ->IsWhite(HeapObject::cast(o));
}

void MarkCompactCollector::MarkStringTable(
    ObjectVisitor* custom_root_body_visitor) {
  StringTable* string_table = heap()->string_table();
  // Mark the string table itself.
  if (non_atomic_marking_state()->WhiteToBlack(string_table)) {
    // Explicitly mark the prefix.
    string_table->IteratePrefix(custom_root_body_visitor);
    ProcessMarkingWorklist();
  }
}

void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
                                     ObjectVisitor* custom_root_body_visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);

  // Custom marking for string table and top optimized frame.
  MarkStringTable(custom_root_body_visitor);
  ProcessTopOptimizedFrame(custom_root_body_visitor);

  // There may be overflowed objects in the heap.  Visit them now.
  while (marking_worklist()->overflowed()) {
    RefillMarkingWorklist();
    EmptyMarkingWorklist();
  }
}

// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingWorklist() {
  HeapObject* object;
  MarkCompactMarkingVisitor visitor(this);
  while ((object = marking_worklist()->Pop()) != nullptr) {
    DCHECK(!object->IsFiller());
    DCHECK(object->IsHeapObject());
    DCHECK(heap()->Contains(object));
    DCHECK(!(non_atomic_marking_state()->IsWhite(object)));

    Map* map = object->map();
    MarkObject(object, map);
    visitor.Visit(map, object);
  }
  DCHECK(marking_worklist()->IsEmpty());
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack.  Stop early if the marking stack fills
// before sweeping completes.  If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingWorklist() {
  isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
  DCHECK(marking_worklist()->overflowed());

  DiscoverGreyObjectsInNewSpace();
  if (marking_worklist()->IsFull()) return;

  DiscoverGreyObjectsInSpace(heap()->old_space());
  if (marking_worklist()->IsFull()) return;
  DiscoverGreyObjectsInSpace(heap()->code_space());
  if (marking_worklist()->IsFull()) return;
  DiscoverGreyObjectsInSpace(heap()->map_space());
  if (marking_worklist()->IsFull()) return;
  LargeObjectIterator lo_it(heap()->lo_space());
  DiscoverGreyObjectsWithIterator(&lo_it);
  if (marking_worklist()->IsFull()) return;

  marking_worklist()->ClearOverflowed();
}

// Mark all objects reachable (transitively) from objects on the marking
// stack.  Before: the marking stack contains zero or more heap object
// pointers.  After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingWorklist() {
  EmptyMarkingWorklist();
  while (marking_worklist()->overflowed()) {
    RefillMarkingWorklist();
    EmptyMarkingWorklist();
  }
  DCHECK(marking_worklist()->IsEmpty());
}

// Mark all objects reachable (transitively) from objects on the marking
// stack including references only considered in the atomic marking pause.
void MarkCompactCollector::ProcessEphemeralMarking(
    bool only_process_harmony_weak_collections) {
  DCHECK(marking_worklist()->IsEmpty() && !marking_worklist()->overflowed());
  bool work_to_do = true;
  while (work_to_do) {
    if (!only_process_harmony_weak_collections) {
      if (heap_->local_embedder_heap_tracer()->InUse()) {
        TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
        heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
        heap_->local_embedder_heap_tracer()->Trace(
            0,
            EmbedderHeapTracer::AdvanceTracingActions(
                EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
      }
    } else {
      // TODO(mlippautz): We currently do not trace through blink when
      // discovering new objects reachable from weak roots (that have been made
      // strong). This is a limitation of not having a separate handle type
      // that doesn't require zapping before this phase. See crbug.com/668060.
      heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
    }
    ProcessWeakCollections();
    work_to_do = !marking_worklist()->IsEmpty();
    ProcessMarkingWorklist();
  }
  CHECK(marking_worklist()->IsEmpty());
  CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
}

void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
       !it.done(); it.Advance()) {
    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
      return;
    }
    if (it.frame()->type() == StackFrame::OPTIMIZED) {
      Code* code = it.frame()->LookupCode();
      if (!code->CanDeoptAt(it.frame()->pc())) {
        Code::BodyDescriptor::IterateBody(code, visitor);
      }
      ProcessMarkingWorklist();
      return;
    }
  }
}

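// Splits object statistics into live and dead buckets based on the full
// collector's mark bits: black objects are counted as live, white objects as
// dead.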
class ObjectStatsVisitor : public HeapObjectVisitor {
 public:
  ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
                     ObjectStats* dead_stats)
      : live_collector_(heap, live_stats),
        dead_collector_(heap, dead_stats),
        marking_state_(
            heap->mark_compact_collector()->non_atomic_marking_state()) {
    DCHECK_NOT_NULL(live_stats);
    DCHECK_NOT_NULL(dead_stats);
    // Global objects are roots and thus recorded as live.
    live_collector_.CollectGlobalStatistics();
  }

  bool Visit(HeapObject* obj, int size) override {
    if (marking_state_->IsBlack(obj)) {
      live_collector_.CollectStatistics(obj);
    } else {
      DCHECK(!marking_state_->IsGrey(obj));
      dead_collector_.CollectStatistics(obj);
    }
    return true;
  }

 private:
  ObjectStatsCollector live_collector_;
  ObjectStatsCollector dead_collector_;
  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
  SpaceIterator space_it(heap());
  HeapObject* obj = nullptr;
  while (space_it.has_next()) {
    std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
    ObjectIterator* obj_it = it.get();
    while ((obj = obj_it->Next()) != nullptr) {
      visitor->Visit(obj, obj->Size());
    }
  }
}

void MarkCompactCollector::RecordObjectStats() {
  if (V8_UNLIKELY(FLAG_gc_stats)) {
    heap()->CreateObjectStats();
    ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
                               heap()->dead_object_stats_);
    VisitAllObjects(&visitor);
    if (V8_UNLIKELY(FLAG_gc_stats &
                    v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
      std::stringstream live, dead;
      heap()->live_object_stats_->Dump(live);
      heap()->dead_object_stats_->Dump(dead);
      TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
                           "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
                           "live", TRACE_STR_COPY(live.str().c_str()), "dead",
                           TRACE_STR_COPY(dead.str().c_str()));
    }
    if (FLAG_trace_gc_object_stats) {
      heap()->live_object_stats_->PrintJSON("live");
      heap()->dead_object_stats_->PrintJSON("dead");
    }
    heap()->live_object_stats_->CheckpointObjectStats();
    heap()->dead_object_stats_->ClearObjectStats();
  }
}

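// Marking visitor for the young generation: pointers into new space are
// marked grey and pushed onto the per-task marking worklist; pointers into
// the old generation are ignored.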
class YoungGenerationMarkingVisitor final
    : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
 public:
  YoungGenerationMarkingVisitor(
      Heap* heap, MinorMarkCompactCollector::MarkingState* marking_state,
      MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
      : heap_(heap),
        worklist_(global_worklist, task_id),
        marking_state_(marking_state) {}

  V8_INLINE void VisitPointers(HeapObject* host, Object** start,
                               Object** end) final {
    for (Object** p = start; p < end; p++) {
      VisitPointer(host, p);
    }
  }

  V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
    Object* target = *slot;
    if (heap_->InNewSpace(target)) {
      HeapObject* target_object = HeapObject::cast(target);
      MarkObjectViaMarkingWorklist(target_object);
    }
  }

 private:
  inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
    if (marking_state_->WhiteToGrey(object)) {
      // Marking deque overflow is unsupported for the young generation.
      CHECK(worklist_.Push(object));
    }
  }

  Heap* heap_;
  MinorMarkCompactCollector::MarkingWorklist::View worklist_;
  MinorMarkCompactCollector::MarkingState* marking_state_;
};

class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
 public:
  explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
      : collector_(collector),
        marking_state_(collector_->non_atomic_marking_state()) {}

  void VisitRootPointer(Root root, Object** p) override {
    MarkObjectByPointer(p);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* object = HeapObject::cast(*p);

    if (!collector_->heap()->InNewSpace(object)) return;

    if (marking_state_->WhiteToGrey(object)) {
      collector_->main_marking_visitor()->Visit(object);
      collector_->EmptyMarkingWorklist();
    }
  }

  MinorMarkCompactCollector* collector_;
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

class MarkingItem;
class GlobalHandlesMarkingItem;
class PageMarkingItem;
class RootMarkingItem;
class YoungGenerationMarkingTask;

class MarkingItem : public ItemParallelJob::Item {
 public:
  virtual ~MarkingItem() {}
  virtual void Process(YoungGenerationMarkingTask* task) = 0;
};

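// Parallel task that processes MarkingItems and drains its view of the
// shared marking worklist. Live bytes are accumulated per page locally and
// flushed into the marking state when the task finishes.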
class YoungGenerationMarkingTask : public ItemParallelJob::Task {
 public:
  YoungGenerationMarkingTask(
      Isolate* isolate, MinorMarkCompactCollector* collector,
      MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
      : ItemParallelJob::Task(isolate),
        collector_(collector),
        marking_worklist_(global_worklist, task_id),
        marking_state_(collector->marking_state()),
        visitor_(isolate->heap(), marking_state_, global_worklist, task_id) {
    local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
                              Page::kPageSize);
  }

  void RunInParallel() override {
    double marking_time = 0.0;
    {
      TimedScope scope(&marking_time);
      MarkingItem* item = nullptr;
      while ((item = GetItem<MarkingItem>()) != nullptr) {
        item->Process(this);
        item->MarkFinished();
        EmptyLocalMarkingWorklist();
      }
      EmptyMarkingWorklist();
      DCHECK(marking_worklist_.IsLocalEmpty());
      FlushLiveBytes();
    }
    if (FLAG_trace_minor_mc_parallel_marking) {
      PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
                   static_cast<void*>(this), marking_time);
    }
  }

  void MarkObject(Object* object) {
    if (!collector_->heap()->InNewSpace(object)) return;
    HeapObject* heap_object = HeapObject::cast(object);
    if (marking_state_->WhiteToGrey(heap_object)) {
      const int size = visitor_.Visit(heap_object);
      IncrementLiveBytes(heap_object, size);
    }
  }

 private:
  void EmptyLocalMarkingWorklist() {
    HeapObject* object = nullptr;
    while (marking_worklist_.Pop(&object)) {
      const int size = visitor_.Visit(object);
      IncrementLiveBytes(object, size);
    }
  }

  void EmptyMarkingWorklist() {
    HeapObject* object = nullptr;
    while (marking_worklist_.Pop(&object)) {
      const int size = visitor_.Visit(object);
      IncrementLiveBytes(object, size);
    }
  }

  void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
    local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
        bytes;
  }

  void FlushLiveBytes() {
    for (auto pair : local_live_bytes_) {
      marking_state_->IncrementLiveBytes(pair.first, pair.second);
    }
  }

  MinorMarkCompactCollector* collector_;
  MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
  MinorMarkCompactCollector::MarkingState* marking_state_;
  YoungGenerationMarkingVisitor visitor_;
  std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
};

class BatchedRootMarkingItem : public MarkingItem {
 public:
  explicit BatchedRootMarkingItem(std::vector<Object*>&& objects)
      : objects_(objects) {}
  virtual ~BatchedRootMarkingItem() {}

  void Process(YoungGenerationMarkingTask* task) override {
    for (Object* object : objects_) {
      task->MarkObject(object);
    }
  }

 private:
  std::vector<Object*> objects_;
};

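// Marking item that iterates the OLD_TO_NEW remembered set (untyped and
// typed slots) of a single page, marks the referenced new-space objects, and
// keeps only those slots that still point into new space.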
class PageMarkingItem : public MarkingItem {
 public:
  explicit PageMarkingItem(MemoryChunk* chunk,
                           base::AtomicNumber<intptr_t>* global_slots)
      : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
  virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }

  void Process(YoungGenerationMarkingTask* task) override {
    base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
    MarkUntypedPointers(task);
    MarkTypedPointers(task);
  }

 private:
  inline Heap* heap() { return chunk_->heap(); }

  void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
    RememberedSet<OLD_TO_NEW>::Iterate(
        chunk_,
        [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
        SlotSet::PREFREE_EMPTY_BUCKETS);
  }

  void MarkTypedPointers(YoungGenerationMarkingTask* task) {
    Isolate* isolate = heap()->isolate();
    RememberedSet<OLD_TO_NEW>::IterateTyped(
        chunk_, [this, isolate, task](SlotType slot_type, Address host_addr,
                                      Address slot) {
          return UpdateTypedSlotHelper::UpdateTypedSlot(
              isolate, slot_type, slot, [this, task](Object** slot) {
                return CheckAndMarkObject(task,
                                          reinterpret_cast<Address>(slot));
              });
        });
  }

  SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
                                        Address slot_address) {
    Object* object = *reinterpret_cast<Object**>(slot_address);
    if (heap()->InNewSpace(object)) {
      // Marking happens before flipping the young generation, so the object
      // has to be in ToSpace.
      DCHECK(heap()->InToSpace(object));
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
      task->MarkObject(heap_object);
      slots_++;
      return KEEP_SLOT;
    }
    return REMOVE_SLOT;
  }

  MemoryChunk* chunk_;
  base::AtomicNumber<intptr_t>* global_slots_;
  intptr_t slots_;
};

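// Marking item covering a range of new-space global handle nodes; marks the
// objects referenced by the strong and dependent roots in that range.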
class GlobalHandlesMarkingItem : public MarkingItem {
 public:
  GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
                           size_t end)
      : global_handles_(global_handles), start_(start), end_(end) {}
  virtual ~GlobalHandlesMarkingItem() {}

  void Process(YoungGenerationMarkingTask* task) override {
    GlobalHandlesRootMarkingVisitor visitor(task);
    global_handles_
        ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
            &visitor, start_, end_);
  }

 private:
  class GlobalHandlesRootMarkingVisitor : public RootVisitor {
   public:
    explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
        : task_(task) {}

    void VisitRootPointer(Root root, Object** p) override {
      DCHECK(Root::kGlobalHandles == root);
      task_->MarkObject(*p);
    }

    void VisitRootPointers(Root root, Object** start, Object** end) override {
      DCHECK(Root::kGlobalHandles == root);
      for (Object** p = start; p < end; p++) {
        task_->MarkObject(*p);
      }
    }

   private:
    YoungGenerationMarkingTask* task_;
  };

  GlobalHandles* global_handles_;
  size_t start_;
  size_t end_;
};

MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
    : MarkCompactCollectorBase(heap),
      worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
      main_marking_visitor_(new YoungGenerationMarkingVisitor(
          heap, marking_state(), worklist_, kMainMarker)),
      page_parallel_job_semaphore_(0) {
  static_assert(
      kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
      "more marker tasks than marking deque can handle");
}

MinorMarkCompactCollector::~MinorMarkCompactCollector() {
  delete worklist_;
  delete main_marking_visitor_;
}

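// Returns true for objects in new space that the minor collector has not
// marked grey; used to identify pending (potentially dead) weak global
// handles.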
static bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
  DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
  return heap->InNewSpace(*p) && !heap->minor_mark_compact_collector()
                                      ->non_atomic_marking_state()
                                      ->IsGrey(HeapObject::cast(*p));
}

template <class ParallelItem>
static void SeedGlobalHandles(GlobalHandles* global_handles,
                              ItemParallelJob* job) {
  // Create batches of global handles.
  const size_t kGlobalHandlesBufferSize = 1000;
  const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
  for (size_t start = 0; start < new_space_nodes;
       start += kGlobalHandlesBufferSize) {
    size_t end = start + kGlobalHandlesBufferSize;
    if (end > new_space_nodes) end = new_space_nodes;
    job->AddItem(new ParallelItem(global_handles, start, end));
  }
}

void MinorMarkCompactCollector::MarkRootSetInParallel() {
  base::AtomicNumber<intptr_t> slots;
  {
    ItemParallelJob job(isolate()->cancelable_task_manager(),
                        &page_parallel_job_semaphore_);

    // Seed the root set (roots + old->new set).
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
      // Create batches of roots.
      RootMarkingVisitorSeedOnly<BatchedRootMarkingItem> root_seed_visitor(
          &job);
      heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
      // Create batches of global handles.
      SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
                                                  &job);
      // Create items for each page.
      RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
          heap(), [&job, &slots](MemoryChunk* chunk) {
            job.AddItem(new PageMarkingItem(chunk, &slots));
          });
      // Flush any remaining objects in the seeding visitor.
      root_seed_visitor.FlushObjects();
    }

    // Add tasks and run in parallel.
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
      const int new_space_pages =
          static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
      const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
      for (int i = 0; i < num_tasks; i++) {
        job.AddTask(
            new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
      }
      job.Run();
      DCHECK(worklist()->IsGlobalEmpty());
    }
  }
  old_to_new_slots_ = static_cast<int>(slots.Value());
}

void MinorMarkCompactCollector::MarkLiveObjects() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);

  PostponeInterruptsScope postpone(isolate());

  RootMarkingVisitor root_visitor(this);

  MarkRootSetInParallel();

  // Mark rest on the main thread.
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
    heap()->IterateEncounteredWeakCollections(&root_visitor);
    ProcessMarkingWorklist();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
    isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
        &IsUnmarkedObjectForYoungGeneration);
    isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
        &root_visitor);
    ProcessMarkingWorklist();
  }
}

void MinorMarkCompactCollector::ProcessMarkingWorklist() {
  EmptyMarkingWorklist();
}

void MinorMarkCompactCollector::EmptyMarkingWorklist() {
  MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
  HeapObject* object = nullptr;
  while (marking_worklist.Pop(&object)) {
    DCHECK(!object->IsFiller());
    DCHECK(object->IsHeapObject());
    DCHECK(heap()->Contains(object));
    DCHECK(non_atomic_marking_state()->IsGrey(object));
    main_marking_visitor()->Visit(object);
  }
  DCHECK(marking_worklist.IsLocalEmpty());
}

void MinorMarkCompactCollector::CollectGarbage() {
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
    heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
    CleanupSweepToIteratePages();
  }

  MarkLiveObjects();
  ClearNonLiveReferences();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    YoungGenerationMarkingVerifier verifier(heap());
    verifier.Run();
  }
#endif  // VERIFY_HEAP

  Evacuate();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    YoungGenerationEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif  // VERIFY_HEAP

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
    heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
    for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
                             heap()->new_space()->FromSpaceEnd())) {
      DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
      non_atomic_marking_state()->ClearLiveness(p);
    }
  }

  heap()->account_external_memory_concurrently_freed();
}

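// Makes a SWEEP_TO_ITERATE page iterable by putting filler objects over the
// gaps between grey objects, clearing the full collector's mark bits for
// those gaps and optionally zapping them; with MarkingTreatmentMode::CLEAR
// the young generation liveness of the page is reset as well.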
void MinorMarkCompactCollector::MakeIterable(
    Page* p, MarkingTreatmentMode marking_mode,
    FreeSpaceTreatmentMode free_space_mode) {
  // We have to clear the full collector's markbits for the areas that we
  // remove here.
  MarkCompactCollector* full_collector = heap()->mark_compact_collector();
  Address free_start = p->area_start();
  DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);

  for (auto object_and_size :
       LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(non_atomic_marking_state()->IsGrey(object));
    Address free_end = object->address();
    if (free_end != free_start) {
      CHECK_GT(free_end, free_start);
      size_t size = static_cast<size_t>(free_end - free_start);
      full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
          p->AddressToMarkbitIndex(free_start),
          p->AddressToMarkbitIndex(free_end));
      if (free_space_mode == ZAP_FREE_SPACE) {
        memset(free_start, 0xcc, size);
      }
      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                      ClearRecordedSlots::kNo);
    }
    Map* map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    free_start = free_end + size;
  }

  if (free_start != p->area_end()) {
    CHECK_GT(p->area_end(), free_start);
    size_t size = static_cast<size_t>(p->area_end() - free_start);
    full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
        p->AddressToMarkbitIndex(free_start),
        p->AddressToMarkbitIndex(p->area_end()));
    if (free_space_mode == ZAP_FREE_SPACE) {
      memset(free_start, 0xcc, size);
    }
    p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                    ClearRecordedSlots::kNo);
  }

  if (marking_mode == MarkingTreatmentMode::CLEAR) {
    non_atomic_marking_state()->ClearLiveness(p);
    p->ClearFlag(Page::SWEEP_TO_ITERATE);
  }
}

void MinorMarkCompactCollector::ClearNonLiveReferences() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
    // Internalized strings are always stored in old space, so there is no need
    // to clean them here.
    YoungGenerationExternalStringTableCleaner external_visitor(this);
    heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
    heap()->external_string_table_.CleanUpNewSpaceStrings();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
    // Process the weak references.
    MinorMarkCompactWeakObjectRetainer retainer(this);
    heap()->ProcessYoungWeakReferences(&retainer);
  }
}

void MinorMarkCompactCollector::EvacuatePrologue() {
  NewSpace* new_space = heap()->new_space();
  // Append the list of new space pages to be processed.
  for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
    new_space_evacuation_pages_.push_back(p);
  }
  new_space->Flip();
  new_space->ResetAllocationInfo();
}

void MinorMarkCompactCollector::EvacuateEpilogue() {
  heap()->new_space()->set_age_mark(heap()->new_space()->top());
}

void MinorMarkCompactCollector::Evacuate() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
  base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
    EvacuatePrologue();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
    EvacuatePagesInParallel();
  }

  UpdatePointersAfterEvacuation();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
    if (!heap()->new_space()->Rebalance()) {
      FatalProcessOutOfMemory("NewSpace::Rebalance");
    }
  }

  // Give pages that are queued to be freed back to the OS.
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
    for (Page* p : new_space_evacuation_pages_) {
      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
          p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
        p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
        p->SetFlag(Page::SWEEP_TO_ITERATE);
        sweep_to_iterate_pages_.push_back(p);
      }
    }
    new_space_evacuation_pages_.clear();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
    EvacuateEpilogue();
  }
}

void MarkCompactCollector::MarkLiveObjects() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
  // The recursive GC marker detects when it is nearing stack overflow,
  // and switches to a different marking system.  JS interrupts interfere
  // with the C stack limit check.
  PostponeInterruptsScope postpone(isolate());

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
    IncrementalMarking* incremental_marking = heap_->incremental_marking();
    if (was_marked_incrementally_) {
      incremental_marking->Finalize();
    } else {
      CHECK(incremental_marking->IsStopped());
    }
  }

#ifdef DEBUG
  DCHECK(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif

  marking_worklist()->StartUsing();

  heap_->local_embedder_heap_tracer()->EnterFinalPause();

  RootMarkingVisitor root_visitor(this);

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
    CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
    MarkRoots(&root_visitor, &custom_root_body_visitor);
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);

    // The objects reachable from the roots are marked, yet unreachable
    // objects are unmarked.  Mark objects reachable due to host
    // application specific logic or through Harmony weak maps.
    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
      ProcessEphemeralMarking(false);
    }

    // The objects reachable from the roots, weak maps or object groups
    // are marked. Objects pointed to only by weak global handles cannot be
    // immediately reclaimed. Instead, we have to mark them as pending and mark
    // objects reachable from them.
    //
    // First we identify nonlive weak handles and mark them as pending
    // destruction.
    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
      heap()->isolate()->global_handles()->IdentifyWeakHandles(
          &IsUnmarkedHeapObject);
      ProcessMarkingWorklist();
    }
    // Then we mark the objects.

    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
      heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
      ProcessMarkingWorklist();
    }

    // Repeat Harmony weak maps marking to mark unmarked objects reachable from
    // the weak roots we just marked as pending destruction.
    //
    // We only process harmony collections, as all object groups have been fully
    // processed and no weakly reachable node can discover new object groups.
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
      ProcessEphemeralMarking(true);
      {
        TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
        heap()->local_embedder_heap_tracer()->TraceEpilogue();
      }
    }
  }
}


void MarkCompactCollector::ClearNonLiveReferences() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);

    // Prune the string table removing all strings only pointed to by the
    // string table.  Cannot use string_table() here because the string
    // table is marked.
    StringTable* string_table = heap()->string_table();
    InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
    string_table->IterateElements(&internalized_visitor);
    string_table->ElementsRemoved(internalized_visitor.PointersRemoved());

    ExternalStringTableCleaner external_visitor(heap());
    heap()->external_string_table_.IterateAll(&external_visitor);
    heap()->external_string_table_.CleanUpAll();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
    // Process the weak references.
    MarkCompactWeakObjectRetainer mark_compact_object_retainer(
        non_atomic_marking_state());
    heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
    // ClearFullMapTransitions must be called before WeakCells are cleared.
    ClearFullMapTransitions();
  }
  DependentCode* dependent_code_list;
  ClearWeakCellsAndSimpleMapTransitions(&dependent_code_list);
  MarkDependentCodeForDeoptimization(dependent_code_list);

  ClearWeakCollections();

  DCHECK(weak_objects_.weak_cells.IsGlobalEmpty());
  DCHECK(weak_objects_.transition_arrays.IsGlobalEmpty());
}


void MarkCompactCollector::MarkDependentCodeForDeoptimization(
    DependentCode* list_head) {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
  Isolate* isolate = this->isolate();
  DependentCode* current = list_head;
  while (current->length() > 0) {
    have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
        isolate, DependentCode::kWeakCodeGroup);
    current = current->next_link();
  }

  {
    ArrayList* list = heap_->weak_new_space_object_to_code_list();
    int counter = 0;
    for (int i = 0; i < list->Length(); i += 2) {
      WeakCell* obj = WeakCell::cast(list->Get(i));
      WeakCell* dep = WeakCell::cast(list->Get(i + 1));
      if (obj->cleared() || dep->cleared()) {
        if (!dep->cleared()) {
          Code* code = Code::cast(dep->value());
          if (!code->marked_for_deoptimization()) {
            DependentCode::SetMarkedForDeoptimization(
                code, DependentCode::DependencyGroup::kWeakCodeGroup);
            code->InvalidateEmbeddedObjects();
            have_code_to_deoptimize_ = true;
          }
        }
      } else {
        // We record the slot manually because marking is finished at this
        // point and the write barrier would bail out.
        list->Set(counter, obj, SKIP_WRITE_BARRIER);
        RecordSlot(list, list->Slot(counter), obj);
        counter++;
        list->Set(counter, dep, SKIP_WRITE_BARRIER);
        RecordSlot(list, list->Slot(counter), dep);
        counter++;
      }
    }
  }

  WeakHashTable* table = heap_->weak_object_to_code_table();
  uint32_t capacity = table->Capacity();
  for (uint32_t i = 0; i < capacity; i++) {
    uint32_t key_index = table->EntryToIndex(i);
    Object* key = table->get(key_index);
    if (!table->IsKey(isolate, key)) continue;
    uint32_t value_index = table->EntryToValueIndex(i);
    Object* value = table->get(value_index);
    DCHECK(key->IsWeakCell());
    if (WeakCell::cast(key)->cleared()) {
      have_code_to_deoptimize_ |=
          DependentCode::cast(value)->MarkCodeForDeoptimization(
              isolate, DependentCode::kWeakCodeGroup);
      table->set(key_index, heap_->the_hole_value());
      table->set(value_index, heap_->the_hole_value());
      table->ElementRemoved();
    }
  }
}

void MarkCompactCollector::ClearSimpleMapTransition(
    WeakCell* potential_transition, Map* dead_target) {
  DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
  Object* potential_parent = dead_target->constructor_or_backpointer();
  if (potential_parent->IsMap()) {
    Map* parent = Map::cast(potential_parent);
    DisallowHeapAllocation no_gc_obviously;
    if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
        TransitionsAccessor(parent, &no_gc_obviously)
            .HasSimpleTransitionTo(potential_transition)) {
      ClearSimpleMapTransition(parent, dead_target);
    }
  }
}

void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
                                                    Map* dead_target) {
  DCHECK(!map->is_prototype_map());
  DCHECK(!dead_target->is_prototype_map());
2873 2874
  // Clear the useless weak cell pointer, and take ownership of the descriptor
  // array.
2875
  map->set_raw_transitions(Smi::kZero);
2876 2877
  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
  DescriptorArray* descriptors = map->instance_descriptors();
2878
  if (descriptors == dead_target->instance_descriptors() &&
2879
      number_of_own_descriptors > 0) {
2880
    TrimDescriptorArray(map, descriptors);
2881 2882 2883
    DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
    map->set_owns_descriptors(true);
  }
2884
}
2885

2886
void MarkCompactCollector::ClearFullMapTransitions() {
2887 2888
  TransitionArray* array;
  while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
2889 2890
    int num_transitions = array->number_of_entries();
    if (num_transitions > 0) {
2891
      Map* map = array->GetTarget(0);
2892
      DCHECK_NOT_NULL(map);  // WeakCells aren't cleared yet.
2893
      Map* parent = Map::cast(map->constructor_or_backpointer());
2894
      bool parent_is_alive = non_atomic_marking_state()->IsBlackOrGrey(parent);
2895 2896 2897 2898 2899 2900
      DescriptorArray* descriptors =
          parent_is_alive ? parent->instance_descriptors() : nullptr;
      bool descriptors_owner_died =
          CompactTransitionArray(parent, array, descriptors);
      if (descriptors_owner_died) {
        TrimDescriptorArray(parent, descriptors);
      }
    }
  }
}

bool MarkCompactCollector::CompactTransitionArray(
    Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
  DCHECK(!map->is_prototype_map());
  int num_transitions = transitions->number_of_entries();
  bool descriptors_owner_died = false;
  int transition_index = 0;
  // Compact all live transitions to the left.
  for (int i = 0; i < num_transitions; ++i) {
    Map* target = transitions->GetTarget(i);
    DCHECK_EQ(target->constructor_or_backpointer(), map);
    if (non_atomic_marking_state()->IsWhite(target)) {
      if (descriptors != nullptr &&
          target->instance_descriptors() == descriptors) {
        DCHECK(!target->is_prototype_map());
        descriptors_owner_died = true;
      }
    } else {
      if (i != transition_index) {
        Name* key = transitions->GetKey(i);
        transitions->SetKey(transition_index, key);
        Object** key_slot = transitions->GetKeySlot(transition_index);
        RecordSlot(transitions, key_slot, key);
        Object* raw_target = transitions->GetRawTarget(i);
        transitions->SetTarget(transition_index, raw_target);
        // Maps are not compacted, but for cached handlers the target slot
        // must be recorded.
        if (!raw_target->IsMap()) {
          Object** target_slot = transitions->GetTargetSlot(transition_index);
          RecordSlot(transitions, target_slot, raw_target);
        }
      }
      transition_index++;
    }
  }
  // If there are no transitions to be cleared, return.
  if (transition_index == num_transitions) {
    DCHECK(!descriptors_owner_died);
    return false;
  }
  // Note that we never eliminate a transition array, though we might right-trim
  // such that number_of_transitions() == 0. If this assumption changes,
  // TransitionArray::Insert() will need to deal with the case that a transition
  // array disappeared during GC.
  int trim = transitions->Capacity() - transition_index;
  if (trim > 0) {
    heap_->RightTrimFixedArray(transitions,
                               trim * TransitionArray::kTransitionSize);
    transitions->SetNumberOfTransitions(transition_index);
  }
  return descriptors_owner_died;
}


void MarkCompactCollector::TrimDescriptorArray(Map* map,
                                               DescriptorArray* descriptors) {
  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
  if (number_of_own_descriptors == 0) {
    DCHECK(descriptors == heap_->empty_descriptor_array());
    return;
  }

  int number_of_descriptors = descriptors->number_of_descriptors_storage();
  int to_trim = number_of_descriptors - number_of_own_descriptors;
  if (to_trim > 0) {
    heap_->RightTrimFixedArray(descriptors,
                               to_trim * DescriptorArray::kEntrySize);
    descriptors->SetNumberOfDescriptors(number_of_own_descriptors);

    if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
    descriptors->Sort();

    if (FLAG_unbox_double_fields) {
      LayoutDescriptor* layout_descriptor = map->layout_descriptor();
      layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
                                                  number_of_own_descriptors);
      SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
    }
  }
  DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
  map->set_owns_descriptors(true);
}


void MarkCompactCollector::TrimEnumCache(Map* map,
                                         DescriptorArray* descriptors) {
  int live_enum = map->EnumLength();
  if (live_enum == kInvalidEnumCacheSentinel) {
    live_enum = map->NumberOfEnumerableProperties();
  }
  if (live_enum == 0) return descriptors->ClearEnumCache();

  FixedArray* enum_cache = descriptors->GetEnumCache();

  int to_trim = enum_cache->length() - live_enum;
  if (to_trim <= 0) return;
  heap_->RightTrimFixedArray(descriptors->GetEnumCache(), to_trim);

  if (!descriptors->HasEnumIndicesCache()) return;
  FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
  heap_->RightTrimFixedArray(enum_indices_cache, to_trim);
}


void MarkCompactCollector::ProcessWeakCollections() {
  MarkCompactMarkingVisitor visitor(this);
  Object* weak_collection_obj = heap()->encountered_weak_collections();
  while (weak_collection_obj != Smi::kZero) {
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    DCHECK(non_atomic_marking_state()->IsBlackOrGrey(weak_collection));
    if (weak_collection->table()->IsHashTable()) {
      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
      for (int i = 0; i < table->Capacity(); i++) {
        HeapObject* heap_object = HeapObject::cast(table->KeyAt(i));
        if (non_atomic_marking_state()->IsBlackOrGrey(heap_object)) {
          Object** key_slot =
              table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
          RecordSlot(table, key_slot, *key_slot);
          Object** value_slot =
              table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
          visitor.MarkObjectByPointer(table, value_slot);
        }
      }
    }
    weak_collection_obj = weak_collection->next();
  }
}


void MarkCompactCollector::ClearWeakCollections() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
  Object* weak_collection_obj = heap()->encountered_weak_collections();
  while (weak_collection_obj != Smi::kZero) {
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    DCHECK(non_atomic_marking_state()->IsBlackOrGrey(weak_collection));
    if (weak_collection->table()->IsHashTable()) {
      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
      for (int i = 0; i < table->Capacity(); i++) {
        HeapObject* key = HeapObject::cast(table->KeyAt(i));
        if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
          table->RemoveEntry(i);
        }
      }
    }
    weak_collection_obj = weak_collection->next();
    weak_collection->set_next(heap()->undefined_value());
  }
  heap()->set_encountered_weak_collections(Smi::kZero);
}


void MarkCompactCollector::AbortWeakCollections() {
  Object* weak_collection_obj = heap()->encountered_weak_collections();
  while (weak_collection_obj != Smi::kZero) {
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    weak_collection_obj = weak_collection->next();
    weak_collection->set_next(heap()->undefined_value());
  }
  heap()->set_encountered_weak_collections(Smi::kZero);
}

void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
    DependentCode** dependent_code_list) {
  Heap* heap = this->heap();
  TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
  DependentCode* dependent_code_head =
      DependentCode::cast(heap->empty_fixed_array());
  WeakCell* weak_cell;
  while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    if (!non_atomic_marking_state()->IsBlackOrGrey(value)) {
      // Cells for new-space objects embedded in optimized code are wrapped in
      // WeakCell and put into Heap::weak_object_to_code_table.
      // Such cells do not have any strong references but we want to keep them
      // alive as long as the cell value is alive.
      // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
      if (value->IsCell()) {
        Object* cell_value = Cell::cast(value)->value();
        if (cell_value->IsHeapObject() &&
            non_atomic_marking_state()->IsBlackOrGrey(
                HeapObject::cast(cell_value))) {
          // Resurrect the cell.
          non_atomic_marking_state()->WhiteToBlack(value);
          Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
          RecordSlot(value, slot, *slot);
          slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
          RecordSlot(weak_cell, slot, *slot);
        } else {
          weak_cell->clear();
        }
      } else if (value->IsMap()) {
        // The map is non-live.
        Map* map = Map::cast(value);
        // Add dependent code to the dependent_code_list.
        DependentCode* candidate = map->dependent_code();
        // We rely on the fact that the weak code group comes first.
        STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
        if (candidate->length() > 0 &&
            candidate->group() == DependentCode::kWeakCodeGroup) {
          candidate->set_next_link(dependent_code_head);
          dependent_code_head = candidate;
        }
        ClearSimpleMapTransition(weak_cell, map);
        weak_cell->clear();
      } else {
        // All other objects.
        weak_cell->clear();
      }
    } else {
      // The value of the weak cell is alive.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      RecordSlot(weak_cell, slot, *slot);
    }
  }
  *dependent_code_list = dependent_code_head;
}

void MarkCompactCollector::AbortWeakObjects() {
  weak_objects_.weak_cells.Clear();
  weak_objects_.transition_arrays.Clear();
}

void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
                                           Object* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host() == NULL ||
       !source_page->ShouldSkipEvacuationSlotRecording())) {
    RelocInfo::Mode rmode = rinfo->rmode();
    Address addr = rinfo->pc();
    SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
    if (rinfo->IsInConstantPool()) {
      addr = rinfo->constant_pool_entry_address();
      if (RelocInfo::IsCodeTarget(rmode)) {
        slot_type = CODE_ENTRY_SLOT;
      } else {
        DCHECK(RelocInfo::IsEmbeddedObject(rmode));
        slot_type = OBJECT_SLOT;
      }
    }
    RememberedSet<OLD_TO_OLD>::InsertTyped(
        source_page, reinterpret_cast<Address>(host), slot_type, addr);
  }
}

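// Updates the given slot to point at an object's new location if the object
// was evacuated, i.e. if its map word holds a forwarding address.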
template <AccessMode access_mode>
static inline SlotCallbackResult UpdateSlot(Object** slot) {
  Object* obj = *slot;
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MapWord map_word = heap_obj->map_word();
    if (map_word.IsForwardingAddress()) {
      DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
             Page::FromAddress(heap_obj->address())
                 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
      HeapObject* target = map_word.ToForwardingAddress();
      if (access_mode == AccessMode::NON_ATOMIC) {
        *slot = target;
      } else {
        base::AsAtomicPointer::Release_CompareAndSwap(slot, obj, target);
      }
      DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
      DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
    }
  }
  // OLD_TO_OLD slots are always removed after updating.
  return REMOVE_SLOT;
}

// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
// TODO(ulan): Remove code object specific functions. This visitor
// never visits code objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
 public:
  void VisitPointer(HeapObject* host, Object** p) override {
    UpdateSlotInternal(p);
  }

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
  }

  void VisitRootPointer(Root root, Object** p) override {
    UpdateSlotInternal(p);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
  }

  void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlotInternal);
  }

  void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
    UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlotInternal);
  }

 private:
  static inline SlotCallbackResult UpdateSlotInternal(Object** slot) {
    return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
  }
};

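// External string table entries may point at strings that were moved during
// evacuation; follow the forwarding address if one is installed.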
static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
                                                         Object** p) {
  MapWord map_word = HeapObject::cast(*p)->map_word();

  if (map_word.IsForwardingAddress()) {
    return String::cast(map_word.ToForwardingAddress());
  }

  return String::cast(*p);
}

void MarkCompactCollector::EvacuatePrologue() {
  // New space.
  NewSpace* new_space = heap()->new_space();
  // Append the list of new space pages to be processed.
  for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
    new_space_evacuation_pages_.push_back(p);
  }
  new_space->Flip();
  new_space->ResetAllocationInfo();

  // Old space.
  DCHECK(old_space_evacuation_pages_.empty());
  old_space_evacuation_pages_ = std::move(evacuation_candidates_);
  evacuation_candidates_.clear();
  DCHECK(evacuation_candidates_.empty());
}

void MarkCompactCollector::EvacuateEpilogue() {
  // New space.
  heap()->new_space()->set_age_mark(heap()->new_space()->top());
  // Old space. Deallocate evacuated candidate pages.
  ReleaseEvacuationCandidates();
#ifdef DEBUG
  // Old-to-old slot sets must be empty after evacuation.
  for (Page* p : *heap()->old_space()) {
    DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
    DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
    DCHECK_NULL(p->invalidated_slots());
  }
#endif
}

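// Base class for evacuating the live objects of a single page. Subclasses
// select the objects to visit via RawEvacuatePage(); locally collected
// statistics and pretenuring feedback are merged back on the main thread in
// Finalize().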
class Evacuator : public Malloced {
 public:
  enum EvacuationMode {
    kObjectsNewToOld,
    kPageNewToOld,
    kObjectsOldToOld,
    kPageNewToNew,
  };

  static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
    // Note: The order of checks is important in this function.
    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
      return kPageNewToOld;
    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
      return kPageNewToNew;
    if (chunk->InNewSpace()) return kObjectsNewToOld;
    return kObjectsOldToOld;
  }

  // NewSpacePages with more live bytes than this threshold qualify for fast
  // evacuation.
  static int PageEvacuationThreshold() {
    if (FLAG_page_promotion)
      return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
    return Page::kAllocatableMemory + kPointerSize;
  }

  Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
      : heap_(heap),
        local_allocator_(heap_),
        compaction_spaces_(heap_),
        local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
        new_space_visitor_(heap_, &local_allocator_, record_visitor,
                           &local_pretenuring_feedback_),
        new_to_new_page_visitor_(heap_, record_visitor,
                                 &local_pretenuring_feedback_),
        new_to_old_page_visitor_(heap_, record_visitor,
                                 &local_pretenuring_feedback_),

        old_space_visitor_(heap_, &local_allocator_, record_visitor),
        duration_(0.0),
        bytes_compacted_(0) {}

  virtual ~Evacuator() {}

  void EvacuatePage(Page* page);

  void AddObserver(MigrationObserver* observer) {
    new_space_visitor_.AddObserver(observer);
    old_space_visitor_.AddObserver(observer);
  }

  // Merge back locally cached info sequentially. Note that this method needs
  // to be called from the main thread.
  inline void Finalize();

 protected:
  static const int kInitialLocalPretenuringFeedbackCapacity = 256;

  // |saved_live_bytes| returns the live bytes of the page that was processed.
  virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;

  inline Heap* heap() { return heap_; }

  void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
    duration_ += duration;
    bytes_compacted_ += bytes_compacted;
  }

  Heap* heap_;

  // Locally cached collector data.
  LocalAllocator local_allocator_;
  CompactionSpaceCollection compaction_spaces_;
  base::HashMap local_pretenuring_feedback_;

  // Visitors for the corresponding spaces.
  EvacuateNewSpaceVisitor new_space_visitor_;
  EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
      new_to_new_page_visitor_;
  EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
      new_to_old_page_visitor_;
  EvacuateOldSpaceVisitor old_space_visitor_;

  // Book keeping info.
  double duration_;
  intptr_t bytes_compacted_;
};

void Evacuator::EvacuatePage(Page* page) {
  DCHECK(page->SweepingDone());
  intptr_t saved_live_bytes = 0;
  double evacuation_time = 0.0;
  {
    AlwaysAllocateScope always_allocate(heap()->isolate());
    TimedScope timed_scope(&evacuation_time);
    RawEvacuatePage(page, &saved_live_bytes);
  }
  ReportCompactionProgress(evacuation_time, saved_live_bytes);
  if (FLAG_trace_evacuation) {
    PrintIsolate(
        heap()->isolate(),
        "evacuation[%p]: page=%p new_space=%d "
        "page_evacuation=%d executable=%d contains_age_mark=%d "
        "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
        static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
        page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
            page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
        page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
        page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
        evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
  }
}

void Evacuator::Finalize() {
  local_allocator_.Finalize();
  heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
                                       new_to_old_page_visitor_.moved_bytes());
  heap()->IncrementSemiSpaceCopiedObjectSize(
      new_space_visitor_.semispace_copied_size() +
      new_to_new_page_visitor_.moved_bytes());
  heap()->IncrementYoungSurvivorsCounter(
      new_space_visitor_.promoted_size() +
      new_space_visitor_.semispace_copied_size() +
      new_to_old_page_visitor_.moved_bytes() +
      new_to_new_page_visitor_.moved_bytes());
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}

class FullEvacuator : public Evacuator {
 public:
  FullEvacuator(MarkCompactCollector* collector,
                RecordMigratedSlotVisitor* record_visitor)
      : Evacuator(collector->heap(), record_visitor), collector_(collector) {}

 protected:
  void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;

  MarkCompactCollector* collector_;
};

void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
  MarkCompactCollector::NonAtomicMarkingState* marking_state =
      collector_->non_atomic_marking_state();
  *live_bytes = marking_state->live_bytes(page);
  HeapObject* failed_object = nullptr;
  switch (ComputeEvacuationMode(page)) {
    case kObjectsNewToOld:
      LiveObjectVisitor::VisitBlackObjectsNoFail(
          page, marking_state, &new_space_visitor_,
          LiveObjectVisitor::kClearMarkbits);
      // ArrayBufferTracker will be updated during pointers updating.
      break;
    case kPageNewToOld:
      LiveObjectVisitor::VisitBlackObjectsNoFail(
          page, marking_state, &new_to_old_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_old_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(page));
      // ArrayBufferTracker will be updated during sweeping.
      break;
    case kPageNewToNew:
      LiveObjectVisitor::VisitBlackObjectsNoFail(
          page, marking_state, &new_to_new_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_new_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(page));
      // ArrayBufferTracker will be updated during sweeping.
      break;
    case kObjectsOldToOld: {
      const bool success = LiveObjectVisitor::VisitBlackObjects(
          page, marking_state, &old_space_visitor_,
          LiveObjectVisitor::kClearMarkbits, &failed_object);
      if (!success) {
        // Aborted compaction page. Actual processing happens on the main
        // thread for simplicity reasons.
        collector_->ReportAbortedEvacuationCandidate(failed_object, page);
      } else {
        // ArrayBufferTracker will be updated during pointers updating.
      }
      break;
    }
  }
}

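// Evacuator used by the minor (young generation) mark-compact collector; in
// contrast to FullEvacuator it iterates grey objects.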
class YoungGenerationEvacuator : public Evacuator {
 public:
  YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
                           RecordMigratedSlotVisitor* record_visitor)
      : Evacuator(collector->heap(), record_visitor), collector_(collector) {}

 protected:
  void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;

  MinorMarkCompactCollector* collector_;
};

void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
                                               intptr_t* live_bytes) {
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
      collector_->non_atomic_marking_state();
  *live_bytes = marking_state->live_bytes(page);
  switch (ComputeEvacuationMode(page)) {
    case kObjectsNewToOld:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          page, marking_state, &new_space_visitor_,
          LiveObjectVisitor::kClearMarkbits);
      // ArrayBufferTracker will be updated during pointers updating.
      break;
    case kPageNewToOld:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          page, marking_state, &new_to_old_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_old_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(page));
      // TODO(mlippautz): If cleaning array buffers is too slow here we can
      // delay it until the next GC.
      ArrayBufferTracker::FreeDead(page, marking_state);
      if (heap()->ShouldZapGarbage()) {
        collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
                                 ZAP_FREE_SPACE);
      } else if (heap()->incremental_marking()->IsMarking()) {
        // When incremental marking is on, we need to clear the mark bits of
        // the full collector. We cannot yet discard the young generation mark
        // bits as they are still relevant for pointers updating.
        collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
                                 IGNORE_FREE_SPACE);
      }
      break;
    case kPageNewToNew:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          page, marking_state, &new_to_new_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_new_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(page));
      // TODO(mlippautz): If cleaning array buffers is too slow here we can
      // delay it until the next GC.
      ArrayBufferTracker::FreeDead(page, marking_state);
      if (heap()->ShouldZapGarbage()) {
        collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
                                 ZAP_FREE_SPACE);
      } else if (heap()->incremental_marking()->IsMarking()) {
        // When incremental marking is on, we need to clear the mark bits of
        // the full collector. We cannot yet discard the young generation mark
        // bits as they are still relevant for pointers updating.
        collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
                                 IGNORE_FREE_SPACE);
      }
      break;
    case kObjectsOldToOld:
      UNREACHABLE();
      break;
  }
}

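// Work item wrapping a single page for parallel evacuation.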
class PageEvacuationItem : public ItemParallelJob::Item {
 public:
  explicit PageEvacuationItem(Page* page) : page_(page) {}
  virtual ~PageEvacuationItem() {}
  Page* page() const { return page_; }

 private:
  Page* page_;
};

class PageEvacuationTask : public ItemParallelJob::Task {
 public:
  PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
      : ItemParallelJob::Task(isolate), evacuator_(evacuator) {}

  void RunInParallel() override {
    PageEvacuationItem* item = nullptr;
    while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
      evacuator_->EvacuatePage(item->page());
      item->MarkFinished();
    }
  };

 private:
  Evacuator* evacuator_;
};

template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
    Collector* collector, ItemParallelJob* job,
    RecordMigratedSlotVisitor* record_visitor,
    MigrationObserver* migration_observer, const intptr_t live_bytes) {
  // Used for trace summary.
  double compaction_speed = 0;
  if (FLAG_trace_evacuation) {
    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
  }

  const bool profiling =
      heap()->isolate()->is_profiling() ||
      heap()->isolate()->logger()->is_logging_code_events() ||
      heap()->isolate()->heap_profiler()->is_tracking_object_moves();
  ProfilingMigrationObserver profiling_observer(heap());

  const int wanted_num_tasks =
      NumberOfParallelCompactionTasks(job->NumberOfItems());
  Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
  for (int i = 0; i < wanted_num_tasks; i++) {
    evacuators[i] = new Evacuator(collector, record_visitor);
    if (profiling) evacuators[i]->AddObserver(&profiling_observer);
    if (migration_observer != nullptr)
      evacuators[i]->AddObserver(migration_observer);
    job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
  }
  job->Run();
  for (int i = 0; i < wanted_num_tasks; i++) {
    evacuators[i]->Finalize();
    delete evacuators[i];
  }
  delete[] evacuators;

  if (FLAG_trace_evacuation) {
    PrintIsolate(isolate(),
                 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
                 "wanted_tasks=%d tasks=%d cores=%" PRIuS
                 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
                 isolate()->time_millis_since_init(),
                 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
                 wanted_num_tasks, job->NumberOfTasks(),
                 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
                 live_bytes, compaction_speed);
  }
}

bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
  const bool reduce_memory = heap()->ShouldReduceMemory();
  const Address age_mark = heap()->new_space()->age_mark();
  return !reduce_memory && !p->NeverEvacuate() &&
         (live_bytes > Evacuator::PageEvacuationThreshold()) &&
         !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
}

void MarkCompactCollector::EvacuatePagesInParallel() {
  ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);
  intptr_t live_bytes = 0;

  for (Page* page : old_space_evacuation_pages_) {
    live_bytes += non_atomic_marking_state()->live_bytes(page);
    evacuation_job.AddItem(new PageEvacuationItem(page));
  }

  for (Page* page : new_space_evacuation_pages_) {
    intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
    if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
    live_bytes += live_bytes_on_page;
    if (ShouldMovePage(page, live_bytes_on_page)) {
      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
      } else {
        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
      }
    }
    evacuation_job.AddItem(new PageEvacuationItem(page));
  }
  if (evacuation_job.NumberOfItems() == 0) return;

3623
  RecordMigratedSlotVisitor record_visitor(this);
3624 3625
  CreateAndExecuteEvacuationTasks<FullEvacuator>(
      this, &evacuation_job, &record_visitor, nullptr, live_bytes);
3626
  PostProcessEvacuationCandidates();
3627 3628 3629
}

void MinorMarkCompactCollector::EvacuatePagesInParallel() {
3630 3631
  ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);
3632 3633 3634
  intptr_t live_bytes = 0;

  for (Page* page : new_space_evacuation_pages_) {
3635
    intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
3636
    if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
3637
    live_bytes += live_bytes_on_page;
3638 3639 3640 3641 3642 3643 3644
    if (ShouldMovePage(page, live_bytes_on_page)) {
      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
      } else {
        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
      }
    }
    evacuation_job.AddItem(new PageEvacuationItem(page));
  }
  if (evacuation_job.NumberOfItems() == 0) return;

  YoungGenerationMigrationObserver observer(heap(),
                                            heap()->mark_compact_collector());
  YoungGenerationRecordMigratedSlotVisitor record_visitor(
      heap()->mark_compact_collector());
  CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
      this, &evacuation_job, &record_visitor, &observer, live_bytes);
}

class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (object->IsHeapObject()) {
      HeapObject* heap_object = HeapObject::cast(object);
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        return map_word.ToForwardingAddress();
      }
    }
    return object;
  }
};

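// Sweeps a single page: dead space is either returned to the free list or
// turned into filler objects, stale remembered-set entries for freed ranges
// are removed, and the skip list is rebuilt for code-space pages.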
int MarkCompactCollector::Sweeper::RawSweep(
    Page* p, FreeListRebuildingMode free_list_mode,
    FreeSpaceTreatmentMode free_space_mode) {
  Space* space = p->owner();
  DCHECK_NOT_NULL(space);
  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
         space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());

  // TODO(ulan): we don't have to clear typed old-to-old slots in code space
  // because the concurrent marker doesn't mark code objects. This requires
  // the write barrier for code objects to check the color of the code object.
  bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
                               p->typed_slot_set<OLD_TO_OLD>() != nullptr;

  // The free ranges map is used for filtering typed slots.
  std::map<uint32_t, uint32_t> free_ranges;

  // Before we sweep objects on the page, we free dead array buffers which
  // requires valid mark bits.
  ArrayBufferTracker::FreeDead(p, marking_state_);

  Address free_start = p->area_start();
  DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);

  // If we use the skip list for code space pages, we have to lock the skip
  // list because it could be accessed concurrently by the runtime or the
  // deoptimizer.
  const bool rebuild_skip_list =
      space->identity() == CODE_SPACE && p->skip_list() != nullptr;
  SkipList* skip_list = p->skip_list();
  if (rebuild_skip_list) {
    skip_list->Clear();
  }

  intptr_t freed_bytes = 0;
  intptr_t max_freed_bytes = 0;
  int curr_region = -1;

  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state_->IsBlack(object));
    Address free_end = object->address();
    if (free_end != free_start) {
      CHECK_GT(free_end, free_start);
      size_t size = static_cast<size_t>(free_end - free_start);
      if (free_space_mode == ZAP_FREE_SPACE) {
        memset(free_start, 0xcc, size);
      }
      if (free_list_mode == REBUILD_FREE_LIST) {
        freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
            free_start, size);
        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
      } else {
        p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                        ClearRecordedSlots::kNo);
      }
      RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      if (non_empty_typed_slots) {
        free_ranges.insert(std::pair<uint32_t, uint32_t>(
            static_cast<uint32_t>(free_start - p->address()),
            static_cast<uint32_t>(free_end - p->address())));
      }
    }
    Map* map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    if (rebuild_skip_list) {
      int new_region_start = SkipList::RegionNumber(free_end);
      int new_region_end =
          SkipList::RegionNumber(free_end + size - kPointerSize);
      if (new_region_start != curr_region || new_region_end != curr_region) {
        skip_list->AddObject(free_end, size);
        curr_region = new_region_end;
      }
    }
    free_start = free_end + size;
  }

  if (free_start != p->area_end()) {
    CHECK_GT(p->area_end(), free_start);
    size_t size = static_cast<size_t>(p->area_end() - free_start);
    if (free_space_mode == ZAP_FREE_SPACE) {
      memset(free_start, 0xcc, size);
    }
    if (free_list_mode == REBUILD_FREE_LIST) {
      freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
          free_start, size);
      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
    } else {
      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                      ClearRecordedSlots::kNo);
    }

    RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    if (non_empty_typed_slots) {
      free_ranges.insert(std::pair<uint32_t, uint32_t>(
          static_cast<uint32_t>(free_start - p->address()),
          static_cast<uint32_t>(p->area_end() - p->address())));
    }
  }

  // Clear invalid typed slots after collecting all free ranges.
  if (!free_ranges.empty()) {
    TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
    if (old_to_new != nullptr) {
      old_to_new->RemoveInvaldSlots(free_ranges);
    }
    TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
    if (old_to_old != nullptr) {
      old_to_old->RemoveInvaldSlots(free_ranges);
    }
  }

  // Clear the mark bits of that page and reset live bytes count.
  marking_state_->ClearLiveness(p);

  p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
  if (free_list_mode == IGNORE_FREE_LIST) return 0;
  return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}

void MarkCompactCollector::InvalidateCode(Code* code) {
  Page* page = Page::FromAddress(code->address());
  Address start = code->instruction_start();
  Address end = code->address() + code->Size();

  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);

  if (heap_->incremental_marking()->IsCompacting() &&
      !page->ShouldSkipEvacuationSlotRecording()) {
    DCHECK(compacting_);

    // If the object is white then no slots were recorded on it yet.
    if (non_atomic_marking_state()->IsWhite(code)) return;

    // Ignore all slots that might have been recorded in the body of the
    // deoptimized code object. Assumption: no slots will be recorded for
    // this object after invalidating it.
    RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
  }
}


// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
  return code->is_optimized_code() && code->marked_for_deoptimization();
}

void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
  EvacuateRecordOnlyVisitor visitor(heap());
  LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
                                             &visitor,
                                             LiveObjectVisitor::kKeepMarking);
}

template <class Visitor, typename MarkingState>
bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
                                          MarkingState* marking_state,
                                          Visitor* visitor,
                                          IterationMode iteration_mode,
                                          HeapObject** failed_object) {
  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
    HeapObject* const object = object_and_size.first;
    if (!visitor->Visit(object, object_and_size.second)) {
      if (iteration_mode == kClearMarkbits) {
        marking_state->bitmap(chunk)->ClearRange(
            chunk->AddressToMarkbitIndex(chunk->area_start()),
            chunk->AddressToMarkbitIndex(object->address()));
        *failed_object = object;
      }
      return false;
    }
  }
  if (iteration_mode == kClearMarkbits) {
    marking_state->ClearLiveness(chunk);
  }
  return true;
}

template <class Visitor, typename MarkingState>
void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
                                                MarkingState* marking_state,
                                                Visitor* visitor,
                                                IterationMode iteration_mode) {
  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state->IsBlack(object));
    const bool success = visitor->Visit(object, object_and_size.second);
    USE(success);
    DCHECK(success);
  }
  if (iteration_mode == kClearMarkbits) {
    marking_state->ClearLiveness(chunk);
  }
}

template <class Visitor, typename MarkingState>
void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
                                               MarkingState* marking_state,
                                               Visitor* visitor,
                                               IterationMode iteration_mode) {
  for (auto object_and_size :
       LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state->IsGrey(object));
    const bool success = visitor->Visit(object, object_and_size.second);
    USE(success);
    DCHECK(success);
  }
  if (iteration_mode == kClearMarkbits) {
    marking_state->ClearLiveness(chunk);
  }
}

template <typename MarkingState>
void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
                                           MarkingState* marking_state) {
  int new_live_size = 0;
  for (auto object_and_size :
       LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
    new_live_size += object_and_size.second;
  }
  marking_state->SetLiveBytes(chunk, new_live_size);
}

void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
                                                     Page* page) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  swept_list_[space->identity()].push_back(page);
}

void MarkCompactCollector::Evacuate() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
  base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
    EvacuatePrologue();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
    EvacuationScope evacuation_scope(this);
    EvacuatePagesInParallel();
  }

  UpdatePointersAfterEvacuation();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
    if (!heap()->new_space()->Rebalance()) {
      FatalProcessOutOfMemory("NewSpace::Rebalance");
    }
  }

  // Give pages that are queued to be freed back to the OS. Note that filtering
  // slots only handles old space (for unboxed doubles), and thus map space can
  // still contain stale pointers. We only free the chunks after pointer updates
  // to still have access to page headers.
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);

    for (Page* p : new_space_evacuation_pages_) {
      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
        sweeper().AddPage(p->owner()->identity(), p);
      } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
        p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
        p->ForAllFreeListCategories(
            [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
        sweeper().AddPage(p->owner()->identity(), p);
      }
    }
    new_space_evacuation_pages_.clear();

    for (Page* p : old_space_evacuation_pages_) {
      // Important: skip list should be cleared only after roots were updated
      // because root iteration traverses the stack and might have to find
      // code objects from non-updated pc pointing into evacuation candidate.
      SkipList* list = p->skip_list();
      if (list != NULL) list->Clear();
      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
        sweeper().AddPage(p->owner()->identity(), p);
        p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
      }
    }
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
    EvacuateEpilogue();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
    FullEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif
}

class UpdatingItem : public ItemParallelJob::Item {
 public:
  virtual ~UpdatingItem() {}
  virtual void Process() = 0;
};

class PointersUpatingTask : public ItemParallelJob::Task {
 public:
  explicit PointersUpatingTask(Isolate* isolate)
      : ItemParallelJob::Task(isolate) {}

  void RunInParallel() override {
    UpdatingItem* item = nullptr;
    while ((item = GetItem<UpdatingItem>()) != nullptr) {
      item->Process();
      item->MarkFinished();
    }
  };
};

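// Work item that updates pointers inside a to-space page; pages promoted
// new->new may contain dead objects and therefore require iteration via mark
// bits.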
template <typename MarkingState>
class ToSpaceUpdatingItem : public UpdatingItem {
 public:
  explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
                               MarkingState* marking_state)
      : chunk_(chunk),
        start_(start),
        end_(end),
        marking_state_(marking_state) {}
  virtual ~ToSpaceUpdatingItem() {}

  void Process() override {
    if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
      // New->new promoted pages contain garbage so they require iteration using
      // markbits.
      ProcessVisitLive();
    } else {
      ProcessVisitAll();
    }
  }

 private:
  void ProcessVisitAll() {
    PointersUpdatingVisitor visitor;
    for (Address cur = start_; cur < end_;) {
      HeapObject* object = HeapObject::FromAddress(cur);
      Map* map = object->map();
      int size = object->SizeFromMap(map);
      object->IterateBody(map->instance_type(), size, &visitor);
      cur += size;
4033 4034 4035
    }
  }

4036 4037 4038 4039
  void ProcessVisitLive() {
    // For young generation evacuations we want to visit grey objects, for
    // full MC, we need to visit black objects.
    PointersUpdatingVisitor visitor;
4040 4041
    for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
             chunk_, marking_state_->bitmap(chunk_))) {
      object_and_size.first->IterateBodyFast(&visitor);
    }
  }

  MemoryChunk* chunk_;
  Address start_;
  Address end_;
  MarkingState* marking_state_;
};

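// Work item that updates the OLD_TO_NEW and OLD_TO_OLD remembered-set slots
// (both untyped and typed) of a single memory chunk.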
template <typename MarkingState>
class RememberedSetUpdatingItem : public UpdatingItem {
 public:
  explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
                                     MemoryChunk* chunk,
                                     RememberedSetUpdatingMode updating_mode)
      : heap_(heap),
        marking_state_(marking_state),
        chunk_(chunk),
        updating_mode_(updating_mode) {}
  virtual ~RememberedSetUpdatingItem() {}

  void Process() override {
    base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
    UpdateUntypedPointers();
    UpdateTypedPointers();
  }

 private:
  template <AccessMode access_mode>
  inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InFromSpace(*slot)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
      DCHECK(heap_object->IsHeapObject());
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        if (access_mode == AccessMode::ATOMIC) {
          HeapObject** heap_obj_slot = reinterpret_cast<HeapObject**>(slot);
          base::AsAtomicPointer::Relaxed_Store(heap_obj_slot,
                                               map_word.ToForwardingAddress());
        } else {
          *slot = map_word.ToForwardingAddress();
        }
      }
      // If the object was in from space before and is after executing the
      // callback in to space, the object is still live.
      // Unfortunately, we do not know about the slot. It could be in a
      // just freed free space object.
      if (heap_->InToSpace(*slot)) {
        return KEEP_SLOT;
      }
    } else if (heap_->InToSpace(*slot)) {
      // Slots can point to "to" space if the page has been moved, or if the
      // slot has been recorded multiple times in the remembered set, or
      // if the slot was already updated during old->old updating.
      // In case the page has been moved, check markbits to determine liveness
      // of the slot. In the other case, the slot can just be kept.
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
      if (Page::FromAddress(heap_object->address())
              ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
        // IsBlackOrGrey is required because objects are marked as grey for
        // the young generation collector while they are black for the full
        // MC.
        if (marking_state_->IsBlackOrGrey(heap_object)) {
          return KEEP_SLOT;
        } else {
          return REMOVE_SLOT;
        }
      }
      return KEEP_SLOT;
4113
    } else {
4114
      DCHECK(!heap_->InNewSpace(*slot));
4115
    }
4116
    return REMOVE_SLOT;
4117
  }
4118

4119
  void UpdateUntypedPointers() {
4120
    if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
4121 4122 4123 4124 4125 4126
        RememberedSet<OLD_TO_NEW>::Iterate(
            chunk_,
            [this](Address slot) {
              return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(slot);
            },
            SlotSet::PREFREE_EMPTY_BUCKETS);
4127 4128 4129
    }
    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151
      InvalidatedSlotsFilter filter(chunk_);
      RememberedSet<OLD_TO_OLD>::Iterate(
          chunk_,
          [&filter](Address slot) {
            if (!filter.IsValid(slot)) return REMOVE_SLOT;
            return UpdateSlot<AccessMode::NON_ATOMIC>(
                reinterpret_cast<Object**>(slot));
          },
          SlotSet::PREFREE_EMPTY_BUCKETS);
    }
    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        chunk_->invalidated_slots() != nullptr) {
#ifdef DEBUG
      for (auto object_size : *chunk_->invalidated_slots()) {
        HeapObject* object = object_size.first;
        int size = object_size.second;
        DCHECK_LE(object->SizeFromMap(object->map()), size);
      }
#endif
      // The invalidated slots are not needed after old-to-old slots were
      // processed.
      chunk_->ReleaseInvalidatedSlots();
    }
  }

  void UpdateTypedPointers() {
    Isolate* isolate = heap_->isolate();
    if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
        nullptr) {
      CHECK_NE(chunk_->owner(), heap_->map_space());
      RememberedSet<OLD_TO_NEW>::IterateTyped(
          chunk_,
          [isolate, this](SlotType slot_type, Address host_addr, Address slot) {
            return UpdateTypedSlotHelper::UpdateTypedSlot(
                isolate, slot_type, slot, [this](Object** slot) {
                  return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(
                      reinterpret_cast<Address>(slot));
                });
          });
    }
    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
         nullptr)) {
      CHECK_NE(chunk_->owner(), heap_->map_space());
      RememberedSet<OLD_TO_OLD>::IterateTyped(
          chunk_,
          [isolate](SlotType slot_type, Address host_addr, Address slot) {
            return UpdateTypedSlotHelper::UpdateTypedSlot(
                isolate, slot_type, slot, UpdateSlot<AccessMode::NON_ATOMIC>);
          });
    }
  }

  Heap* heap_;
  MarkingState* marking_state_;
  MemoryChunk* chunk_;
  RememberedSetUpdatingMode updating_mode_;
};

UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
    MemoryChunk* chunk, Address start, Address end) {
  return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
      chunk, start, end, non_atomic_marking_state());
}

UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
    MemoryChunk* chunk, Address start, Address end) {
  return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
      chunk, start, end, non_atomic_marking_state());
}

UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
    MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
  return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
      heap(), non_atomic_marking_state(), chunk, updating_mode);
}

UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
    MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
  return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
      heap(), non_atomic_marking_state(), chunk, updating_mode);
}

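// Updates the new space roots referenced by the [start, end) range of global
// handle nodes. Splitting the global handles into ranges allows several of
// these items to be processed in parallel.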
class GlobalHandlesUpdatingItem : public UpdatingItem {
 public:
  GlobalHandlesUpdatingItem(GlobalHandles* global_handles, size_t start,
                            size_t end)
      : global_handles_(global_handles), start_(start), end_(end) {}
  virtual ~GlobalHandlesUpdatingItem() {}

  void Process() override {
    PointersUpdatingVisitor updating_visitor;
    global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
  }

 private:
  GlobalHandles* global_handles_;
  size_t start_;
  size_t end_;
};

// Update array buffers on a page that has been evacuated by copying objects.
// Target page exclusivity in old space is guaranteed by the fact that
// evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
// free list items of a given page. For new space the tracker will update
// using a lock.
class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
 public:
  explicit ArrayBufferTrackerUpdatingItem(Page* page) : page_(page) {}
  virtual ~ArrayBufferTrackerUpdatingItem() {}

  void Process() override {
    ArrayBufferTracker::ProcessBuffers(
        page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
  }

 private:
  Page* page_;
};

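// Creates one updating item per to-space page in the range [bottom, top) and
// returns the number of tasks that should process these items in parallel.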
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
    ItemParallelJob* job) {
  // Seed to space pages.
  const Address space_start = heap()->new_space()->bottom();
  const Address space_end = heap()->new_space()->top();
  int pages = 0;
  for (Page* page : PageRange(space_start, space_end)) {
    Address start =
        page->Contains(space_start) ? space_start : page->area_start();
    Address end = page->Contains(space_end) ? space_end : page->area_end();
    job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
    pages++;
  }
  if (pages == 0) return 0;
  return NumberOfParallelToSpacePointerUpdateTasks(pages);
}

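// Creates remembered-set updating items. With mode ALL, every chunk that has
// OLD_TO_OLD slots gets an item; chunks that only carry OLD_TO_NEW slots are
// added in a second pass so that no chunk is added twice. With mode
// OLD_TO_NEW_ONLY, only chunks with OLD_TO_NEW slots are considered.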
int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
    ItemParallelJob* job, RememberedSetUpdatingMode mode) {
  int pages = 0;
  if (mode == RememberedSetUpdatingMode::ALL) {
    RememberedSet<OLD_TO_OLD>::IterateMemoryChunks(
        heap(), [this, &job, &pages, mode](MemoryChunk* chunk) {
          job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
          pages++;
        });
  }
  RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
      heap(), [this, &job, &pages, mode](MemoryChunk* chunk) {
        const bool contains_old_to_old_slots =
            chunk->slot_set<OLD_TO_OLD>() != nullptr ||
            chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
        if (mode == RememberedSetUpdatingMode::OLD_TO_NEW_ONLY ||
            !contains_old_to_old_slots) {
          job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
          pages++;
        }
      });
  return (pages == 0)
             ? 0
             : NumberOfParallelPointerUpdateTasks(pages, old_to_new_slots_);
}

void MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
    ItemParallelJob* job) {
  for (Page* p : new_space_evacuation_pages_) {
    if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
      job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
    }
  }
}

void MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
    ItemParallelJob* job) {
  for (Page* p : new_space_evacuation_pages_) {
    if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
      job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
    }
  }
}

void MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
    ItemParallelJob* job) {
  for (Page* p : old_space_evacuation_pages_) {
    if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
        p->IsEvacuationCandidate()) {
      job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
    }
  }
}

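// Updates all pointers into evacuated objects: roots are visited on the main
// thread, remembered-set and to-space slots are processed by a parallel item
// job, and weak lists as well as the external string table are fixed up last.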
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);

  PointersUpdatingVisitor updating_visitor;
  ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
                               &page_parallel_job_semaphore_);

  CollectNewSpaceArrayBufferTrackerItems(&updating_job);
  CollectOldSpaceArrayBufferTrackerItems(&updating_job);

  const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
  const int remembered_set_tasks = CollectRememberedSetUpdatingItems(
      &updating_job, RememberedSetUpdatingMode::ALL);
  const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
  for (int i = 0; i < num_tasks; i++) {
    updating_job.AddTask(new PointersUpatingTask(isolate()));
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
  }
  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS);
    updating_job.Run();
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
    // Update pointers from external string table.
    heap_->UpdateReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);

    EvacuationWeakObjectRetainer evacuation_object_retainer;
    heap()->ProcessWeakListRoots(&evacuation_object_retainer);
  }
}

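// Young-generation variant of pointer updating: only OLD_TO_NEW slots need to
// be visited, and new space roots held by global handles are processed as
// separate parallel items.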
void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
  TRACE_GC(heap()->tracer(),
           GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);

  PointersUpdatingVisitor updating_visitor;
  ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
                               &page_parallel_job_semaphore_);

  CollectNewSpaceArrayBufferTrackerItems(&updating_job);
  // Create batches of global handles.
  SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
                                               &updating_job);
  const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
  const int remembered_set_tasks = CollectRememberedSetUpdatingItems(
      &updating_job, RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
  for (int i = 0; i < num_tasks; i++) {
    updating_job.AddTask(new PointersUpatingTask(isolate()));
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
  }
  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
    updating_job.Run();
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);

    EvacuationWeakObjectRetainer evacuation_object_retainer;
    heap()->ProcessWeakListRoots(&evacuation_object_retainer);

    // Update pointers from external string table.
    heap()->UpdateNewSpaceReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);
    heap()->IterateEncounteredWeakCollections(&updating_visitor);
  }
}

void MarkCompactCollector::ReportAbortedEvacuationCandidate(
    HeapObject* failed_object, Page* page) {
  base::LockGuard<base::Mutex> guard(&mutex_);

  page->SetFlag(Page::COMPACTION_WAS_ABORTED);
  aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
}

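// For pages whose evacuation was aborted, drops the already recorded
// old-to-new slots up to the object that failed to evacuate, recomputes live
// bytes, re-records slots for the remaining black objects, and returns the
// page to a regular (non-candidate) state. Successfully evacuated candidates
// are unlinked from their space instead.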
void MarkCompactCollector::PostProcessEvacuationCandidates() {
  for (auto object_and_page : aborted_evacuation_candidates_) {
    HeapObject* failed_object = object_and_page.first;
    Page* page = object_and_page.second;
    DCHECK(page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
    // Aborted compaction page. We have to record slots here, since we
    // might not have recorded them in the first place.

    // Remove outdated slots.
    RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
                                           failed_object->address(),
                                           SlotSet::PREFREE_EMPTY_BUCKETS);
    RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
                                                failed_object->address());
    // Recompute live bytes.
    LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
    // Re-record slots.
    EvacuateRecordOnlyVisitor record_visitor(heap());
    LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
                                               &record_visitor,
                                               LiveObjectVisitor::kKeepMarking);
    // Fix up array buffers.
    ArrayBufferTracker::ProcessBuffers(
        page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
  }
  const int aborted_pages =
      static_cast<int>(aborted_evacuation_candidates_.size());
  aborted_evacuation_candidates_.clear();
  int aborted_pages_verified = 0;
  for (Page* p : old_space_evacuation_pages_) {
    if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
      // After clearing the evacuation candidate flag the page is again in a
      // regular state.
      p->ClearEvacuationCandidate();
      aborted_pages_verified++;
    } else {
      DCHECK(p->IsEvacuationCandidate());
      DCHECK(p->SweepingDone());
      p->Unlink();
    }
  }
  DCHECK_EQ(aborted_pages_verified, aborted_pages);
  if (FLAG_trace_evacuation && (aborted_pages > 0)) {
    PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
                 isolate()->time_millis_since_init(), aborted_pages);
  }
}

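// Releases successfully evacuated candidate pages back to their owning space
// and frees memory chunks that are queued for unmapping.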
void MarkCompactCollector::ReleaseEvacuationCandidates() {
  for (Page* p : old_space_evacuation_pages_) {
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    non_atomic_marking_state()->SetLiveBytes(p, 0);
    CHECK(p->SweepingDone());
    space->ReleasePage(p);
  }
  old_space_evacuation_pages_.clear();
  compacting_ = false;
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}

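// Sweeps pages of |identity| until a page reports at least
// |required_freed_bytes| freed or |max_pages| pages have been swept; a value
// of zero disables the respective limit. Returns the maximum freed value
// observed.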
int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
                                                      int required_freed_bytes,
                                                      int max_pages) {
  int max_freed = 0;
  int pages_freed = 0;
  Page* page = nullptr;
  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
    int freed = ParallelSweepPage(page, identity);
    pages_freed += 1;
    DCHECK_GE(freed, 0);
    max_freed = Max(max_freed, freed);
    if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
      return max_freed;
    if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
  }
  return max_freed;
}

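// Sweeps a single page while holding the page mutex so that concurrent
// sweepers do not process the same page twice. Afterwards, remembered-set
// buckets and chunks that were queued for freeing are released, and the page
// is moved to the swept list.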
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
                                                     AllocationSpace identity) {
  int max_freed = 0;
  {
    base::LockGuard<base::RecursiveMutex> guard(page->mutex());
    // If this page was already swept in the meantime, we can return here.
    if (page->SweepingDone()) return 0;
    DCHECK_EQ(Page::kSweepingPending,
              page->concurrent_sweeping_state().Value());
    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    const FreeSpaceTreatmentMode free_space_mode =
        Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
    if (identity == NEW_SPACE) {
      RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
    } else {
      max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
    }
    DCHECK(page->SweepingDone());

    // After finishing sweeping of a page we clean up its remembered set.
    TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
    if (typed_slot_set) {
      typed_slot_set->FreeToBeFreedChunks();
    }
    SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
    if (slot_set) {
      slot_set->FreeToBeFreedBuckets();
    }
  }

  {
    base::LockGuard<base::Mutex> guard(&mutex_);
    swept_list_[identity].push_back(page);
  }
  return max_freed;
}

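// Adds |page| to the sweeping work list of |space|. Pages may only be added
// while no sweeper tasks are running; for paged spaces the accounting stats
// are shrunk by the page's dead bytes in PrepareToBeSweptPage.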
void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
  DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
  PrepareToBeSweptPage(space, page);
  sweeping_list_[space].push_back(page);
}

void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
                                                         Page* page) {
  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
  DCHECK_GE(page->area_size(),
            static_cast<size_t>(marking_state_->live_bytes(page)));
  size_t to_sweep = page->area_size() - marking_state_->live_bytes(page);
  if (space != NEW_SPACE)
    heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
}

Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
    AllocationSpace space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  Page* page = nullptr;
  if (!sweeping_list_[space].empty()) {
    page = sweeping_list_[space].front();
    sweeping_list_[space].pop_front();
  }
  return page;
}

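// Prepares |space| for sweeping: evacuation candidates are skipped,
// NEVER_ALLOCATE_ON_PAGE pages are swept eagerly to keep them iterable, at
// most one completely empty page is kept while further empty pages are
// released right away, and all remaining pages are handed to the sweeper.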
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
  space->ClearStats();

  int will_be_swept = 0;
  bool unused_page_present = false;

  // Loop needs to support deletion if live bytes == 0 for a page.
  for (auto it = space->begin(); it != space->end();) {
    Page* p = *(it++);
    DCHECK(p->SweepingDone());

    if (p->IsEvacuationCandidate()) {
      // Will be processed in Evacuate.
      DCHECK(!evacuation_candidates_.empty());
      continue;
    }

    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // We need to sweep the page to get it into an iterable state again. Note
      // that this adds unusable memory to the free list, which is later
      // dropped from the free list again. Since we only use the flag for
      // testing, this is fine.
      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      sweeper().RawSweep(p, Sweeper::IGNORE_FREE_LIST,
                         Heap::ShouldZapGarbage()
                             ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
                             : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
      continue;
    }

    // One unused page is kept; all further unused pages are released before
    // sweeping them.
    if (non_atomic_marking_state()->live_bytes(p) == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintIsolate(isolate(), "sweeping: released page: %p",
                       static_cast<void*>(p));
        }
        ArrayBufferTracker::FreeAll(p);
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    sweeper().AddPage(space->identity(), p);
    will_be_swept++;
  }

  if (FLAG_gc_verbose) {
    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
                 AllocationSpaceName(space->identity()), will_be_swept);
  }
}

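// Starts sweeping of the old, code and map spaces. Unmarked large objects are
// freed directly.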
void MarkCompactCollector::StartSweepSpaces() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif

  {
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_OLD);
      StartSweepSpace(heap()->old_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_CODE);
      StartSweepSpace(heap()->code_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_MAP);
      StartSweepSpace(heap()->map_space());
    }
    sweeper().StartSweeping();
  }

  // Deallocate unmarked large objects.
  heap_->lo_space()->FreeUnmarkedObjects();
}


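// When compacting, records the code target slot at |pc| for its (live) host
// code object so that the slot is updated if |target| is relocated.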
void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host =
        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
            pc);
    if (non_atomic_marking_state()->IsBlack(host)) {
      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
      // The target is always in old space, we don't have to record the slot in
      // the old-to-new remembered set.
      DCHECK(!heap()->InNewSpace(target));
      RecordRelocSlot(host, &rinfo, target);
    }
  }
}

}  // namespace internal
}  // namespace v8