// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <atomic>
#include <vector>

#include "include/v8-internal.h"
#include "src/heap/base/worklist.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"

namespace v8 {
namespace internal {

// Forward declarations.
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class LargePage;
class MigrationObserver;
class ReadOnlySpace;
class RecordMigratedSlotVisitor;
class UpdatingItem;
class YoungGenerationMarkingVisitor;

class MarkBitCellIterator {
 public:
  MarkBitCellIterator(const MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk) {
    last_cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
    cell_base_ = chunk_->address();
    cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
    cells_ = bitmap->cells();
  }

  inline bool Done() { return cell_index_ >= last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  V8_WARN_UNUSED_RESULT inline bool Advance() {
    cell_base_ += Bitmap::kBitsPerCell * kTaggedSize;
    return ++cell_index_ != last_cell_index_;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kTaggedSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell. If there is no next cell, it returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  const MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
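
// Usage sketch for MarkBitCellIterator (illustrative only; `chunk` and
// `bitmap` are assumed to be a valid MemoryChunk and its marking bitmap; the
// real call sites live in the heap implementation):
//
//   MarkBitCellIterator it(chunk, bitmap);
//   while (!it.Done()) {
//     MarkBit::CellType cell = *it.CurrentCell();
//     Address cell_base = it.CurrentCellBase();
//     // Roughly, bit i of `cell` is the mark bit for the tagged-size-aligned
//     // address cell_base + i * kTaggedSize.
//     if (!it.Advance()) break;
//   }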

enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };

template <LiveObjectIterationMode mode>
class LiveObjectRange {
 public:
  class iterator {
   public:
    using value_type = std::pair<HeapObject, int /* size */>;
    using pointer = const value_type*;
    using reference = const value_type&;
    using iterator_category = std::forward_iterator_tag;

    inline iterator(const MemoryChunk* chunk, Bitmap* bitmap, Address start);

    inline iterator& operator++();
    inline iterator operator++(int);

    bool operator==(iterator other) const {
      return current_object_ == other.current_object_;
    }

    bool operator!=(iterator other) const { return !(*this == other); }

    value_type operator*() {
      return std::make_pair(current_object_, current_size_);
    }

   private:
    inline void AdvanceToNextValidObject();

    const MemoryChunk* const chunk_;
    Map const one_word_filler_map_;
    Map const two_word_filler_map_;
    Map const free_space_map_;
    MarkBitCellIterator it_;
    Address cell_base_;
    MarkBit::CellType current_cell_;
    HeapObject current_object_;
    int current_size_;
  };

  LiveObjectRange(const MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk),
        bitmap_(bitmap),
        start_(chunk_->area_start()),
        end_(chunk->area_end()) {
    DCHECK(!chunk->IsLargePage());
  }

  inline iterator begin();
  inline iterator end();

 private:
  const MemoryChunk* const chunk_;
  Bitmap* bitmap_;
  Address start_;
  Address end_;
};
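
// Usage sketch for LiveObjectRange (illustrative only; `chunk` and `bitmap`
// are assumed to be a non-large MemoryChunk and its marking bitmap):
//
//   for (auto object_and_size :
//        LiveObjectRange<kBlackObjects>(chunk, bitmap)) {
//     HeapObject object = object_and_size.first;
//     int size = object_and_size.second;
//     // ... visit `object`, which occupies `size` bytes ...
//   }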

class LiveObjectVisitor : AllStatic {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  // Visits black objects on a MemoryChunk until the Visitor returns |false| for
  // an object. If IterationMode::kClearMarkbits is passed the markbits and
  // slots for visited objects are cleared for each successfully visited object.
  template <class Visitor, typename MarkingState>
  static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
                                Visitor* visitor, IterationMode iteration_mode,
                                HeapObject* failed_object);

  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitBlackObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                      Visitor* visitor,
                                      IterationMode iteration_mode);

  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitGreyObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                     Visitor* visitor,
                                     IterationMode iteration_mode);

  template <typename MarkingState>
  static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
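
// Note on the Visitor template argument above: it is expected to provide a
// `bool Visit(HeapObject object, int size)` callback (see mark-compact.cc for
// the exact contract). Returning false from it aborts VisitBlackObjects and
// reports the offending object via |failed_object|.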

enum class AlwaysPromoteYoung { kYes, kNo };
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };

// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
 public:
  virtual ~MarkCompactCollectorBase() = default;

  virtual void SetUp() = 0;
  virtual void TearDown() = 0;
  virtual void CollectGarbage() = 0;

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate();

 protected:
  explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}

  // Marking operations for objects reachable from roots.
  virtual void MarkLiveObjects() = 0;
  // Mark objects reachable (transitively) from objects in the marking
  // work list.
  virtual void DrainMarkingWorklist() = 0;
  // Clear non-live references held in side data structures.
  virtual void ClearNonLiveReferences() = 0;
  virtual void EvacuatePrologue() = 0;
  virtual void EvacuateEpilogue() = 0;
  virtual void Evacuate() = 0;
  virtual void EvacuatePagesInParallel() = 0;
  virtual void UpdatePointersAfterEvacuation() = 0;
  virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;

  // Returns the number of wanted compaction tasks.
  template <class Evacuator, class Collector>
  size_t CreateAndExecuteEvacuationTasks(
      Collector* collector,
      std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
      MigrationObserver* migration_observer);

  // Returns whether this page should be moved according to heuristics.
  bool ShouldMovePage(Page* p, intptr_t live_bytes,
                      AlwaysPromoteYoung promote_young);

  template <typename IterateableSpace>
  int CollectRememberedSetUpdatingItems(
      std::vector<std::unique_ptr<UpdatingItem>>* items,
      IterateableSpace* space, RememberedSetUpdatingMode mode);

  int NumberOfParallelCompactionTasks();

  Heap* heap_;
};

class MinorMarkingState final
    : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
 public:
  explicit MinorMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return MemoryChunk::cast(chunk)
        ->young_generation_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};

class MinorNonAtomicMarkingState final
    : public MarkingStateBase<MinorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  explicit MinorNonAtomicMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return MemoryChunk::cast(chunk)
        ->young_generation_bitmap<AccessMode::NON_ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_.fetch_add(
        by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_.load(
        std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_.store(value,
                                                   std::memory_order_relaxed);
  }
};

// This is used by marking visitors.
class MajorMarkingState final
    : public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
 public:
  explicit MajorMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  // Concurrent marking uses local live bytes so we may do these accesses
  // non-atomically.
  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_.load(std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_.store(value, std::memory_order_relaxed);
  }
};

// This is used by Scavenger and Evacuator in TransferColor.
// Live byte increments have to be atomic.
class MajorAtomicMarkingState final
    : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
 public:
  explicit MajorAtomicMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_.fetch_add(by);
  }
};

class MajorNonAtomicMarkingState final
    : public MarkingStateBase<MajorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  explicit MajorNonAtomicMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_.load(std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_.store(value, std::memory_order_relaxed);
  }
};
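
// The marking states above share the MarkingStateBase interface. The Minor*
// variants operate on a chunk's young generation bitmap and live byte
// counter, the Major* variants on the regular marking bitmap; the atomic and
// non-atomic flavors differ in the bitmap access mode and in the memory
// ordering used for the live byte counters.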

// This visitor is used for marking on the main thread. It is cheaper than
// the concurrent marking visitor because it does not snapshot JSObjects.
template <typename MarkingState>
class MainMarkingVisitor final
    : public MarkingVisitorBase<MainMarkingVisitor<MarkingState>,
                                MarkingState> {
 public:
  // This is used for revisiting objects that were black allocated.
  class V8_NODISCARD RevisitScope {
   public:
    explicit RevisitScope(MainMarkingVisitor* visitor) : visitor_(visitor) {
      DCHECK(!visitor->revisiting_object_);
      visitor->revisiting_object_ = true;
    }
    ~RevisitScope() {
      DCHECK(visitor_->revisiting_object_);
      visitor_->revisiting_object_ = false;
    }

   private:
    MainMarkingVisitor<MarkingState>* visitor_;
  };

  MainMarkingVisitor(MarkingState* marking_state,
                     MarkingWorklists::Local* local_marking_worklists,
                     WeakObjects::Local* local_weak_objects, Heap* heap,
                     unsigned mark_compact_epoch,
                     base::EnumSet<CodeFlushMode> code_flush_mode,
                     bool embedder_tracing_enabled,
                     bool should_keep_ages_unchanged)
      : MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
            local_marking_worklists, local_weak_objects, heap,
            mark_compact_epoch, code_flush_mode, embedder_tracing_enabled,
            should_keep_ages_unchanged),
        marking_state_(marking_state),
        revisiting_object_(false) {}

  // HeapVisitor override to allow revisiting of black objects.
  bool ShouldVisit(HeapObject object) {
    return marking_state_->GreyToBlack(object) ||
           V8_UNLIKELY(revisiting_object_);
  }

 private:
  // Functions required by MarkingVisitorBase.

  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitJSObjectSubclass(Map map, T object);

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object);

  template <typename TSlot>
  void RecordSlot(HeapObject object, TSlot slot, HeapObject target);

  void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);

  void SynchronizePageAccess(HeapObject heap_object) {
    // Nothing to do on the main thread.
  }

  MarkingState* marking_state() { return marking_state_; }

  TraceRetainingPathMode retaining_path_mode() {
    return (V8_UNLIKELY(FLAG_track_retaining_path))
               ? TraceRetainingPathMode::kEnabled
               : TraceRetainingPathMode::kDisabled;
  }

  MarkingState* const marking_state_;

  friend class MarkingVisitorBase<MainMarkingVisitor<MarkingState>,
                                  MarkingState>;
  bool revisiting_object_;
};
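
// Usage sketch for RevisitScope (illustrative only; `visitor` is assumed to
// point to the main thread's MainMarkingVisitor and `object` to be a
// black-allocated object whose layout changed):
//
//   MainMarkingVisitor<MarkingState>::RevisitScope revisit(visitor);
//   visitor->Visit(object.map(), object);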

// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MajorMarkingState;
  using AtomicMarkingState = MajorAtomicMarkingState;
  using NonAtomicMarkingState = MajorNonAtomicMarkingState;

  using MarkingVisitor = MainMarkingVisitor<MarkingState>;

  class RootMarkingVisitor;
  class CustomRootBodyMarkingVisitor;
  class SharedHeapObjectVisitor;

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  enum class MarkingWorklistProcessingMode {
    kDefault,
    kTrackNewlyDiscoveredObjects
  };

  enum class StartCompactionMode {
    kIncremental,
    kAtomic,
  };

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  // Performs a global garbage collection.
  void CollectGarbage() override;

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Stops concurrent marking (either by preempting it right away or by
  // waiting for it to complete).
  void FinishConcurrentMarking();

  // Returns whether compaction is running.
  bool StartCompaction(StartCompactionMode mode);

  void AbortCompaction();

  void StartMarking();

  static inline bool IsOnEvacuationCandidate(Object obj) {
    return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
  }

  static bool IsOnEvacuationCandidate(MaybeObject obj);

  struct RecordRelocSlotInfo {
    MemoryChunk* memory_chunk;
    SlotType slot_type;
    uint32_t offset;
  };

  static V8_EXPORT_PRIVATE bool IsMapOrForwardedMap(Map map);

  static bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
                                    HeapObject target);
  static RecordRelocSlotInfo ProcessRelocInfo(Code host, RelocInfo* rinfo,
                                              HeapObject target);

  static void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, ObjectSlot slot,
                                   HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, HeapObjectSlot slot,
                                   HeapObject target);
  V8_INLINE static void RecordSlot(MemoryChunk* source_page,
                                   HeapObjectSlot slot, HeapObject target);
  void RecordLiveSlotsOnPage(Page* page);

  bool is_compacting() const { return compacting_; }
  bool is_shared_heap() const { return is_shared_heap_; }

  void FinishSweepingIfOutOfWork();

  enum class SweepingForcedFinalizationMode { kUnifiedHeap, kV8Only };

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  V8_EXPORT_PRIVATE void EnsureSweepingCompleted(
      SweepingForcedFinalizationMode mode);

  void EnsurePageIsSwept(Page* page);

  void DrainSweepingWorklistForSpace(AllocationSpace space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  MarkingWorklists* marking_worklists() { return &marking_worklists_; }

  MarkingWorklists::Local* local_marking_worklists() {
    return local_marking_worklists_.get();
  }

  WeakObjects* weak_objects() { return &weak_objects_; }

  WeakObjects::Local* local_weak_objects() { return local_weak_objects_.get(); }

  inline void AddTransitionArray(TransitionArray array);

  void AddNewlyDiscovered(HeapObject object) {
    if (ephemeron_marking_.newly_discovered_overflowed) return;

    if (ephemeron_marking_.newly_discovered.size() <
        ephemeron_marking_.newly_discovered_limit) {
      ephemeron_marking_.newly_discovered.push_back(object);
    } else {
      ephemeron_marking_.newly_discovered_overflowed = true;
    }
  }

  void ResetNewlyDiscovered() {
    ephemeron_marking_.newly_discovered_overflowed = false;
    ephemeron_marking_.newly_discovered.clear();
  }

  Sweeper* sweeper() { return sweeper_; }

#ifdef DEBUG
  // Checks whether a mark-compact collection is currently in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  void VerifyMarking();
#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
  void VerifyMarkbitsAreClean(PagedSpace* space);
  void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif

  unsigned epoch() const { return epoch_; }

  base::EnumSet<CodeFlushMode> code_flush_mode() const {
    return code_flush_mode_;
  }

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector() override;

  // Used by wrapper tracing.
  V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
  // Used by incremental marking for objects that change their layout.
  void VisitObject(HeapObject obj);
  // Used by incremental marking for black-allocated objects.
  void RevisitObject(HeapObject obj);

  // Drains the main thread marking worklist until the specified number of
  // bytes are processed. If the number of bytes is zero, then the worklist
  // is drained until it is empty.
  template <MarkingWorklistProcessingMode mode =
                MarkingWorklistProcessingMode::kDefault>
  std::pair<size_t, size_t> ProcessMarkingWorklist(size_t bytes_to_process);

 private:
  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // Free unmarked ArrayBufferExtensions.
  void SweepArrayBufferExtensions();

  // Free unmarked entries in the ExternalPointerTable.
  void SweepExternalPointerTable();

  void MarkLiveObjects() override;

  // Marks the object grey and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkObject(HeapObject host, HeapObject obj);

  // Marks the object grey and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkRootObject(Root root, HeapObject obj);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootVisitor* root_visitor,
                 ObjectVisitor* custom_root_body_visitor);

  // Mark all objects that are directly referenced from one of the clients
  // heaps.
  void MarkObjectsFromClientHeaps();

  // Updates pointers to shared objects from client heaps.
  void UpdatePointersInClientHeaps();
  void UpdatePointersInClientHeap(Isolate* client);

  // Marks objects reachable from harmony weak maps and wrapper tracing.
  void ProcessEphemeronMarking();

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat embedded pointers in the code as strong, as
  // otherwise they can die and try to deoptimize the underlying code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor, Isolate* isolate);

  // Drains the main thread marking work list. Will mark all pending objects
  // if no concurrent threads are running.
  void DrainMarkingWorklist() override;

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool ProcessEphemeron(HeapObject key, HeapObject value);

  // Marks ephemerons and drains marking worklist iteratively
  // until a fixpoint is reached. Returns false if too many iterations have been
  // tried and the linear approach should be used.
  bool ProcessEphemeronsUntilFixpoint();

  // Drains ephemeron and marking worklists. Single iteration of the
  // fixpoint iteration.
  bool ProcessEphemerons();

  // Mark ephemerons and drain marking worklist with a linear algorithm.
  // Only used if fixpoint iteration doesn't finish within a few iterations.
  void ProcessEphemeronsLinear();

  // Perform Wrapper Tracing if in use.
  void PerformWrapperTracing();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences() override;
  void MarkDependentCodeForDeoptimization();
  // Checks if the given weak cell is a simple transition from the parent map
  // of the given dead target. If so it clears the transition and trims
  // the descriptor array of the parent if needed.
  void ClearPotentialSimpleMapTransition(Map dead_target);
  void ClearPotentialSimpleMapTransition(Map map, Map dead_target);

  // Flushes a weakly held bytecode array from a shared function info.
  void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);

  // Clears bytecode arrays / baseline code that have not been executed for
  // multiple collections.
  void ProcessOldCodeCandidates();
  void ProcessFlushedBaselineCandidates();

  // Resets any JSFunctions which have had their bytecode flushed.
  void ClearFlushedJsFunctions();

  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  void TrimDescriptorArray(Map map, DescriptorArray descriptors);
  void TrimEnumCache(Map map, DescriptorArray descriptors);
  bool CompactTransitionArray(Map map, TransitionArray transitions,
                              DescriptorArray descriptors);
  bool TransitionArrayNeedsCompaction(TransitionArray transitions,
                                      int num_transitions);

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // Goes through the list of encountered weak references and clears those with
  // dead values. If the value is a dead map and the parent map transitions to
  // the dead map via weak cell, then this function also clears the map
  // transition.
  void ClearWeakReferences();

  // Goes through the list of encountered JSWeakRefs and WeakCells and clears
  // those with dead values.
  void ClearJSWeakRefs();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  void ReleaseEvacuationCandidates();
  // Returns number of aborted pages.
  size_t PostProcessEvacuationCandidates();
  void ReportAbortedEvacuationCandidateDueToOOM(Address failed_start,
                                                Page* page);
  void ReportAbortedEvacuationCandidateDueToFlags(Address failed_start,
                                                  Page* page);

  static const int kEphemeronChunkSize = 8 * KB;

  int NumberOfParallelEphemeronVisitingTasks(size_t elements);

  void RightTrimDescriptorArray(DescriptorArray array, int descriptors_to_trim);

  base::Mutex mutex_;
  base::Semaphore page_parallel_job_semaphore_{0};

#ifdef DEBUG
  enum CollectorState{IDLE,
                      PREPARE_GC,
                      MARK_LIVE_OBJECTS,
                      SWEEP_SPACES,
                      ENCODE_FORWARDING_ADDRESSES,
                      UPDATE_POINTERS,
                      RELOCATE_OBJECTS};

  // The current stage of the collector.
  CollectorState state_;
#endif

  const bool is_shared_heap_;

  bool was_marked_incrementally_ = false;
  bool evacuation_ = false;
  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_ = false;
  bool black_allocation_ = false;
  bool have_code_to_deoptimize_ = false;

  MarkingWorklists marking_worklists_;

  WeakObjects weak_objects_;
  EphemeronMarking ephemeron_marking_;

  std::unique_ptr<MarkingVisitor> marking_visitor_;
  std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;
  std::unique_ptr<WeakObjects::Local> local_weak_objects_;
  NativeContextInferrer native_context_inferrer_;
  NativeContextStats native_context_stats_;

  // Candidates for pages that should be evacuated.
  std::vector<Page*> evacuation_candidates_;
  // Pages that are actually processed during evacuation.
  std::vector<Page*> old_space_evacuation_pages_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<std::pair<Address, Page*>>
      aborted_evacuation_candidates_due_to_oom_;
  std::vector<std::pair<Address, Page*>>
      aborted_evacuation_candidates_due_to_flags_;
  std::vector<LargePage*> promoted_large_pages_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  Sweeper* sweeper_;

  // Counts the number of major mark-compact collections. The counter is
  // incremented right after marking. This is used for:
  // - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
  //   two bits are used, so it is okay if this counter overflows and wraps
  //   around.
  unsigned epoch_ = 0;

  // Bytecode flushing is disabled when the code coverage mode is changed. Since
  // that can happen while a GC is in progress and code_flush_mode_ must remain
  // the same throughout a GC, we record it at the start of each GC.
  base::EnumSet<CodeFlushMode> code_flush_mode_;

  friend class FullEvacuator;
  friend class RecordMigratedSlotVisitor;
};

class V8_NODISCARD EvacuationScope {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
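
// Usage sketch for EvacuationScope (illustrative only; `heap` is assumed to
// be the current Heap):
//
//   MarkCompactCollector* collector = heap->mark_compact_collector();
//   {
//     EvacuationScope evacuation_scope(collector);
//     // collector->evacuation() returns true until the scope ends.
//   }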

// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MinorMarkingState;
  using NonAtomicMarkingState = MinorNonAtomicMarkingState;

  static constexpr size_t kMaxParallelTasks = 8;

  explicit MinorMarkCompactCollector(Heap* heap);
  ~MinorMarkCompactCollector() override;

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  void CollectGarbage() override;

  void MakeIterable(Page* page, FreeSpaceTreatmentMode free_space_mode);
  void CleanupPromotedPages();

 private:
  using MarkingWorklist =
      ::heap::base::Worklist<HeapObject, 64 /* segment size */>;
  class RootMarkingVisitor;

  static const int kNumMarkers = 8;
  static const int kMainMarker = 0;

  inline MarkingWorklist* worklist() { return worklist_; }

  inline YoungGenerationMarkingVisitor* main_marking_visitor() {
    return main_marking_visitor_;
  }

  void MarkLiveObjects() override;
  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
  V8_INLINE void MarkRootObject(HeapObject obj);
  void DrainMarkingWorklist() override;
  void TraceFragmentation();
  void ClearNonLiveReferences() override;

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                          Address start,
                                                          Address end);
  std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectToSpaceUpdatingItems(
      std::vector<std::unique_ptr<UpdatingItem>>* items);

  void SweepArrayBufferExtensions();

  MarkingWorklist* worklist_;
  MarkingWorklist::Local main_thread_worklist_local_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  YoungGenerationMarkingVisitor* main_marking_visitor_;
  base::Semaphore page_parallel_job_semaphore_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<Page*> promoted_pages_;
  std::vector<LargePage*> promoted_large_pages_;

  friend class YoungGenerationMarkingTask;
  friend class YoungGenerationMarkingJob;
  friend class YoungGenerationMarkingVisitor;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_