// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <vector>

#include "src/heap/concurrent-marking.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/objects/heap-object.h"   // For Worklist<HeapObject, ...>
#include "src/objects/js-weak-refs.h"  // For Worklist<JSWeakCell, ...>

namespace v8 {
namespace internal {

// Forward declarations.
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class MigrationObserver;
class RecordMigratedSlotVisitor;
class UpdatingItem;
class YoungGenerationMarkingVisitor;

template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
 public:
  V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
    return MarkBitFrom(MemoryChunk::FromAddress(obj->address()),
                       obj->address());
  }

  V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
    return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
        p->AddressToMarkbitIndex(addr));
  }

  Marking::ObjectColor Color(HeapObject obj) {
    return Marking::Color(MarkBitFrom(obj));
  }

  V8_INLINE bool IsImpossible(HeapObject obj) {
    return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsBlack(HeapObject obj) {
    return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsWhite(HeapObject obj) {
    return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsGrey(HeapObject obj) {
    return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsBlackOrGrey(HeapObject obj) {
    return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool WhiteToGrey(HeapObject obj);
  V8_INLINE bool WhiteToBlack(HeapObject obj);
  V8_INLINE bool GreyToBlack(HeapObject obj);

  void ClearLiveness(MemoryChunk* chunk) {
    static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
    static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
  }
};
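
// The concrete marking states below plug into MarkingStateBase via CRTP by
// providing the bitmap and live-byte accessors used above. A minimal sketch of
// that contract (illustrative only; MyMarkingState is a hypothetical name, the
// real implementations are the Minor*/Major*/Incremental* states further down
// in this file):
//
//   class MyMarkingState final
//       : public MarkingStateBase<MyMarkingState, AccessMode::NON_ATOMIC> {
//    public:
//     Bitmap* bitmap(const MemoryChunk* chunk) const {
//       return chunk->marking_bitmap_;
//     }
//     void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
//       chunk->live_byte_count_ += by;
//     }
//     intptr_t live_bytes(MemoryChunk* chunk) const {
//       return chunk->live_byte_count_;
//     }
//     void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
//       chunk->live_byte_count_ = value;
//     }
//   };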

class MarkBitCellIterator {
 public:
  MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
    last_cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
    cell_base_ = chunk_->address();
    cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
    cells_ = bitmap->cells();
  }

  inline bool Done() { return cell_index_ >= last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  V8_WARN_UNUSED_RESULT inline bool Advance() {
    cell_base_ += Bitmap::kBitsPerCell * kTaggedSize;
    return ++cell_index_ != last_cell_index_;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kTaggedSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell. If there is no next cell, returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
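
// Usage sketch for MarkBitCellIterator (editor's illustrative example, not
// part of the V8 sources): walking the mark bit cells of a chunk. Each cell
// covers Bitmap::kBitsPerCell consecutive mark bits starting at
// CurrentCellBase().
//
//   MarkBitCellIterator it(chunk, bitmap);
//   while (!it.Done()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     Address cell_base = it.CurrentCellBase();
//     // ... examine the set bits in *cell relative to |cell_base| ...
//     if (!it.Advance()) break;
//   }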

enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

template <LiveObjectIterationMode mode>
class LiveObjectRange {
 public:
  class iterator {
   public:
    using value_type = std::pair<HeapObject, int /* size */>;
    using pointer = const value_type*;
    using reference = const value_type&;
    using iterator_category = std::forward_iterator_tag;

    inline iterator(MemoryChunk* chunk, Bitmap* bitmap, Address start);

    inline iterator& operator++();
    inline iterator operator++(int);

    bool operator==(iterator other) const {
      return current_object_ == other.current_object_;
    }

    bool operator!=(iterator other) const { return !(*this == other); }

    value_type operator*() {
      return std::make_pair(current_object_, current_size_);
    }

   private:
    inline void AdvanceToNextValidObject();

    MemoryChunk* const chunk_;
    Map const one_word_filler_map_;
    Map const two_word_filler_map_;
    Map const free_space_map_;
    MarkBitCellIterator it_;
    Address cell_base_;
    MarkBit::CellType current_cell_;
    HeapObject current_object_;
    int current_size_;
  };

  LiveObjectRange(MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk),
        bitmap_(bitmap),
        start_(chunk_->area_start()),
        end_(chunk->area_end()) {}

  inline iterator begin();
  inline iterator end();

 private:
  MemoryChunk* const chunk_;
  Bitmap* bitmap_;
  Address start_;
  Address end_;
};
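
// Usage sketch for LiveObjectRange (editor's illustrative example, not part of
// the V8 sources): iterating the marked objects of a page together with their
// sizes, as the evacuator and sweeper do.
//
//   for (auto object_and_size :
//        LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
//     HeapObject object = object_and_size.first;
//     int size = object_and_size.second;
//     // ... process |object|, which spans |size| bytes ...
//   }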

class LiveObjectVisitor : AllStatic {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  // Visits black objects on a MemoryChunk until the Visitor returns |false| for
  // an object. If IterationMode::kClearMarkbits is passed the markbits and
  // slots for visited objects are cleared for each successfully visited object.
  template <class Visitor, typename MarkingState>
  static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
                                Visitor* visitor, IterationMode iteration_mode,
                                HeapObject* failed_object);

  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitBlackObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                      Visitor* visitor,
                                      IterationMode iteration_mode);

  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitGreyObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                     Visitor* visitor,
                                     IterationMode iteration_mode);

  template <typename MarkingState>
  static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
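
// Usage sketch for LiveObjectVisitor::VisitBlackObjects (editor's illustrative
// example, not part of the V8 sources), assuming a visitor type that exposes
// bool Visit(HeapObject object, int size) like the evacuation visitors in
// mark-compact.cc. Visitation stops at the first object for which the visitor
// returns false; that object is reported through |failed_object|:
//
//   HeapObject failed_object;
//   if (!LiveObjectVisitor::VisitBlackObjects(
//           chunk, marking_state, &visitor,
//           LiveObjectVisitor::kClearMarkbits, &failed_object)) {
//     // Visiting |failed_object| failed; the caller must handle the aborted
//     // page, e.g. by re-recording its live slots.
//   }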

enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };

// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
 public:
  static const int kMainThread = 0;

  virtual ~MarkCompactCollectorBase() = default;

  virtual void SetUp() = 0;
  virtual void TearDown() = 0;
  virtual void CollectGarbage() = 0;

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate();

 protected:
  explicit MarkCompactCollectorBase(Heap* heap)
      : heap_(heap), old_to_new_slots_(0) {}

  // Marking operations for objects reachable from roots.
  virtual void MarkLiveObjects() = 0;
  // Mark objects reachable (transitively) from objects in the marking
  // work list.
  virtual void ProcessMarkingWorklist() = 0;
  // Clear non-live references held in side data structures.
  virtual void ClearNonLiveReferences() = 0;
  virtual void EvacuatePrologue() = 0;
  virtual void EvacuateEpilogue() = 0;
  virtual void Evacuate() = 0;
  virtual void EvacuatePagesInParallel() = 0;
  virtual void UpdatePointersAfterEvacuation() = 0;
  virtual UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                  Address start,
                                                  Address end) = 0;
  virtual UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;

  template <class Evacuator, class Collector>
  void CreateAndExecuteEvacuationTasks(
      Collector* collector, ItemParallelJob* job,
      RecordMigratedSlotVisitor* record_visitor,
      MigrationObserver* migration_observer, const intptr_t live_bytes);

  // Returns whether this page should be moved according to heuristics.
  bool ShouldMovePage(Page* p, intptr_t live_bytes);

  int CollectToSpaceUpdatingItems(ItemParallelJob* job);
  template <typename IterateableSpace>
  int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
                                        IterateableSpace* space,
                                        RememberedSetUpdatingMode mode);

  int NumberOfParallelCompactionTasks(int pages);
  int NumberOfParallelPointerUpdateTasks(int pages, int slots);
  int NumberOfParallelToSpacePointerUpdateTasks(int pages);

  Heap* heap_;
  // Number of old to new slots. Should be computed during MarkLiveObjects.
  // -1 indicates that the value couldn't be computed.
  int old_to_new_slots_;
};
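
// A rough sketch of how a collector built on MarkCompactCollectorBase
// sequences the virtual phases above (editor's simplification; the actual
// orchestration, including sweeping, verification and stats recording, lives
// in mark-compact.cc):
//
//   void CollectGarbage() {
//     MarkLiveObjects();         // Roots plus transitive closure via the
//                                // marking work list.
//     ClearNonLiveReferences();  // Weak cells, transitions, descriptors.
//     Evacuate();                // Wraps EvacuatePrologue(),
//                                // EvacuatePagesInParallel(),
//                                // UpdatePointersAfterEvacuation() and
//                                // EvacuateEpilogue().
//   }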

class MinorMarkingState final
    : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return chunk->young_generation_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};

class MinorNonAtomicMarkingState final
    : public MarkingStateBase<MinorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return chunk->young_generation_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};

// This marking state is used when concurrent marking is running.
class IncrementalMarkingState final
    : public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap_;
  }

  // Concurrent marking uses local live bytes.
  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};

class MajorAtomicMarkingState final
    : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};

class MajorNonAtomicMarkingState final
    : public MarkingStateBase<MajorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};

struct Ephemeron {
  HeapObject key;
  HeapObject value;
};

typedef Worklist<Ephemeron, 64> EphemeronWorklist;

// Weak objects encountered during marking.
struct WeakObjects {
  Worklist<TransitionArray, 64> transition_arrays;

  // Keep track of all EphemeronHashTables in the heap to process
  // them in the atomic pause.
  Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;

  // Keep track of all ephemerons for concurrent marking tasks. Only store
  // ephemerons in these Worklists if both key and value are unreachable at the
  // moment.
  //
  // MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
  // worklists.
  //
  // current_ephemerons is used as draining worklist in the current fixpoint
  // iteration.
  EphemeronWorklist current_ephemerons;

  // Stores ephemerons to visit in the next fixpoint iteration.
  EphemeronWorklist next_ephemerons;

  // When draining the marking worklist new discovered ephemerons are pushed
  // into this worklist.
  EphemeronWorklist discovered_ephemerons;

  // TODO(marja): For old space, we only need the slot, not the host
  // object. Optimize this by adding a different storage for old space.
  Worklist<std::pair<HeapObject, HeapObjectSlot>, 64> weak_references;
  Worklist<std::pair<HeapObject, Code>, 64> weak_objects_in_code;

  Worklist<JSWeakRef, 64> js_weak_refs;
  Worklist<JSWeakCell, 64> js_weak_cells;

  Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
};
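
// Sketch of the ephemeron fixpoint iteration that the three ephemeron
// worklists above feed (editor's simplification; see
// MarkCompactCollector::ProcessEphemeronsUntilFixpoint in mark-compact.cc for
// the real version). An ephemeron's value is marked only once its key has
// become reachable; unresolved entries are carried over to the next round:
//
//   bool work_to_do = true;
//   while (work_to_do) {
//     // Move next_ephemerons into current_ephemerons, then drain
//     // current_ephemerons together with the regular marking work list.
//     // Ephemerons whose key is still white go back to next_ephemerons;
//     // ephemerons found while draining land in discovered_ephemerons.
//     work_to_do = ProcessEphemerons();
//   }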

struct EphemeronMarking {
  std::vector<HeapObject> newly_discovered;
  bool newly_discovered_overflowed;
  size_t newly_discovered_limit;
};

// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
 public:
#ifdef V8_CONCURRENT_MARKING
  using MarkingState = IncrementalMarkingState;
#else
  using MarkingState = MajorNonAtomicMarkingState;
#endif  // V8_CONCURRENT_MARKING

  using NonAtomicMarkingState = MajorNonAtomicMarkingState;

  // Wrapper for the shared and bailout worklists.
  class MarkingWorklist {
   public:
    using ConcurrentMarkingWorklist = Worklist<HeapObject, 64>;
    using EmbedderTracingWorklist = Worklist<HeapObject, 16>;

    // The heap parameter is not used but needed to match the sequential case.
    explicit MarkingWorklist(Heap* heap) {}

    void Push(HeapObject object) {
      bool success = shared_.Push(kMainThread, object);
      USE(success);
      DCHECK(success);
    }

    void PushBailout(HeapObject object) {
      bool success = bailout_.Push(kMainThread, object);
      USE(success);
      DCHECK(success);
    }

    HeapObject Pop() {
      HeapObject result;
#ifdef V8_CONCURRENT_MARKING
      if (bailout_.Pop(kMainThread, &result)) return result;
#endif
      if (shared_.Pop(kMainThread, &result)) return result;
#ifdef V8_CONCURRENT_MARKING
      // The expectation is that this work list is empty almost all the time
      // and we can thus avoid the emptiness checks by putting it last.
      if (on_hold_.Pop(kMainThread, &result)) return result;
#endif
      return HeapObject();
    }

    HeapObject PopBailout() {
#ifdef V8_CONCURRENT_MARKING
      HeapObject result;
      if (bailout_.Pop(kMainThread, &result)) return result;
#endif
      return HeapObject();
    }

    void Clear() {
      bailout_.Clear();
      shared_.Clear();
      on_hold_.Clear();
      embedder_.Clear();
    }

    bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }

    bool IsEmpty() {
      return bailout_.IsLocalEmpty(kMainThread) &&
             shared_.IsLocalEmpty(kMainThread) &&
             on_hold_.IsLocalEmpty(kMainThread) &&
             bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
             on_hold_.IsGlobalPoolEmpty();
    }

    bool IsEmbedderEmpty() {
      return embedder_.IsLocalEmpty(kMainThread) &&
             embedder_.IsGlobalPoolEmpty();
    }

    int Size() {
      return static_cast<int>(bailout_.LocalSize(kMainThread) +
                              shared_.LocalSize(kMainThread) +
                              on_hold_.LocalSize(kMainThread));
    }

    // Calls the specified callback on each element of the worklists and
    // replaces the element with the result of the callback. If the callback
    // returns nullptr then the element is removed from the worklist.
    // The callback must accept HeapObject and return HeapObject.
    template <typename Callback>
    void Update(Callback callback) {
      bailout_.Update(callback);
      shared_.Update(callback);
      on_hold_.Update(callback);
      embedder_.Update(callback);
    }

    ConcurrentMarkingWorklist* shared() { return &shared_; }
    ConcurrentMarkingWorklist* bailout() { return &bailout_; }
    ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
    EmbedderTracingWorklist* embedder() { return &embedder_; }

    void Print() {
      PrintWorklist("shared", &shared_);
      PrintWorklist("bailout", &bailout_);
      PrintWorklist("on_hold", &on_hold_);
    }

   private:
    // Prints the stats about the global pool of the worklist.
    void PrintWorklist(const char* worklist_name,
                       ConcurrentMarkingWorklist* worklist);

    // Worklist used for most objects.
    ConcurrentMarkingWorklist shared_;

    // Concurrent marking uses this worklist to bail out of concurrently
    // marking certain object types. These objects are handled later in a STW
    // pause after concurrent marking has finished.
    ConcurrentMarkingWorklist bailout_;

    // Concurrent marking uses this worklist to bail out of marking objects
    // in new space's linear allocation area. Used to avoid black allocation
    // for new space. This allows the compiler to remove write barriers
    // for freshly allocated objects.
    ConcurrentMarkingWorklist on_hold_;

    // Worklist for objects that potentially require embedder tracing, i.e.,
    // these objects need to be handed over to the embedder to find the full
    // transitive closure.
    EmbedderTracingWorklist embedder_;
  };
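
  // Usage sketch (editor's illustrative example, not part of the V8 sources):
  // the main-thread marking loop drains the wrapper until every sub-worklist
  // is empty, along the lines of ProcessMarkingWorklist() below:
  //
  //   HeapObject object;
  //   while (!(object = marking_worklist()->Pop()).is_null()) {
  //     // ... visit |object| and Push() any newly discovered grey objects ...
  //   }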

  class RootMarkingVisitor;
  class CustomRootBodyMarkingVisitor;

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  // Performs a global garbage collection.
  void CollectGarbage() override;

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Stop concurrent marking (either by preempting it right away or waiting for
  // it to complete as requested by |stop_request|).
  void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);

  bool StartCompaction();

  void AbortCompaction();

  static inline bool IsOnEvacuationCandidate(Object obj) {
    return Page::FromAddress(obj->ptr())->IsEvacuationCandidate();
  }

  static bool IsOnEvacuationCandidate(MaybeObject obj);

  struct RecordRelocSlotInfo {
    MemoryChunk* memory_chunk;
    SlotType slot_type;
    bool should_record;
    uint32_t offset;
  };
  static RecordRelocSlotInfo PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
                                                    HeapObject target);
  static void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, ObjectSlot slot,
                                   HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, HeapObjectSlot slot,
                                   HeapObject target);
  void RecordLiveSlotsOnPage(Page* page);

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  bool is_compacting() const { return compacting_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  MarkingWorklist* marking_worklist() { return &marking_worklist_; }

  WeakObjects* weak_objects() { return &weak_objects_; }

  inline void AddTransitionArray(TransitionArray array);

  void AddEphemeronHashTable(EphemeronHashTable table) {
    weak_objects_.ephemeron_hash_tables.Push(kMainThread, table);
  }

  void AddEphemeron(HeapObject key, HeapObject value) {
    weak_objects_.discovered_ephemerons.Push(kMainThread,
                                             Ephemeron{key, value});
  }

  void AddWeakReference(HeapObject host, HeapObjectSlot slot) {
    weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
  }

  void AddWeakObjectInCode(HeapObject object, Code code) {
    weak_objects_.weak_objects_in_code.Push(kMainThread,
                                            std::make_pair(object, code));
  }

  void AddWeakRef(JSWeakRef weak_ref) {
    weak_objects_.js_weak_refs.Push(kMainThread, weak_ref);
  }

  void AddWeakCell(JSWeakCell weak_cell) {
    weak_objects_.js_weak_cells.Push(kMainThread, weak_cell);
  }

  inline void AddBytecodeFlushingCandidate(SharedFunctionInfo flush_candidate);

  void AddNewlyDiscovered(HeapObject object) {
    if (ephemeron_marking_.newly_discovered_overflowed) return;

    if (ephemeron_marking_.newly_discovered.size() <
        ephemeron_marking_.newly_discovered_limit) {
      ephemeron_marking_.newly_discovered.push_back(object);
    } else {
      ephemeron_marking_.newly_discovered_overflowed = true;
    }
  }

  void ResetNewlyDiscovered() {
    ephemeron_marking_.newly_discovered_overflowed = false;
    ephemeron_marking_.newly_discovered.clear();
  }

  Sweeper* sweeper() { return sweeper_; }

#ifdef DEBUG
  // Checks whether performing mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  void VerifyMarking();
#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  void VerifyMarkbitsAreDirty(PagedSpace* space);
  void VerifyMarkbitsAreClean(PagedSpace* space);
  void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif

  unsigned epoch() const { return epoch_; }

 private:
  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector() override;

  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  void MarkLiveObjects() override;

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkObject(HeapObject host, HeapObject obj);

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkRootObject(Root root, HeapObject obj);

  // Used by wrapper tracing.
  V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootVisitor* root_visitor,
                 ObjectVisitor* custom_root_body_visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(ObjectVisitor* visitor);

  // Marks objects reachable from harmony weak maps and wrapper tracing.
  void ProcessEphemeronMarking();

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat embedded pointers in the code as strong as
  // otherwise they can die and try to deoptimize the underlying code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Drains the main thread marking work list. Will mark all pending objects
  // if no concurrent threads are running.
  void ProcessMarkingWorklist() override;

  enum class MarkingWorklistProcessingMode {
    kDefault,
    kTrackNewlyDiscoveredObjects
  };

  template <MarkingWorklistProcessingMode mode>
  void ProcessMarkingWorklistInternal();

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool VisitEphemeron(HeapObject key, HeapObject value);

  // Marks ephemerons and drains marking worklist iteratively
  // until a fixpoint is reached.
  void ProcessEphemeronsUntilFixpoint();

  // Drains ephemeron and marking worklists. Single iteration of the
  // fixpoint iteration.
  bool ProcessEphemerons();

  // Mark ephemerons and drain marking worklist with a linear algorithm.
  // Only used if fixpoint iteration doesn't finish within a few iterations.
  void ProcessEphemeronsLinear();

  // Perform Wrapper Tracing if in use.
  void PerformWrapperTracing();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences() override;
  void MarkDependentCodeForDeoptimization();
  // Checks if the given weak cell is a simple transition from the parent map
  // of the given dead target. If so it clears the transition and trims
  // the descriptor array of the parent if needed.
  void ClearPotentialSimpleMapTransition(Map dead_target);
  void ClearPotentialSimpleMapTransition(Map map, Map dead_target);

  // Flushes a weakly held bytecode array from a shared function info.
  void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);

  // Clears bytecode arrays that have not been executed for multiple
  // collections.
  void ClearOldBytecodeCandidates();

  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  void TrimDescriptorArray(Map map, DescriptorArray descriptors);
  void TrimEnumCache(Map map, DescriptorArray descriptors);
  bool CompactTransitionArray(Map map, TransitionArray transitions,
                              DescriptorArray descriptors);

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // Goes through the list of encountered weak references and clears those with
  // dead values. If the value is a dead map and the parent map transitions to
  // the dead map via weak cell, then this function also clears the map
  // transition.
  void ClearWeakReferences();

  // Goes through the list of encountered JSWeakCells and clears those with dead
  // values.
  void ClearJSWeakCells();

  void AbortWeakObjects();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
  int CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  void ReleaseEvacuationCandidates();
  void PostProcessEvacuationCandidates();
  void ReportAbortedEvacuationCandidate(HeapObject failed_object,
                                        MemoryChunk* chunk);

  static const int kEphemeronChunkSize = 8 * KB;

  int NumberOfParallelEphemeronVisitingTasks(size_t elements);

  void RightTrimDescriptorArray(DescriptorArray array, int descriptors_to_trim);

  base::Mutex mutex_;
  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  MarkingWorklist marking_worklist_;
  WeakObjects weak_objects_;
  EphemeronMarking ephemeron_marking_;

  // Candidates for pages that should be evacuated.
  std::vector<Page*> evacuation_candidates_;
  // Pages that are actually processed during evacuation.
  std::vector<Page*> old_space_evacuation_pages_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<std::pair<HeapObject, Page*>> aborted_evacuation_candidates_;

  Sweeper* sweeper_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  // Counts the number of mark-compact collections. This is used for marking
  // descriptor arrays. See NumberOfMarkedDescriptors. Only lower two bits are
  // used, so it is okay if this counter overflows and wraps around.
  unsigned epoch_ = 0;

  friend class EphemeronHashTableMarkingTask;
  friend class FullEvacuator;
  friend class Heap;
  friend class RecordMigratedSlotVisitor;
};

template <FixedArrayVisitationMode fixed_array_mode,
          TraceRetainingPathMode retaining_path_mode, typename MarkingState>
class MarkingVisitor final
    : public HeapVisitor<
          int,
          MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>> {
 public:
  typedef HeapVisitor<
      int, MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>>
      Parent;

  V8_INLINE MarkingVisitor(MarkCompactCollector* collector,
                           MarkingState* marking_state);

  V8_INLINE bool ShouldVisitMapPointer() { return false; }

  V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
  V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
  V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
  V8_INLINE int VisitFixedArray(Map map, FixedArray object);
  V8_INLINE int VisitJSApiObject(Map map, JSObject object);
  V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object);
  V8_INLINE int VisitJSDataView(Map map, JSDataView object);
  V8_INLINE int VisitJSTypedArray(Map map, JSTypedArray object);
  V8_INLINE int VisitMap(Map map, Map object);
  V8_INLINE int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
  V8_INLINE int VisitTransitionArray(Map map, TransitionArray object);
  V8_INLINE int VisitJSWeakCell(Map map, JSWeakCell object);
  V8_INLINE int VisitJSWeakRef(Map map, JSWeakRef object);

  // ObjectVisitor implementation.
  V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
    VisitPointerImpl(host, p);
  }
  V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
    VisitPointerImpl(host, p);
  }
  V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
                               ObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }
  V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
                               MaybeObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }
  V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
  V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;

  // Weak list pointers should be ignored during marking. The lists are
  // reconstructed after GC.
  void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
                               ObjectSlot end) final {}

  V8_INLINE void VisitDescriptors(DescriptorArray descriptors,
                                  int number_of_own_descriptors);
  // Marks the descriptor array black without pushing it on the marking work
  // list and visits its header.
  V8_INLINE void MarkDescriptorArrayBlack(HeapObject host,
                                          DescriptorArray descriptors);

 private:
  // Granularity in which FixedArrays are scanned if |fixed_array_mode|
  // is true.
  static const int kProgressBarScanningChunk = 32 * KB;

  template <typename TSlot>
  V8_INLINE void VisitPointerImpl(HeapObject host, TSlot p);

  template <typename TSlot>
  V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end);

  V8_INLINE int VisitFixedArrayIncremental(Map map, FixedArray object);

  template <typename T>
  V8_INLINE int VisitEmbedderTracingSubclass(Map map, T object);

  // Marks the object grey and pushes it on the marking work list.
  V8_INLINE void MarkObject(HeapObject host, HeapObject obj);

  MarkingState* marking_state() { return marking_state_; }

  MarkCompactCollector::MarkingWorklist* marking_worklist() const {
    return collector_->marking_worklist();
  }

  Heap* const heap_;
  MarkCompactCollector* const collector_;
  MarkingState* const marking_state_;
  const unsigned mark_compact_epoch_;
};

class EvacuationScope {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
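
// Usage sketch for EvacuationScope (editor's illustrative example, not part of
// the V8 sources): an RAII guard that flags the collector as evacuating for
// the duration of a scope, e.g. around parallel page evacuation:
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     EvacuatePagesInParallel();
//   }  // collector->set_evacuation(false) runs here.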

#ifdef ENABLE_MINOR_MC

// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MinorMarkingState;
  using NonAtomicMarkingState = MinorNonAtomicMarkingState;

  explicit MinorMarkCompactCollector(Heap* heap);
  ~MinorMarkCompactCollector() override;

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  void CollectGarbage() override;

  void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
                    FreeSpaceTreatmentMode free_space_mode);
  void CleanupSweepToIteratePages();

 private:
  using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
  class RootMarkingVisitor;

  static const int kNumMarkers = 8;
  static const int kMainMarker = 0;

  inline MarkingWorklist* worklist() { return worklist_; }

  inline YoungGenerationMarkingVisitor* main_marking_visitor() {
    return main_marking_visitor_;
  }

  void MarkLiveObjects() override;
  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
  V8_INLINE void MarkRootObject(HeapObject obj);
  void ProcessMarkingWorklist() override;
  void ClearNonLiveReferences() override;

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  int NumberOfParallelMarkingTasks(int pages);

  MarkingWorklist* worklist_;

  YoungGenerationMarkingVisitor* main_marking_visitor_;
  base::Semaphore page_parallel_job_semaphore_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<Page*> sweep_to_iterate_pages_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  friend class YoungGenerationMarkingTask;
  friend class YoungGenerationMarkingVisitor;
};

#endif  // ENABLE_MINOR_MC

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_