// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <deque>

#include "src/base/bits.h"
#include "src/heap/marking.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"

namespace v8 {
namespace internal {

// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;

class ObjectMarking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  static Marking::ObjectColor Color(HeapObject* obj) {
    return Marking::Color(ObjectMarking::MarkBitFrom(obj));
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};
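// Illustrative use of the helpers above (a sketch, not a declaration from
// this header): the mark bit for a heap object is found via its page, e.g.
//   MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
//   Marking::ObjectColor color = ObjectMarking::Color(object);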

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false) {}

  void Initialize(Address low, Address high);
  void Uninitialize(bool aborting = false);

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  bool in_use() const { return in_use_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  bool in_use_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
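// Worked example of the ring-buffer arithmetic above (illustrative only; the
// concrete sizes are hypothetical, not mandated by this header). With a
// backing store of 8 slots, mask_ == 7 and top_ == bottom_ == 0 initially:
//   Push(a)    -> array_[0] = a, top_ becomes 1
//   Push(b)    -> array_[1] = b, top_ becomes 2
//   Pop()      -> top_ becomes 1, returns array_[1] == b   (LIFO order)
//   Unshift(c) -> bottom_ becomes 7, array_[7] = c
// One slot is always left unused: IsFull() reports full when
// (top_ + 1) & mask_ == bottom_, which distinguishes a full deque from an
// empty one (top_ == bottom_).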


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bailout
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
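// Note on the bookkeeping above (inferred from the interface, not an
// authoritative description): candidates appear to be kept in two intrusive,
// singly-linked lists whose heads are jsfunction_candidates_head_ and
// shared_function_info_candidates_head_; the Get/Set/ClearNextCandidate
// helpers thread the links through the candidate objects themselves, so no
// extra allocation is needed while marking is in progress.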


// Defined in isolate.h.
class ThreadLocalTop;

class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell. If there is no next cell, it returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};

// Grey objects can occur on black pages when black objects transition to
// grey, e.g. when RecordWrites is called on them.
enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {
  }

  HeapObject* Next();

 private:
  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};
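// Illustrative usage sketch (hypothetical call site; assumes Next() returns
// nullptr once the page has been exhausted):
//   LiveObjectIterator<kBlackObjects> it(chunk);
//   HeapObject* object = nullptr;
//   while ((object = it.Next()) != nullptr) {
//     // ... process one live (black) object on this page ...
//   }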

// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  class Evacuator;

  class Sweeper {
   public:
    class SweeperTask;

    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
    enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };

    typedef std::deque<Page*> SweepingList;
    typedef List<Page*> SweptList;

    static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
                        FreeSpaceTreatmentMode free_space_mode);

    explicit Sweeper(Heap* heap)
        : heap_(heap),
          pending_sweeper_tasks_semaphore_(0),
          sweeping_in_progress_(false),
          late_pages_(false),
          num_sweeping_tasks_(0) {}

    bool sweeping_in_progress() { return sweeping_in_progress_; }
    bool contains_late_pages() { return late_pages_; }

    void AddPage(AllocationSpace space, Page* page);
    void AddLatePage(AllocationSpace space, Page* page);

    int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                           int max_pages = 0);
    int ParallelSweepPage(Page* page, AllocationSpace identity);

    void StartSweeping();
    void StartSweepingHelper(AllocationSpace space_to_start);
    void EnsureCompleted();
    void EnsureNewSpaceCompleted();
    bool IsSweepingCompleted();
    void SweepOrWaitUntilSweepingCompleted(Page* page);

    void AddSweptPageSafe(PagedSpace* space, Page* page);
    Page* GetSweptPageSafe(PagedSpace* space);

   private:
    static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;

    template <typename Callback>
    void ForAllSweepingSpaces(Callback callback) {
      for (int i = 0; i < kAllocationSpaces; i++) {
        callback(static_cast<AllocationSpace>(i));
      }
    }

    Page* GetSweepingPageSafe(AllocationSpace space);
    void AddSweepingPageSafe(AllocationSpace space, Page* page);

    void PrepareToBeSweptPage(AllocationSpace space, Page* page);

    Heap* heap_;
    base::Semaphore pending_sweeper_tasks_semaphore_;
    base::Mutex mutex_;
    SweptList swept_list_[kAllocationSpaces];
    SweepingList sweeping_list_[kAllocationSpaces];
    bool sweeping_in_progress_;
    bool late_pages_;
    base::AtomicNumber<intptr_t> num_sweeping_tasks_;
  };
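  // Sketch of how the Sweeper above is typically driven (inferred from its
  // interface, not a normative description): after marking, pages are queued
  // per space with AddPage()/AddLatePage(), StartSweeping() kicks off
  // concurrent SweeperTasks, and allocation slow paths may call
  // ParallelSweepSpace() to help sweeping until the requested number of bytes
  // has been freed; EnsureCompleted() joins all outstanding work on the main
  // thread.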

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether performing mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit references groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  static const size_t kMaxMarkingDequeSize = 4 * MB;
  static const size_t kMinMarkingDequeSize = 256 * KB;

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
    if (!marking_deque_.in_use()) {
      EnsureMarkingDequeIsCommitted(max_size);
      InitializeMarkingDeque();
    }
  }

  void EnsureMarkingDequeIsCommitted(size_t max_size);
  void EnsureMarkingDequeIsReserved();

  void InitializeMarkingDeque();

  // The following two methods may only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // while mark bits are still intact.
  bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
  HeapObject* FindBlackObjectBySlotSlow(Address slot);

  // Removes all the slots in the slot buffers that are within the given
  // address range.
  void RemoveObjectSlots(Address start_slot, Address end_slot);

  Sweeper& sweeper() { return sweeper_; }

  // ===========================================================================
  // Embedder heap tracer support. =============================================
  // ===========================================================================

  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
  bool UsingEmbedderHeapTracer() { return embedder_heap_tracer(); }

  // In order to avoid running out of memory we force tracing wrappers if there
  // are too many of them.
  bool RequiresImmediateWrapperProcessing();

  void RegisterWrappersWithEmbedderHeapTracer();

  void TracePossibleWrapper(JSObject* js_object);

  size_t wrappers_to_trace() { return wrappers_to_trace_.size(); }

 private:
  class EvacuateNewSpacePageVisitor;
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateRecordOnlyVisitor;
  class EvacuateVisitorBase;
  class HeapObjectVisitor;
  class ObjectStatsVisitor;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);
  void ClearInvalidRememberedSetSlots();

  void ComputeEvacuationHeuristics(int area_size,
                                   int* target_fragmentation_percent,
                                   int* max_evacuated_bytes);

  void VisitAllObjects(HeapObjectVisitor* visitor);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups,
  //      implicit references' groups, or embedder heap tracing.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and if needed trim descriptors arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpacePrologue();

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  template <class Visitor>
  bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                        IterationMode mode);

  void RecomputeLiveBytes(MemoryChunk* page);

  void ReleaseEvacuationCandidates();

  // Starts sweeping of a space by contributing on the main thread and setting
  // up other pages for sweeping.
  void StartSweepSpace(PagedSpace* space);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;

  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  base::VirtualMemory* marking_deque_memory_;
  size_t marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  std::vector<std::pair<void*, void*>> wrappers_to_trace_;

  CodeFlusher* code_flusher_;

  EmbedderHeapTracer* embedder_heap_tracer_;

  List<Page*> evacuation_candidates_;
  List<Page*> newspace_evacuation_candidates_;

  Sweeper sweeper_;

  friend class Heap;
  friend class StoreBuffer;
};


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
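// Illustrative usage of EvacuationScope (hypothetical call site): it is an
// RAII helper that flips the collector's evacuation flag for the duration of
// a block, e.g.
//   {
//     EvacuationScope scope(heap->mark_compact_collector());
//     // collector->evacuation() returns true inside this scope
//   }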


const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_