// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_

#include "compiler-intrinsics.h"
#include "spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
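
// As an illustrative sketch, a callback with this signature could report
// liveness from the mark bits of the Marking class declared below; using
// Size() for the heap size and leaving the offset untouched are assumptions
// of this sketch, not a statement about the collector's own callbacks:
//
//   static bool ExampleIsAlive(HeapObject* obj, int* size, int* offset) {
//     *size = obj->Size();
//     return Marking::IsBlack(Marking::MarkBitFrom(obj));
//   }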

// Forward declarations.
class CodeFlusher;
class GCTracer;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap)
      : heap_(heap) {
  }

  static inline MarkBit MarkBitFrom(Address addr);

  static inline MarkBit MarkBitFrom(HeapObject* obj) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  static inline bool IsImpossible(MarkBit mark_bit) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  static inline bool IsBlack(MarkBit mark_bit) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  static inline bool IsWhite(MarkBit mark_bit) {
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  static inline bool IsGrey(MarkBit mark_bit) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  static inline void MarkBlack(MarkBit mark_bit) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  static inline void BlackToGrey(MarkBit markbit) {
    markbit.Next().Set();
  }

  static inline void WhiteToGrey(MarkBit markbit) {
    markbit.Set();
    markbit.Next().Set();
  }

  static inline void GreyToBlack(MarkBit markbit) {
    markbit.Next().Clear();
  }

  static inline void BlackToGrey(HeapObject* obj) {
    BlackToGrey(MarkBitFrom(obj));
  }

  static inline void AnyToGrey(MarkBit markbit) {
    markbit.Set();
    markbit.Next().Set();
  }

  // Returns true if the object whose mark is transferred is marked black.
  bool TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT: return "black";
      case WHITE_OBJECT: return "white";
      case GREY_OBJECT: return "grey";
      case IMPOSSIBLE_COLOR: return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from,
                                   HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually grey.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};
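
// A minimal sketch of how the transitions above compose for a single object,
// assuming a HeapObject* obj whose two mark bits start out white (00):
//
//   MarkBit bit = Marking::MarkBitFrom(obj);
//   ASSERT(Marking::IsWhite(bit));
//   Marking::WhiteToGrey(bit);   // 00 -> 11: obj enters the grey frontier.
//   Marking::GreyToBlack(bit);   // 11 -> 10: all of obj's fields were visited.
//   ASSERT(Marking::IsBlack(bit));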

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  inline void PushBlack(HeapObject* object) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  inline void PushGrey(HeapObject* object) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  inline HeapObject* Pop() {
    ASSERT(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    ASSERT(object->IsHeapObject());
    return object;
  }

  inline void UnshiftGrey(HeapObject* object) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
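
// A minimal usage sketch; the addresses low and high are assumed to delimit
// a backing-store region reserved for the deque, as the collector sets up
// elsewhere:
//
//   MarkingDeque deque;
//   deque.Initialize(low, high);  // Capacity rounds down to a power of two.
//   deque.PushGrey(obj);          // Sets the overflow flag when full.
//   while (!deque.IsEmpty()) {
//     HeapObject* current = deque.Pop();
//     // ... visit current's fields, pushing newly marked objects ...
//   }
//   if (deque.overflowed()) {
//     // Rescan the heap for grey objects, refill, then ClearOverflowed().
//   }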


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
//    - Untyped slots are expected to contain a tagged object pointer.
//      They are recorded by an address.
//    - Typed slots are expected to contain an encoded pointer to a heap
//      object where the way of encoding depends on the type of the slot.
//      They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped, which allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then it
// is the first element of a typed slot's pair.
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {
  }

  void Add(ObjectSlot slot) {
    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() {
    return idx_ == kNumberOfElements;
  }

  inline bool HasSpaceForTypedSlot() {
    return idx_ < kNumberOfElements - 1;
  }

  static void UpdateSlotsRecordedIn(Heap* heap,
                                    SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode {
    FAIL_ON_OVERFLOW,
    IGNORE_OVERFLOW
  };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    ObjectSlot slot,
                    AdditionMode mode) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    SlotType type,
                    Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};
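
// A minimal sketch of recording an untyped slot; allocator and buffer are
// assumed to be the collector's SlotsBufferAllocator and the head of its
// buffer chain (e.g. migration_slots_buffer_), and slot an Object** pointing
// into an evacuation candidate:
//
//   if (!SlotsBuffer::AddTo(&allocator, &buffer, slot,
//                           SlotsBuffer::FAIL_ON_OVERFLOW)) {
//     // The chain grew past kChainLengthThreshold and was deallocated; the
//     // caller has to recover, e.g. by evicting the popular evacuation
//     // candidate that the slot points into.
//   }
//   int recorded_slots = SlotsBuffer::SizeOfChain(buffer);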


// -------------------------------------------------------------------------
// Marker shared between incremental and non-incremental marking
template<class BaseMarker> class Marker {
 public:
  Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
      : base_marker_(base_marker),
        mark_compact_collector_(mark_compact_collector) {}

  // Mark pointers in a Map and its DescriptorArray together, possibly
  // treating transitions or back pointers as weak.
  void MarkMapContents(Map* map);
  void MarkDescriptorArray(DescriptorArray* descriptors);
  void MarkTransitionArray(TransitionArray* transitions);

 private:
  BaseMarker* base_marker() {
    return base_marker_;
  }

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_;
  }

  BaseMarker* base_marker_;
  MarkCompactCollector* mark_compact_collector_;
};


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Type of functions to compute forwarding addresses of objects in
  // compacted spaces.  Given an object and its size, return a (non-failure)
  // Object* that will be the object after forwarding.  There is a separate
  // allocation function for each (compactable) space based on the location
  // of the object before compaction.
  typedef MaybeObject* (*AllocationFunction)(Heap* heap,
                                             HeapObject* object,
                                             int object_size);
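
  // As an illustration, an allocation function for old pointer space might be
  // shaped as follows (a sketch; the old_pointer_space() accessor and its
  // AllocateRaw call are assumptions about how a concrete function allocates):
  //
  //   static MaybeObject* AllocateInOldPointerSpace(Heap* heap,
  //                                                 HeapObject* object,
  //                                                 int object_size) {
  //     return heap->old_pointer_space()->AllocateRaw(object_size);
  //   }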

  // Type of functions to encode the forwarding address for an object.
  // Given the object, its size, and the new (non-failure) object it will be
  // forwarded to, encode the forwarding address.  For paged spaces, the
  // 'offset' input/output parameter contains the offset of the forwarded
  // object from the forwarding address of the previous live object in the
  // page as input, and is updated to contain the offset to be used for the
  // next live object in the same page.  For spaces using a different
  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
  typedef void (*EncodingFunction)(Heap* heap,
                                   HeapObject* old_object,
                                   int object_size,
                                   Object* new_object,
                                   int* offset);

  // Type of functions to process non-live objects.
  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);

  // Pointer to member function, used in IterateLiveObjects.
  typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);

  // Set the global flags; this must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare(GCTracer* tracer);

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode {
    INCREMENTAL_COMPACTION,
    NON_INCREMENTAL_COMPACTION
  };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

  // During a full GC, there is a stack-allocated GCTracer that is used for
  // bookkeeping information.  Return a pointer to that tracer.
  GCTracer* tracer() { return tracer_; }

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    CONSERVATIVE,
    LAZY_CONSERVATIVE,
    PRECISE
  };

#ifdef DEBUG
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
#endif

  // Sweep a single page from the given space conservatively.
  // Return a number of reclaimed bytes.
  static intptr_t SweepConservatively(PagedSpace* space, Page* p);

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))->
        IsEvacuationCandidate();
  }

  void EvictEvacuationCandidate(Page* page) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point
    // to other evacuation candidates thus we have to
    // rescan the page after evacuation to discover and update all
    // pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);

  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));

  void MigrateObject(Address dst,
                     Address src,
                     int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
  inline void set_encountered_weak_maps(Object* weak_map) {
    encountered_weak_maps_ = weak_map;
  }

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

 private:
  MarkCompactCollector();
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);


#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  // Global flag that forces sweeping to be precise, so we can traverse the
  // heap.
  bool sweep_precisely_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // A pointer to the current stack-allocated GC tracer object during a full
  // collection (NULL before and after).
  GCTracer* tracer_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;
  friend class Marker<IncrementalMarking>;
  friend class Marker<MarkCompactCollector>;

  // Mark non-optimized code for functions inlined into the given optimized
  // code. This will prevent it from being flushed.
  void MarkInlinedFunctionsCode(Code* code);

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  // This is for non-incremental marking only.
  INLINE(bool MarkObjectAndPush(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  // This is for non-incremental marking only.
  INLINE(bool MarkObjectWithoutPush(HeapObject* obj));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  void ProcessNewlyMarkedObject(HeapObject* obj);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the symbol table specially.  References to symbols from the
  // symbol table are weak.
  void MarkSymbolTable();

  // Mark objects in object groups that have at least one object in the
  // group marked.
  void MarkObjectGroups();

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark all objects which are reachable due to host application
  // logic like object groups or implicit reference groups.
  void ProcessExternalMarking();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // After reachable maps have been marked process per context object
  // literal map caches removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveTransitions();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);

  // Marking detaches initial maps from SharedFunctionInfo objects
  // to make this reference weak. We need to reattach initial maps
  // after collection. This is either done during
  // ClearNonLiveTransitions pass or by calling this function.
  void ReattachInitialMaps();

  // Mark all values associated with reachable keys in weak maps encountered
  // so far.  This might push new objects or even new weak maps onto the
  // marking stack.
  void ProcessWeakMaps();

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakMaps();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  Object* encountered_weak_maps_;
  Marker<MarkCompactCollector> marker_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  friend class Heap;
};


const char* AllocationSpaceName(AllocationSpace space);

} }  // namespace v8::internal

#endif  // V8_MARK_COMPACT_H_