// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in *size. It optionally updates *offset to the
// first live object on the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap) : heap_(heap) {}

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    DCHECK(!IsImpossible(mark_bit));
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void MarkWhite(MarkBit mark_bit)) {
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToWhite(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Clear();
  }

  INLINE(static void GreyToWhite(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Next().Set();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void WhiteToBlack(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  static void SetAllMarkBitsInRange(MarkBit start, MarkBit end);
  static void ClearAllMarkBitsOfCellsContainedInRange(MarkBit start,
                                                      MarkBit end);

  void TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually grey.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};
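// Illustrative sketch, compiled out: the expected tri-colour transitions for a
// single object's mark bits, assuming the bit patterns documented above
// (white 00, grey 11, black 10). The helper name is hypothetical.
#if 0
inline void ExampleTriColourTransitions(HeapObject* obj) {
  MarkBit bit = Marking::MarkBitFrom(obj);
  DCHECK(Marking::IsWhite(bit));  // Unmarked objects start white: 00.
  Marking::WhiteToGrey(bit);      // Discovered and queued for scanning: 11.
  DCHECK(Marking::IsGrey(bit));
  Marking::GreyToBlack(bit);      // Body has been scanned: 10.
  DCHECK(Marking::IsBlack(bit));
}
#endif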

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = base::bits::RoundDownToPowerOfTwo32(
                static_cast<uint32_t>(obj_high - obj_low)) -
            1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  INLINE(void PushBlack(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    DCHECK(object->map()->IsMap());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(void PushGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    DCHECK(object->map()->IsMap());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    DCHECK(Marking::IsGrey(Marking::MarkBitFrom(object)));
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  INLINE(void UnshiftBlack(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top - 1) & mask_] is the top element in the deque.  The Deque is
  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
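// Illustrative sketch, compiled out: how a marking loop drains the deque,
// assuming the MarkingDeque interface above. The helper name is hypothetical.
#if 0
inline void ExampleDrainMarkingDeque(MarkingDeque* deque) {
  while (!deque->IsEmpty()) {
    HeapObject* object = deque->Pop();
    // Visit the object's body here and push newly discovered objects with
    // PushBlack()/PushGrey(). If the deque filled up, overflowed() is set and
    // the heap has to be rescanned for grey objects later.
  }
}
#endif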


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
//    - Untyped slots are expected to contain a tagged object pointer.
//      They are recorded by an address.
//    - Typed slots are expected to contain an encoded pointer to a heap
//      object where the way of encoding depends on the type of the slot.
//      They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {}

  void Add(ObjectSlot slot) {
    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
#ifdef DEBUG
    if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
      DCHECK_NOT_NULL(*slot);
    }
#endif
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CELL_TARGET_SLOT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CELL_TARGET_SLOT:
        return "CELL_TARGET_SLOT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() { return idx_ == kNumberOfElements; }

  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }

  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address, ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address, SlotType type, Address addr,
                    AdditionMode mode);

  // Eliminates all stale entries from the slots buffer, i.e., slots that
  // are not part of live objects anymore. This method must be called after
  // marking, when the whole transitive closure is known, and before sweeping,
  // when mark bits are still intact.
  static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);

  // Ensures that there are no invalid slots in the chain of slots buffers.
  static void VerifySlots(Heap* heap, SlotsBuffer* buffer);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};
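// Illustrative sketch, compiled out: recording an untyped slot through
// SlotsBuffer::AddTo, assuming the allocator and buffer-chain interface above.
// With FAIL_ON_OVERFLOW the whole chain is dropped once it grows past
// kChainLengthThreshold buffers and AddTo returns false. The helper name is
// hypothetical.
#if 0
inline bool ExampleRecordUntypedSlot(SlotsBufferAllocator* allocator,
                                     SlotsBuffer** buffer_address,
                                     Object** slot) {
  return SlotsBuffer::AddTo(allocator, buffer_address, slot,
                            SlotsBuffer::FAIL_ON_OVERFLOW);
}
#endif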


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
//    - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    DCHECK(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    DCHECK(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
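// Illustrative sketch, compiled out: the candidate lifecycle described in the
// class comment above, assuming the public CodeFlusher interface. Candidates
// are accumulated while marking visits flushable functions and are processed
// once marking has completed. The helper name is hypothetical.
#if 0
inline void ExampleCodeFlushCycle(CodeFlusher* flusher, JSFunction* function) {
  flusher->AddCandidate(function);  // During marking.
  flusher->ProcessCandidates();     // After marking has completed.
}
#endif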


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Sets the global flags; must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determines the type of the object and emits a deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    CONCURRENT_SWEEPING,
    SEQUENTIAL_SWEEPING
  };

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(
      Object** anchor_slot, Object** slot, Object* object,
      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool finalize_incremental_marking() const {
    return finalize_incremental_marking_;
  }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes is set
  // to a value larger than 0, then sweeping returns after a block of at least
  // required_freed_bytes has been freed. If required_freed_bytes is zero, the
  // whole given space is swept. Returns the size of the largest contiguous
  // freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes);

  // Sweeps a given page concurrently with the sweeper threads. Returns the
  // size of the largest contiguous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  void EnsureSweepingCompleted();

  // If sweeper threads are not active, this method returns true. If this is
  // a latency issue we should be smarter here. Otherwise, it returns true if
  // the sweeper threads are done processing the pages.
  bool IsSweepingCompleted();

  void RefillFreeList(PagedSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size = 4 * MB);

  void InitializeMarkingDeque();

  void UncommitMarkingDeque();

  // The following four methods must only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // while mark bits are still intact.
  bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
  bool IsSlotInBlackObjectSlow(Page* p, Address slot);
  bool IsSlotInLiveObject(Address slot);
  void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);

 private:
  class SweeperTask;

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);
  void EvictPopularEvacuationCandidate(Page* page);
  void ClearInvalidSlotsBufferEntries(PagedSpace* space);
  void ClearInvalidStoreAndSlotsBufferEntries();

  void StartSweeperThreads();

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  bool finalize_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  base::Semaphore pending_sweeper_jobs_semaphore_;

  bool evacuation_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups
  //      or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
  void ClearMapTransitions(Map* map, Map* dead_transition);
  bool ClearMapBackPointer(Map* map);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
                           int number_of_own_descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ProcessAndClearWeakCells();
  void AbortWeakCells();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
                                            NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space pages list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  base::VirtualMemory* marking_deque_memory_;
  bool marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  SmartPointer<FreeList> free_list_old_space_;

  friend class Heap;
};
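// Illustrative sketch, compiled out: the coarse order in which a full
// collection is expected to be driven through the public interface above
// (SetFlags before Prepare, Prepare before CollectGarbage). The real call
// sites live in Heap; the helper name and flag value are hypothetical.
#if 0
inline void ExampleFullCollection(MarkCompactCollector* collector, int flags) {
  collector->SetFlags(flags);   // Must be called before Prepare().
  collector->Prepare();         // Reset relocation info, pick candidates.
  collector->CollectGarbage();  // Mark, sweep and (optionally) compact.
}
#endif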


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
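// Illustrative sketch, compiled out: walking all mark-bit cells of a chunk,
// assuming the iterator interface above. Each cell covers 32 mark bits, i.e.
// 32 * kPointerSize bytes of the chunk, which is why Advance() steps the cell
// base by that amount. The helper name is hypothetical.
#if 0
inline void ExampleIterateMarkBitCells(MemoryChunk* chunk) {
  for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
    // *it.CurrentCell() holds 32 mark bits describing the objects that start
    // in [it.CurrentCellBase(), it.CurrentCellBase() + 32 * kPointerSize).
    MarkBit::CellType* cell = it.CurrentCell();
    if (*cell == 0) continue;  // No live objects start in this cell's range.
  }
}
#endif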


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
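// Illustrative sketch, compiled out: EvacuationScope is an RAII guard, so the
// collector's evacuation flag is cleared even on early returns. Assumes the
// collector interface above; the helper name is hypothetical.
#if 0
inline void ExampleEvacuationPhase(MarkCompactCollector* collector) {
  EvacuationScope evacuation_scope(collector);
  // collector->evacuation() is true for the duration of this scope and is
  // reset to false when evacuation_scope is destroyed.
}
#endif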


const char* AllocationSpaceName(AllocationSpace space);
}
}  // namespace v8::internal

#endif  // V8_HEAP_MARK_COMPACT_H_