// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <list>
#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/iterator.h"
#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
#include "src/objects.h"
#include "src/objects/map.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

namespace heap {
class HeapTester;
class TestCodeRangeScope;
}  // namespace heap

class AllocationInfo;
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunk;
class Page;
class PagedSpace;
class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
class Space;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects, the rest of old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references.  See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage.  Garbage sections are marked with a special map.  These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries.  Garbage sections are chained
// together to form a free list after a GC.  Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain.  Very
// small free spaces are ignored, they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section.  The start of this
// section is denoted by the top field in the space.  The end of the section
// is denoted by the limit field in the space.  This special garbage section
// is not marked with a free space map in the data.  The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created.  The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

// Some assertion macros used in the debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

enum FreeListCategoryType {
  kTiniest,
  kTiny,
  kSmall,
  kMedium,
  kLarge,
  kHuge,

  kFirstCategory = kTiniest,
  kLastCategory = kHuge,
  kNumberOfCategories = kLastCategory + 1,
  kInvalidCategory
};

enum FreeMode { kLinkCategory, kDoNotLinkCategory };

enum RememberedSetType {
  OLD_TO_NEW,
  OLD_TO_OLD,
  NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
};

// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
 public:
  static const int kSize = kIntSize +      // FreeListCategoryType type_
                           kIntSize +      // padding for type_
                           kSizetSize +    // size_t available_
                           kPointerSize +  // FreeSpace* top_
                           kPointerSize +  // FreeListCategory* prev_
                           kPointerSize;   // FreeListCategory* next_

  FreeListCategory()
      : type_(kInvalidCategory),
        available_(0),
        top_(nullptr),
        prev_(nullptr),
        next_(nullptr) {}

  void Initialize(FreeListCategoryType type) {
    type_ = type;
    available_ = 0;
    top_ = nullptr;
    prev_ = nullptr;
    next_ = nullptr;
  }

  void Invalidate();

  void Reset();

  void ResetStats() { Reset(); }

  void RepairFreeList(Heap* heap);

  // Relinks the category into the currently owning free list. Requires that the
  // category is currently unlinked.
  void Relink();

  void Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);

  // Picks a node from the list and stores its size in |node_size|. Returns
  // nullptr if the category is empty.
  FreeSpace* PickNodeFromList(size_t* node_size);

  // Performs a single try to pick a node of at least |minimum_size| from the
  // category. Stores the actual size in |node_size|. Returns nullptr if no
  // node is found.
  FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);

  // Picks a node of at least |minimum_size| from the category. Stores the
  // actual size in |node_size|. Returns nullptr if no node is found.
  FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);

  inline FreeList* owner();
  inline Page* page() const;
  inline bool is_linked();
  bool is_empty() { return top() == nullptr; }
  size_t available() const { return available_; }

#ifdef DEBUG
  size_t SumFreeList();
  int FreeListLength();
#endif

 private:
  // For debug builds we accurately compute free lists lengths up until
  // {kVeryLongFreeList} by manually walking the list.
  static const int kVeryLongFreeList = 500;

  FreeSpace* top() { return top_; }
  void set_top(FreeSpace* top) { top_ = top; }
  FreeListCategory* prev() { return prev_; }
  void set_prev(FreeListCategory* prev) { prev_ = prev; }
  FreeListCategory* next() { return next_; }
  void set_next(FreeListCategory* next) { next_ = next; }

  // |type_|: The type of this free list category.
  FreeListCategoryType type_;

  // |available_|: Total available bytes in all blocks of this free list
  // category.
  size_t available_;

  // |top_|: Points to the top FreeSpace* in the free list category.
  FreeSpace* top_;

  FreeListCategory* prev_;
  FreeListCategory* next_;

  friend class FreeList;
  friend class PagedSpace;
};

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Use with std data structures.
  struct Hasher {
    size_t operator()(MemoryChunk* const chunk) const {
      return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
    }
  };
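  // Illustrative usage sketch (not in the original header): Hasher lets raw
  // chunk pointers key standard hash containers, e.g.
  //   std::unordered_set<MemoryChunk*, MemoryChunk::Hasher> visited;
  //   visited.insert(chunk);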

  enum Flag {
    NO_FLAGS = 0u,
    IS_EXECUTABLE = 1u << 0,
    POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
    POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
    // A page in new space has one of the next two flags set.
    IN_FROM_SPACE = 1u << 3,
    IN_TO_SPACE = 1u << 4,
    NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
    EVACUATION_CANDIDATE = 1u << 6,
    NEVER_EVACUATE = 1u << 7,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR = 1u << 8,

    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
    // from new to old space during evacuation.
    PAGE_NEW_OLD_PROMOTION = 1u << 9,

    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
    // within the new space during evacuation.
    PAGE_NEW_NEW_PROMOTION = 1u << 10,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at next
    // candidates selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE = 1u << 12,

    // The memory chunk is already logically freed, however the actual freeing
    // still has to be performed.
    PRE_FREED = 1u << 13,

    // |POOLED|: When actually freeing this chunk, only uncommit and do not
    // give up the reservation as we still reuse the chunk at some point.
    POOLED = 1u << 14,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    //   has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED = 1u << 15,

    // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
    // on pages is sometimes aborted. The flag is used to avoid repeatedly
    // triggering on the same page.
    COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,

    // |ANCHOR|: Flag is set if page is an anchor.
    ANCHOR = 1u << 17,

    // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
    // to iterate the page.
    SWEEP_TO_ITERATE = 1u << 18
  };

  using Flags = uintptr_t;

  static const Flags kPointersToHereAreInterestingMask =
      POINTERS_TO_HERE_ARE_INTERESTING;

  static const Flags kPointersFromHereAreInterestingMask =
      POINTERS_FROM_HERE_ARE_INTERESTING;

  static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;

  static const Flags kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;

  static const Flags kSkipEvacuationSlotsRecordingMask =
      kEvacuationCandidateMask | kIsInNewSpaceMask;

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  //   not be performed on that page. Sweeper threads that are done with their
  //   work will set this value and not touch the page anymore.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  enum ConcurrentSweepingState {
    kSweepingDone,
    kSweepingPending,
    kSweepingInProgress,
  };

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;
  static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
  static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
  static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
  static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
  static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;

  static const size_t kMinHeaderSize =
      kSizeOffset         // NOLINT
      + kSizetSize        // size_t size
      + kUIntptrSize      // uintptr_t flags_
      + kPointerSize      // Address area_start_
      + kPointerSize      // Address area_end_
      + 2 * kPointerSize  // VirtualMemory reservation_
      + kPointerSize      // Address owner_
      + kPointerSize      // Heap* heap_
      + kIntptrSize       // intptr_t progress_bar_
      + kIntptrSize       // intptr_t live_byte_count_
      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
      + kPointerSize  // InvalidatedSlots* invalidated_slots_
      + kPointerSize  // SkipList* skip_list_
      + kPointerSize  // AtomicValue high_water_mark_
      + kPointerSize  // base::RecursiveMutex* mutex_
      + kPointerSize  // base::AtomicWord concurrent_sweeping_
      + kPointerSize  // base::Mutex* page_protection_change_mutex_
      + kPointerSize  // uintptr_t write_unprotect_counter_
      + kSizetSize    // size_t allocated_bytes_
      + kSizetSize    // size_t wasted_memory_
      + kPointerSize  // AtomicValue next_chunk_
      + kPointerSize  // AtomicValue prev_chunk_
      + FreeListCategory::kSize * kNumberOfCategories
      // FreeListCategory categories_[kNumberOfCategories]
      + kPointerSize   // LocalArrayBufferTracker* local_tracker_
      + kIntptrSize    // intptr_t young_generation_live_byte_count_
      + kPointerSize;  // Bitmap* young_generation_bitmap_

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both.  Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
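  // Added note (not in the original header): the expression above is the usual
  // round-up idiom, i.e. kObjectStartOffset is kBodyOffset rounded up to the
  // next multiple of kObjectStartAlignment (and left unchanged if it already
  // is a multiple).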

  // Page size in bytes.  This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
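  // Illustrative sketch (not in the original header, assumes HeapObject's
  // address() accessor): any interior address on a regular page maps back to
  // its chunk header by masking off the low alignment bits, e.g.
  //   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  //   DCHECK(chunk->Contains(object->address()));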

  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively belongs
    // to another chunk. See the comment to Page::FromTopOrLimit.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }

  static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }

  Address address() const {
    return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
  }

  base::RecursiveMutex* mutex() { return mutex_; }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
    return concurrent_sweeping_;
  }

  bool SweepingDone() {
    return concurrent_sweeping_state().Value() == kSweepingDone;
  }

  size_t size() const { return size_; }
  void set_size(size_t size) { size_ = size; }

  inline Heap* heap() const { return heap_; }

  Heap* synchronized_heap();

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  SlotSet* slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
    return slot_set_[type];
  }

  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  TypedSlotSet* typed_slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
    return typed_slot_set_[type];
  }

  template <RememberedSetType type>
  SlotSet* AllocateSlotSet();
  // Not safe to be called concurrently.
  template <RememberedSetType type>
  void ReleaseSlotSet();
  template <RememberedSetType type>
  TypedSlotSet* AllocateTypedSlotSet();
  // Not safe to be called concurrently.
  template <RememberedSetType type>
  void ReleaseTypedSlotSet();

  InvalidatedSlots* AllocateInvalidatedSlots();
  void ReleaseInvalidatedSlots();
  void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
  InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }

  void AllocateLocalTracker();
  void ReleaseLocalTracker();
  inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
  bool contains_array_buffers();

  void AllocateYoungGenerationBitmap();
  void ReleaseYoungGenerationBitmap();

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }

  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory();

  Address HighWaterMark() { return address() + high_water_mark_.Value(); }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return static_cast<int>(progress_bar_);
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
    }
  }

  inline uint32_t AddressToMarkbitIndex(Address addr) const {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) const {
    return this->address() + (index << kPointerSizeLog2);
  }
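  // Illustrative sketch (not in the original header): for pointer-aligned
  // addresses the two conversions above are inverses, e.g.
  //   uint32_t index = chunk->AddressToMarkbitIndex(addr);
  //   DCHECK_EQ(addr, chunk->MarkbitIndexToAddress(index));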

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  void SetFlag(Flag flag) {
    if (access_mode == AccessMode::NON_ATOMIC) {
      flags_ |= flag;
    } else {
      base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
    }
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool IsFlagSet(Flag flag) {
    return (GetFlags<access_mode>() & flag) != 0;
  }

  void ClearFlag(Flag flag) { flags_ &= ~flag; }
  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags", the rest retain the current value in |flags_|.
  void SetFlags(uintptr_t flags, uintptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  uintptr_t GetFlags() {
    if (access_mode == AccessMode::NON_ATOMIC) {
      return flags_;
    } else {
      return base::AsAtomicWord::Relaxed_Load(&flags_);
    }
  }
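  // Illustrative sketch (not in the original header): concurrent readers, e.g.
  // marker threads, can query flags through the atomic specialization, while
  // the main thread can use the cheaper non-atomic default:
  //   bool candidate =
  //       chunk->IsFlagSet<AccessMode::ATOMIC>(EVACUATION_CANDIDATE);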

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
             IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
    return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool ShouldSkipEvacuationSlotRecording() {
    uintptr_t flags = GetFlags<access_mode>();
    return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
           ((flags & COMPACTION_WAS_ABORTED) == 0);
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const {
    uintptr_t owner_value = base::AsAtomicWord::Acquire_Load(
        reinterpret_cast<const uintptr_t*>(&owner_));
    return ((owner_value & kPageHeaderTagMask) == kPageHeaderTag)
               ? reinterpret_cast<Space*>(owner_value - kPageHeaderTag)
               : nullptr;
  }

  void set_owner(Space* space) {
    DCHECK_EQ(0, reinterpret_cast<uintptr_t>(space) & kPageHeaderTagMask);
    base::AsAtomicWord::Release_Store(
        reinterpret_cast<uintptr_t*>(&owner_),
        reinterpret_cast<uintptr_t>(space) + kPageHeaderTag);
    DCHECK_EQ(kPageHeaderTag, base::AsAtomicWord::Relaxed_Load(
                                  reinterpret_cast<const uintptr_t*>(&owner_)) &
                                  kPageHeaderTagMask);
  }

  bool HasPageHeader() { return owner() != nullptr; }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  // Emits a memory barrier. For TSAN builds the other thread needs to perform
  // MemoryChunk::synchronized_heap() to simulate the barrier.
  void InitializationMemoryFence();

  void SetReadAndExecutable();
  void SetReadAndWritable();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 VirtualMemory* reservation);

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  VirtualMemory* reserved_memory() { return &reservation_; }

  size_t size_;
  uintptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;

  // The identity of the owning space.  This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any entry
  // in a fixed array.
  Address owner_;

  Heap* heap_;

  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  intptr_t progress_bar_;

  // Count of bytes marked black on page.
  intptr_t live_byte_count_;

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
  TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
  InvalidatedSlots* invalidated_slots_;

  SkipList* skip_list_;

  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
  base::AtomicValue<intptr_t> high_water_mark_;

  base::RecursiveMutex* mutex_;

  base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;

  base::Mutex* page_protection_change_mutex_;

  // This field is only relevant for code pages. It depicts the number of
  // times a component requested this page to be read+writeable. The
  // counter is decremented when a component resets to read+executable.
  // If Value() == 0 => The memory is read and executable.
  // If Value() >= 1 => The memory is read and writable.
  // The maximum value can right now only be 2.
  uintptr_t write_unprotect_counter_;

  // Bytes allocated on the page, which include all objects on the page
  // and the linear allocation area.
  size_t allocated_bytes_;
  // Freed memory that was not added to the free list.
  size_t wasted_memory_;

  // next_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> prev_chunk_;

  FreeListCategory categories_[kNumberOfCategories];

  LocalArrayBufferTracker* local_tracker_;

  intptr_t young_generation_live_byte_count_;
  Bitmap* young_generation_bitmap_;

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class ConcurrentMarkingState;
  friend class IncrementalMarkingState;
  friend class MajorAtomicMarkingState;
  friend class MajorNonAtomicMarkingState;
  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
  friend class MinorMarkingState;
  friend class MinorNonAtomicMarkingState;
  friend class PagedSpace;
};

static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
              "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");


// -----------------------------------------------------------------------------
// A page is a memory chunk of size 512K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromTopOrLimit(top);
class Page : public MemoryChunk {
 public:
  static const intptr_t kCopyAllFlags = ~0;

  // Page flags copied from from-space to to-space when flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the object
  // is in fact in a page.
  static Page* FromAddress(Address addr) {
    return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
  }

  // Returns the page containing the address provided. The address can
  // potentially point right after the page. To be also safe for tagged values
  // we subtract a hole word. The valid address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
  static Page* FromAllocationAreaAddress(Address address) {
    return Page::FromAddress(address - kPointerSize);
  }

  // Checks if address1 and address2 are on the same new space page.
  static bool OnSamePage(Address address1, Address address2) {
    return Page::FromAddress(address1) == Page::FromAddress(address2);
  }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address addr) {
    return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
  }

  static bool IsAtObjectStart(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
           kObjectStartOffset;
  }

  static Page* ConvertNewToOld(Page* old_page);

  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);

  // Create a Page object that is only used as anchor for the doubly-linked
  // list of real pages.
  explicit Page(Space* owner) { InitializeAsAnchor(owner); }

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

  Page* next_page() { return static_cast<Page*>(next_chunk()); }
  Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
  void set_next_page(Page* page) { set_next_chunk(page); }
  void set_prev_page(Page* page) { set_prev_chunk(page); }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      callback(&categories_[i]);
    }
  }
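  // Illustrative usage sketch (not in the original header): summing the free
  // bytes tracked by every category on this page.
  //   size_t free_bytes = 0;
  //   page->ForAllFreeListCategories(
  //       [&free_bytes](FreeListCategory* category) {
  //         free_bytes += category->available();
  //       });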

  // Returns the offset of a given address to this page.
  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }

  // Returns the address for a given offset to this page.
  Address OffsetToAddress(size_t offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }
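  // Illustrative sketch (not in the original header): Offset() and
  // OffsetToAddress() are inverses for addresses inside the page's object
  // area, e.g.
  //   DCHECK_EQ(addr, page->OffsetToAddress(page->Offset(addr)));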

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, when we know that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingDone());
  }

  void ResetFreeListStatistics();

  size_t AvailableInFreeList();

  size_t AvailableInFreeListFromAllocatedBytes() {
    DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
    return area_size() - wasted_memory() - allocated_bytes();
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return &categories_[type];
  }

  inline void InitializeFreeListCategories();

  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }

  size_t wasted_memory() { return wasted_memory_; }
  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
  size_t allocated_bytes() { return allocated_bytes_; }
  void IncreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    allocated_bytes_ += bytes;
  }
  void DecreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    DCHECK_GE(allocated_bytes(), bytes);
    allocated_bytes_ -= bytes;
  }

  void ResetAllocatedBytes();

  size_t ShrinkToHighWaterMark();

  V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
  void DestroyBlackArea(Address start, Address end);

#ifdef DEBUG
  void Print();
#endif  // DEBUG

 private:
  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };

  void InitializeAsAnchor(Space* owner);

  friend class MemoryAllocator;
};

class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

  // Uncommit memory that is not in use anymore by the object. If the object
  // cannot be shrunk 0 is returned.
  Address GetAddressToShrink(Address object_address, size_t object_size);

  void ClearOutOfLiveRangeSlots(Address free_start);

  // A limit to guarantee that we do not overflow typed slot offset in
  // the old to old remembered set.
  // Note that this limit is higher than what assembler already imposes on
  // x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

 private:
  static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                               Executability executable, Space* owner);

  friend class MemoryAllocator;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : allocation_observers_paused_(false),
        heap_(heap),
        id_(id),
        executable_(executable),
        committed_(0),
        max_committed_(0) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  void AddAllocationObserver(AllocationObserver* observer);

  void RemoveAllocationObserver(AllocationObserver* observer);

  V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();

  V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();

  V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}

  void AllocationStep(int bytes_since_last, Address soon_object, int size);

  // Return the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual size_t CommittedMemory() { return committed_; }

  virtual size_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual size_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual size_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual size_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

  virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;

  void AccountCommitted(size_t bytes) {
    DCHECK_GE(committed_ + bytes, committed_);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(size_t bytes) {
    DCHECK_GE(committed_, committed_ - bytes);
    committed_ -= bytes;
  }

  V8_EXPORT_PRIVATE void* GetRandomMmapAddr();

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  intptr_t GetNextInlineAllocationStepSize();

  std::vector<AllocationObserver*> allocation_observers_;
  bool allocation_observers_paused_;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;

  // Keeps track of committed memory in a space.
  size_t committed_;
  size_t max_committed_;

  DISALLOW_COPY_AND_ASSIGN(Space);
};


class MemoryChunkValidator {
  // Computed offsets should match the compiler generated ones.
  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));

  // Validate our estimates on the header size.
  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements.  This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space.  On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() {
    if (virtual_memory_.IsReserved()) virtual_memory_.Release();
  }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  bool valid() { return virtual_memory_.IsReserved(); }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(virtual_memory_.address());
  }
  size_t size() {
    DCHECK(valid());
    return virtual_memory_.size();
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(virtual_memory_.address());
    return start <= address && address < start + virtual_memory_.size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range.  On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  class FreeBlock {
   public:
    FreeBlock() : start(0), size(0) {}
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory.  If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static bool CompareFreeBlockAddress(const FreeBlock& left,
                                      const FreeBlock& right);
  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
  void ReleaseBlock(const FreeBlock* block);

  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory virtual_memory_;

  // The global mutex guards free_list_ and allocation_list_ as GC threads may
  // access both lists concurrently with the main thread.
  base::Mutex code_range_mutex_;

  // Freed blocks of memory are added to the free list.  When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  std::vector<FreeBlock> free_list_;

  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  std::vector<FreeBlock> allocation_list_;
  size_t current_allocation_block_index_;

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() { Clear(); }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) {
        starts_[idx] = addr;
      } else {
        // In the first region, there may already be an object closer to the
        // start of the region. Do not change the start in that case. If this
        // is not the first region, you probably added overlapping objects.
        DCHECK_EQ(start_region, idx);
      }
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == nullptr) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
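
// Illustrative usage sketch (not in the original header): allocation on code
// pages records each object via SkipList::Update(), and a later lookup through
// the owning page yields an object start at or before the recorded address:
//   SkipList::Update(object_address, object_size);
//   Address start = Page::FromAddress(object_address)->skip_list()->StartFor(
//       object_address);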


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
class V8_EXPORT_PRIVATE MemoryAllocator {
 public:
  // Unmapper takes care of concurrently unmapping and uncommitting memory
  // chunks.
  class Unmapper {
   public:
    class UnmapFreeMemoryTask;

    Unmapper(Heap* heap, MemoryAllocator* allocator)
        : heap_(heap),
          allocator_(allocator),
          pending_unmapping_tasks_semaphore_(0),
          concurrent_unmapping_tasks_active_(0) {
      chunks_[kRegular].reserve(kReservedQueueingSlots);
      chunks_[kPooled].reserve(kReservedQueueingSlots);
    }

    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      if ((chunk->size() == Page::kPageSize) &&
          (chunk->executable() != EXECUTABLE)) {
        AddMemoryChunkSafe<kRegular>(chunk);
      } else {
        AddMemoryChunkSafe<kNonRegular>(chunk);
      }
    }

    MemoryChunk* TryGetPooledMemoryChunkSafe() {
      // Procedure:
      // (1) Try to get a chunk that was declared as pooled and already has
      // been uncommitted.
      // (2) Try to steal any memory chunk of kPageSize that would've been
      // unmapped.
      MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
      if (chunk == nullptr) {
        chunk = GetMemoryChunkSafe<kRegular>();
        if (chunk != nullptr) {
          // For stolen chunks we need to manually free any allocated memory.
          chunk->ReleaseAllocatedMemory();
        }
      }
      return chunk;
    }

    void FreeQueuedChunks();
    void WaitUntilCompleted();
    void TearDown();

    bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }

    int NumberOfDelayedChunks() {
      base::LockGuard<base::Mutex> guard(&mutex_);
      return static_cast<int>(delayed_regular_chunks_.size());
    }

    int NumberOfChunks();

   private:
    static const int kReservedQueueingSlots = 64;
    static const int kMaxUnmapperTasks = 24;

    enum ChunkQueueType {
      kRegular,     // Pages of kPageSize that do not live in a CodeRange and
                    // can thus be used for stealing.
      kNonRegular,  // Large chunks and executable chunks.
      kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
      kNumberOfChunkQueues,
    };

    enum class FreeMode {
      kUncommitPooled,
      kReleasePooled,
    };

    template <ChunkQueueType type>
    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      base::LockGuard<base::Mutex> guard(&mutex_);
      if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
        chunks_[type].push_back(chunk);
      } else {
        DCHECK_EQ(type, kRegular);
        delayed_regular_chunks_.push_back(chunk);
      }
    }

    template <ChunkQueueType type>
    MemoryChunk* GetMemoryChunkSafe() {
      base::LockGuard<base::Mutex> guard(&mutex_);
      if (chunks_[type].empty()) return nullptr;
      MemoryChunk* chunk = chunks_[type].back();
      chunks_[type].pop_back();
      return chunk;
    }

    void ReconsiderDelayedChunks();
    template <FreeMode mode>
    void PerformFreeMemoryOnQueuedChunks();

    Heap* const heap_;
    MemoryAllocator* const allocator_;
    base::Mutex mutex_;
    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
    // Delayed chunks cannot be processed in the current unmapping cycle because
    // of dependencies such as an active sweeper.
    // See MemoryAllocator::CanFreeMemoryChunk.
    std::list<MemoryChunk*> delayed_regular_chunks_;
    CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
    base::Semaphore pending_unmapping_tasks_semaphore_;
    intptr_t concurrent_unmapping_tasks_active_;

    friend class MemoryAllocator;
  };

  enum AllocationMode {
    kRegular,
    kPooled,
  };

  enum FreeMode {
    kFull,
    kAlreadyPooled,
    kPreFreeAndQueue,
    kPooledAndQueue,
  };

  static size_t CodePageGuardStartOffset();

  static size_t CodePageGuardSize();

  static size_t CodePageAreaStartOffset();

  static size_t CodePageAreaEndOffset();

  static size_t CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  static size_t PageAreaSize(AllocationSpace space) {
    DCHECK_NE(LO_SPACE, space);
    return (space == CODE_SPACE) ? CodePageAreaSize()
                                 : Page::kAllocatableMemory;
  }

  static intptr_t GetCommitPageSize();

  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(size_t max_capacity, size_t code_range_size);

  void TearDown();

  // Allocates a Page from the allocator. AllocationMode is used to indicate
  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
  // should be tried first.
  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
            typename SpaceType>
  Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);

  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                               Executability executable);

  template <MemoryAllocator::FreeMode mode = kFull>
  void Free(MemoryChunk* chunk);

  bool CanFreeMemoryChunk(MemoryChunk* chunk);

  // Returns allocated spaces in bytes.
  size_t Size() { return size_.Value(); }

  // Returns allocated executable spaces in bytes.
  size_t SizeExecutable() { return size_executable_.Value(); }

  // Returns the maximum available bytes of heaps.
  size_t Available() {
    const size_t size = Size();
    return capacity_ < size ? 0 : capacity_ - size;
  }

  // Returns maximum available bytes that the old space can have.
  size_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
    return address < lowest_ever_allocated_.Value() ||
           address >= highest_ever_allocated_.Value();
  }

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed. It
  // could be committed later by calling MemoryChunk::CommitArea.
  MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
                             Executability executable, Space* space);

  Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
                               VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                void* hint, VirtualMemory* controller);

  bool CommitMemory(Address addr, size_t size, Executability executable);

  void FreeMemory(VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
  // internally memory is freed from |start_free| to the end of the reservation.
  // Additional memory beyond the page is not accounted though, so
  // |bytes_to_free| is computed by the caller.
  void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                         size_t bytes_to_free, Address new_area_end);

  // Commit a contiguous block of memory from the initial chunk.  Assumes that
  // the address is not nullptr, the size is greater than zero, and that the
  // block is contained in the initial chunk.  Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not nullptr, the size is greater than zero, and the
  // block is contained in the initial chunk.  Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-nullptr bit pattern.
  void ZapBlock(Address start, size_t size);

  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, Address start,
                                              size_t commit_size,
                                              size_t reserved_size);

  CodeRange* code_range() { return code_range_; }
  Unmapper* unmapper() { return &unmapper_; }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

1408
 private:
  // PreFree logically frees the object, i.e., it takes care of the size
  // bookkeeping and calls the allocation callback.
  void PreFreeMemory(MemoryChunk* chunk);

  // FreeMemory can be called concurrently when PreFree was executed before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // See AllocatePage for public interface. Note that currently we only support
  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  template <typename SpaceType>
  MemoryChunk* AllocatePagePooled(SpaceType* owner);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and whose page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    // The use of atomic primitives does not guarantee correctness (wrt.
    // desired semantics) by default. The loop here ensures that we update the
    // values only if they did not change in between.
    void* ptr = nullptr;
    do {
      ptr = lowest_ever_allocated_.Value();
    } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
    do {
      ptr = highest_ever_allocated_.Value();
    } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
  }

  Isolate* isolate_;
  CodeRange* code_range_;

  // Maximum space size in bytes.
  size_t capacity_;

  // Allocated space size in bytes.
  base::AtomicNumber<size_t> size_;
  // Allocated executable space size in bytes.
  base::AtomicNumber<size_t> size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  base::AtomicValue<void*> lowest_ever_allocated_;
  base::AtomicValue<void*> highest_ever_allocated_;

  VirtualMemory last_chunk_;
  Unmapper unmapper_;

  friend class heap::TestCodeRangeScope;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};

extern template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    size_t size, PagedSpace* owner, Executability executable);
extern template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    size_t size, SemiSpace* owner, Executability executable);
extern template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    size_t size, SemiSpace* owner, Executability executable);

// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own Next()
//       method, which is used to avoid virtual function calls when
//       iterating a specific space.

class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() {}
  virtual HeapObject* Next() = 0;
};

template <class PAGE_TYPE>
class PageIteratorImpl
    : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
 public:
  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
  PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
  PAGE_TYPE* operator*() { return p_; }
  bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ == p_;
  }
  bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ != p_;
  }
  inline PageIteratorImpl<PAGE_TYPE>& operator++();
  inline PageIteratorImpl<PAGE_TYPE> operator++(int);

 private:
  PAGE_TYPE* p_;
};

typedef PageIteratorImpl<Page> PageIterator;
typedef PageIteratorImpl<LargePage> LargePageIterator;

class PageRange {
 public:
  typedef PageIterator iterator;
  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
  inline PageRange(Address start, Address limit);

  iterator begin() { return iterator(begin_); }
  iterator end() { return iterator(end_); }

 private:
  Page* begin_;
  Page* end_;
};
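
// Illustrative usage sketch (not part of the interface): PageRange adapts a
// [begin, end) pair of pages to range-based for loops; the single-page
// constructor visits exactly one page.
//
//   for (Page* p : PageRange(page)) {
//     // Process |p|.
//   }
//
// Spaces expose begin()/end() directly, so a whole space is usually iterated
// as `for (Page* p : *space)` instead.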

// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects.  The caller must create a new
// iterator in order to be sure to visit these new objects.
class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  explicit HeapObjectIterator(PagedSpace* space);
  explicit HeapObjectIterator(Page* page);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns nullptr when the iteration has ended.
  inline HeapObject* Next() override;

 private:
  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page.  Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  PagedSpace* space_;
  PageRange page_range_;
  PageRange::iterator current_page_;
};
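
// Illustrative usage sketch (assumes |space| points to a fully swept
// PagedSpace):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // Visit |obj|; Next() already skips free-space fillers.
//   }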


// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top_(nullptr), limit_(nullptr) {}
  AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}

  void Reset(Address top, Address limit) {
    set_top(top);
    set_limit(limit);
  }

  INLINE(void set_top(Address top)) {
    SLOW_DCHECK(top == nullptr ||
                (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_DCHECK(top_ == nullptr ||
                (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  INLINE(void set_limit(Address limit)) {
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    return limit_;
  }

  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationAreaAddress(top_) ==
            Page::FromAllocationAreaAddress(limit_)) &&
           (top_ <= limit_);
  }
#endif

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};


// An abstraction of the accounting statistics of a page-structured space.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in conjunction
// with capacity, or else they always balance increases and decreases to the
// non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    max_capacity_ = 0;
    ClearSize();
  }

  void ClearSize() {
    size_ = 0;
#ifdef DEBUG
    allocated_on_page_.clear();
#endif
  }

  // Accessors for the allocation statistics.
  size_t Capacity() { return capacity_.Value(); }
  size_t MaxCapacity() { return max_capacity_; }
  size_t Size() { return size_; }
#ifdef DEBUG
  size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
#endif

  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    DCHECK_GE(size_ + bytes, size_);
    size_ += bytes;
#ifdef DEBUG
    allocated_on_page_[page] += bytes;
#endif
  }

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    DCHECK_GE(size_, bytes);
    size_ -= bytes;
#ifdef DEBUG
    DCHECK_GE(allocated_on_page_[page], bytes);
    allocated_on_page_[page] -= bytes;
#endif
  }

  void DecreaseCapacity(size_t bytes) {
    size_t capacity = capacity_.Value();
    DCHECK_GE(capacity, bytes);
    DCHECK_GE(capacity - bytes, size_);
    USE(capacity);
    capacity_.Decrement(bytes);
  }

  void IncreaseCapacity(size_t bytes) {
    size_t capacity = capacity_.Value();
    DCHECK_GE(capacity + bytes, capacity);
    capacity_.Increment(bytes);
    if (capacity > max_capacity_) {
      max_capacity_ = capacity;
    }
  }

 private:
  // |capacity_|: The number of object-area bytes (i.e., not including page
  // bookkeeping structures) currently in the space.
  // During evacuation capacity of the main spaces is accessed from multiple
  // threads to check the old generation hard limit.
  base::AtomicNumber<size_t> capacity_;

  // |max_capacity_|: The maximum capacity ever observed.
  size_t max_capacity_;

  // |size_|: The number of allocated bytes.
  size_t size_;

#ifdef DEBUG
  std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
#endif
};
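
// Illustrative sketch of the balanced-update discipline described above; the
// sequence and the variable names are examples, not the exact call sites used
// by the collector:
//
//   stats.IncreaseCapacity(page->area_size());        // a page joins the space
//   stats.IncreaseAllocatedBytes(live_bytes, page);    // objects on that page
//   stats.DecreaseAllocatedBytes(freed_bytes, page);   // some objects freed
//   // DecreaseCapacity() DCHECKs that capacity never drops below Size(), so
//   // allocated bytes must be released before a page's capacity is removed.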

// A free list maintaining free blocks of memory. The free list is organized in
// a way to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which is
// divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.

// The free list is organized in categories as follows:
// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
//   allocation, when categories >= small do not have entries anymore.
// 11-31 words (tiny): The tiny blocks are only used for allocation, when
//   categories >= small do not have entries anymore.
// 32-255 words (small): Used for allocating free space between 1-31 words in
//   size.
// 256-2047 words (medium): Used for allocating free space between 32-255 words
//   in size.
// 2048-16383 words (large): Used for allocating free space between 256-2047
//   words in size.
// At least 16384 words (huge): This list is for objects of 2048 words or
//   larger. Empty pages are also added to this list.
class V8_EXPORT_PRIVATE FreeList {
 public:
  // This method returns how much memory can be allocated after freeing
  // maximum_freed memory.
  static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
    if (maximum_freed <= kTiniestListMax) {
      // Since we are not iterating over all list entries, we cannot guarantee
      // that we can find the maximum freed block in that free list.
      return 0;
    } else if (maximum_freed <= kTinyListMax) {
      return kTinyAllocationMax;
    } else if (maximum_freed <= kSmallListMax) {
      return kSmallAllocationMax;
    } else if (maximum_freed <= kMediumListMax) {
      return kMediumAllocationMax;
    } else if (maximum_freed <= kLargeListMax) {
      return kLargeAllocationMax;
    }
    return maximum_freed;
  }

  static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
    if (size_in_bytes <= kTiniestListMax) {
      return kTiniest;
    } else if (size_in_bytes <= kTinyListMax) {
      return kTiny;
    } else if (size_in_bytes <= kSmallListMax) {
      return kSmall;
    } else if (size_in_bytes <= kMediumListMax) {
      return kMedium;
    } else if (size_in_bytes <= kLargeListMax) {
      return kLarge;
    }
    return kHuge;
  }
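
  // Worked example, assuming 64-bit pointers (kPointerSize == 8):
  //   SelectFreeListCategoryType(3 * kPointerSize)    == kTiniest (3 words)
  //   SelectFreeListCategoryType(100 * kPointerSize)  == kSmall   (100 words)
  //   SelectFreeListCategoryType(3000 * kPointerSize) == kLarge   (3000 words)
  //   GuaranteedAllocatable(200) == 80, i.e. a freed block of 200 bytes only
  //   guarantees a subsequent allocation of kTinyAllocationMax (80) bytes.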

  explicit FreeList(PagedSpace* owner);

  // Adds a node to the free list. The block of size {size_in_bytes} starting
  // at {start} is placed on the free list. The return value is the number of
  // bytes that were not added to the free list, because the freed memory block
  // was too small. Bookkeeping information will be written to the block, i.e.,
  // its contents will be destroyed. The start address should be word aligned,
  // and the size should be a non-zero multiple of the word size.
  size_t Free(Address start, size_t size_in_bytes, FreeMode mode);

  // Finds a node of size at least size_in_bytes and sets up a linear allocation
  // area using this node. Returns false if there is no such node and the caller
  // has to retry allocation after collecting garbage.
  MUST_USE_RESULT bool Allocate(size_t size_in_bytes);

  // Clear the free list.
  void Reset();

  void ResetStats() {
    wasted_bytes_.SetValue(0);
    ForAllFreeListCategories(
        [](FreeListCategory* category) { category->ResetStats(); });
  }

  // Return the number of bytes available on the free list.
  size_t Available() {
    size_t available = 0;
    ForAllFreeListCategories([&available](FreeListCategory* category) {
      available += category->available();
    });
    return available;
  }

  bool IsEmpty() {
    bool empty = true;
    ForAllFreeListCategories([&empty](FreeListCategory* category) {
      if (!category->is_empty()) empty = false;
    });
    return empty;
  }

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  size_t EvictFreeListItems(Page* page);
  bool ContainsPageFreeListItems(Page* page);

  PagedSpace* owner() { return owner_; }
  size_t wasted_bytes() { return wasted_bytes_.Value(); }

  template <typename Callback>
  void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
    FreeListCategory* current = categories_[type];
    while (current != nullptr) {
      FreeListCategory* next = current->next();
      callback(current);
      current = next;
    }
  }

  template <typename Callback>
  void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
    }
  }

  bool AddCategory(FreeListCategory* category);
  void RemoveCategory(FreeListCategory* category);
  void PrintCategories(FreeListCategoryType type);

  // Returns a page containing an entry for a given type, or nullptr otherwise.
  inline Page* GetPageForCategoryType(FreeListCategoryType type);

#ifdef DEBUG
  size_t SumFreeLists();
  bool IsVeryLong();
#endif

 private:
  class FreeListCategoryIterator {
   public:
    FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
        : current_(free_list->categories_[type]) {}

    bool HasNext() { return current_ != nullptr; }

    FreeListCategory* Next() {
      DCHECK(HasNext());
      FreeListCategory* tmp = current_;
      current_ = current_->next();
      return tmp;
    }

   private:
    FreeListCategory* current_;
  };

  // The size range of blocks, in bytes.
  static const size_t kMinBlockSize = 3 * kPointerSize;
  static const size_t kMaxBlockSize = Page::kAllocatableMemory;

  static const size_t kTiniestListMax = 0xa * kPointerSize;
  static const size_t kTinyListMax = 0x1f * kPointerSize;
  static const size_t kSmallListMax = 0xff * kPointerSize;
  static const size_t kMediumListMax = 0x7ff * kPointerSize;
  static const size_t kLargeListMax = 0x3fff * kPointerSize;
  static const size_t kTinyAllocationMax = kTiniestListMax;
  static const size_t kSmallAllocationMax = kTinyListMax;
  static const size_t kMediumAllocationMax = kSmallListMax;
  static const size_t kLargeAllocationMax = kMediumListMax;

  FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);

  // Walks all available categories for a given |type| and tries to retrieve
  // a node. Returns nullptr if the category is empty.
  FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);

  // Tries to retrieve a node from the first category in a given |type|.
  // Returns nullptr if the category is empty.
  FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
                           size_t minimum_size);

  // Searches a given |type| for a node of at least |minimum_size|.
  FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
                                 size_t minimum_size);

  // The tiny categories are not used for fast allocation.
  FreeListCategoryType SelectFastAllocationFreeListCategoryType(
      size_t size_in_bytes) {
    if (size_in_bytes <= kSmallAllocationMax) {
      return kSmall;
    } else if (size_in_bytes <= kMediumAllocationMax) {
      return kMedium;
    } else if (size_in_bytes <= kLargeAllocationMax) {
      return kLarge;
    }
    return kHuge;
  }

  FreeListCategory* top(FreeListCategoryType type) const {
    return categories_[type];
  }

  PagedSpace* owner_;
  base::AtomicNumber<size_t> wasted_bytes_;
  FreeListCategory* categories_[kNumberOfCategories];

  friend class FreeListCategory;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};

// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
//
// The buffer is properly closed upon destruction and reassignment.
// Example:
//   {
//     AllocationResult result = ...;
//     LocalAllocationBuffer a(heap, result, size);
//     LocalAllocationBuffer b = a;
//     CHECK(!a.IsValid());
//     CHECK(b.IsValid());
//     // {a} is invalid now and cannot be used for further allocations.
//   }
//   // Since {b} went out of scope, the LAB is closed, resulting in creating a
//   // filler object for the remaining area.
class LocalAllocationBuffer {
 public:
  // Indicates that a buffer cannot be used for allocations anymore. Can result
  // from either reassigning a buffer, or trying to construct it from an
  // invalid {AllocationResult}.
  static inline LocalAllocationBuffer InvalidBuffer();

  // Creates a new LAB from a given {AllocationResult}. Results in
  // InvalidBuffer if the result indicates a retry.
  static inline LocalAllocationBuffer FromResult(Heap* heap,
                                                 AllocationResult result,
                                                 intptr_t size);

  ~LocalAllocationBuffer() { Close(); }

  // Convert to C++11 move-semantics once allowed by the style guide.
  LocalAllocationBuffer(const LocalAllocationBuffer& other);
  LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);

  MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  inline bool IsValid() { return allocation_info_.top() != nullptr; }

  // Try to merge LABs, which is only possible when they are adjacent in memory.
  // Returns true if the merge was successful, false otherwise.
  inline bool TryMerge(LocalAllocationBuffer* other);

  inline bool TryFreeLast(HeapObject* object, int object_size);

  // Close a LAB, effectively invalidating it. Returns the unused area.
  AllocationInfo Close();

 private:
  LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);

  Heap* heap_;
  AllocationInfo allocation_info_;
};

class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
 public:
  typedef PageIterator iterator;

  static const size_t kCompactionMemoryWanted = 500 * KB;

  // Creates a space with an id.
  PagedSpace(Heap* heap, AllocationSpace id, Executability executable);

  ~PagedSpace() override { TearDown(); }

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible.  If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool SetUp();

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetUp();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  inline bool Contains(Object* o);
  bool ContainsSlow(Address addr);

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free list nodes that were already created.
  void RepairFreeListsAfterDeserialization();

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  size_t Capacity() { return accounting_stats_.Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  void ResetFreeListStatistics();

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered.  As free spaces are
  // discovered during the sweeping they are subtracted from the size and added
  // to the available and wasted totals.
  void ClearStats() {
    accounting_stats_.ClearSize();
    free_list_.ResetStats();
    ResetFreeListStatistics();
  }

  // Available bytes without growing.  These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation.  New pages are
  // immediately added to the free list so they show up here.
  size_t Available() override { return free_list_.Available(); }

  // Allocated bytes in this space.  Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated!  The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  size_t Size() override { return accounting_stats_.Size(); }

  // As Size(), but the bytes in lazily swept pages are estimated and the bytes
  // in the current linear allocation area are not included.
  size_t SizeOfObjects() override;

  // Wasted bytes in this space.  These are just the bytes that were thrown away
  // due to being too small to use for allocation.
  virtual size_t Waste() { return free_list_.wasted_bytes(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
  // to be manually updated later.
  MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
      int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);

  // Allocate the requested number of bytes in the space double aligned if
  // possible, return a failure object if not.
  MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  // Allocate the requested number of bytes in the space and consider allocation
  // alignment if needed.
  MUST_USE_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment);

  // Give a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.  The freed block is deducted from
  // the allocated bytes of its page; UnaccountedFree() below leaves the
  // accounting stats untouched.
  size_t Free(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
    Page* page = Page::FromAddress(start);
    accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  inline bool TryFreeLast(HeapObject* object, int object_size);

  void ResetFreeList() { free_list_.Reset(); }

  void PauseAllocationObservers() override;
  void ResumeAllocationObservers() override;

  // Empty space allocation info, returning unused area to free list.
  void EmptyAllocationInfo();

  void MarkAllocationInfoBlack();
  void UnmarkAllocationInfo();

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.DecreaseAllocatedBytes(bytes, page);
  }
  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.IncreaseAllocatedBytes(bytes, page);
  }
  void DecreaseCapacity(size_t bytes) {
    accounting_stats_.DecreaseCapacity(bytes);
  }
  void IncreaseCapacity(size_t bytes) {
    accounting_stats_.IncreaseCapacity(bytes);
  }

  void RefineAllocatedBytesAfterSweeping(Page* page);

  // The dummy page that anchors the linked list of pages.
  Page* anchor() { return &anchor_; }

  Page* InitializePage(MemoryChunk* chunk, Executability executable);
  void ReleasePage(Page* page);
  // Adds the page to this space and returns the number of bytes added to the
  // free list of the space.
  size_t AddPage(Page* page);
  void RemovePage(Page* page);
  // Remove a page if it has at least |size_in_bytes| bytes available that can
  // be used for allocation.
  Page* RemovePageSafe(int size_in_bytes);

  void SetReadAndExecutable();
  void SetReadAndWritable();

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  void VerifyLiveBytes();

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}
#endif

#ifdef DEBUG
  void VerifyCountersAfterSweeping();
  void VerifyCountersBeforeConcurrentSweeping();
  // Print meta info and objects in this space.
  void Print() override;

  // Reports statistics for the space.
  void ReportStatistics();

  // Report code object related statistics.
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  Page* FirstPage() { return anchor_.next_page(); }
  Page* LastPage() { return anchor_.prev_page(); }

  bool CanExpand(size_t size);

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() { return static_cast<int>(area_size_); }

  virtual bool is_local() { return false; }

  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeCompactionSpace(CompactionSpace* other);

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

  FreeList* free_list() { return &free_list_; }

  base::Mutex* mutex() { return &space_mutex_; }

  inline void UnlinkFreeListCategories(Page* page);
  inline size_t RelinkFreeListCategories(Page* page);

  iterator begin() { return iterator(anchor_.next_page()); }
  iterator end() { return iterator(&anchor_); }

  // Shrink immortal immovable pages of the space to be exactly the size needed
  // using the high water mark.
  void ShrinkImmortalImmovablePages();

  size_t ShrinkPageToHighWaterMark(Page* page);

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

  // Sets the page that is currently locked by the task using the space. This
  // page will be preferred for sweeping to avoid a potential deadlock where
  // multiple tasks hold locks on pages while trying to sweep each other's
  // pages.
  void AnnounceLockedPage(Page* page) { locked_page_ = page; }

  Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
  void SetAllocationInfo(Address top, Address limit);

 private:
  // Set space allocation info.
  void SetTopAndLimit(Address top, Address limit) {
    DCHECK(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.Reset(top, limit);
  }
  void DecreaseLimit(Address new_limit);
  void StartNextInlineAllocationStep() override;
  bool SupportsInlineAllocation() { return identity() == OLD_SPACE; }

 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() { return true; }

  bool HasPages() { return anchor_.next_page() != &anchor_; }

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate requested number of pages from OS, or if the hard heap
  // size limit has been hit.
  bool Expand();

  // Sets up a linear allocation area that fits the given number of bytes.
  // Returns false if there is not enough space and the caller has to retry
  // after collecting garbage.
  inline bool EnsureLinearAllocationArea(int size_in_bytes);
  // Allocates an object from the linear allocation area. Assumes that the
  // linear allocation area is large enough to fit the object.
  inline HeapObject* AllocateLinearly(int size_in_bytes);
  // Tries to allocate an aligned object from the linear allocation area.
  // Returns nullptr if the linear allocation area does not fit the object.
  // Otherwise, returns the object pointer and writes the allocation size
  // (object size + alignment filler size) to |size_in_bytes|.
  inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment);
  // If sweeping is still in progress try to sweep unswept pages. If that is
  // not successful, wait for the sweeper threads and retry free-list
  // allocation. Returns false if there is not enough space and the caller
  // has to retry after collecting garbage.
  MUST_USE_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);

  // Slow path of AllocateRaw. This function is space-dependent. Returns false
  // if there is not enough space and the caller has to retry after
  // collecting garbage.
  MUST_USE_RESULT virtual bool SlowAllocateRaw(int size_in_bytes);

  // Implementation of SlowAllocateRaw. Returns false if there is not enough
  // space and the caller has to retry after collecting garbage.
  MUST_USE_RESULT bool RawSlowAllocateRaw(int size_in_bytes);

  size_t area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The dummy page that anchors the double linked list of pages.
  Page anchor_;

  // The space's free list.
  FreeList free_list_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Mutex guarding any concurrent access to the space.
  base::Mutex space_mutex_;

  Page* locked_page_;
  Address top_on_previous_step_;

  friend class IncrementalMarking;
  friend class MarkCompactCollector;

  // Used in cctest.
  friend class heap::HeapTester;
};

enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };

// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
// The mark-compact collector uses the memory of the first page in the from
// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
 public:
  typedef PageIterator iterator;

  static void Swap(SemiSpace* from, SemiSpace* to);

  SemiSpace(Heap* heap, SemiSpaceId semispace)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        current_capacity_(0),
        maximum_capacity_(0),
        minimum_capacity_(0),
        age_mark_(nullptr),
        committed_(false),
        id_(semispace),
        anchor_(this),
        current_page_(nullptr),
        pages_used_(0) {}

  inline bool Contains(HeapObject* o);
  inline bool Contains(Object* o);
  inline bool ContainsSlow(Address a);

  void SetUp(size_t initial_capacity, size_t maximum_capacity);
  void TearDown();
  bool HasBeenSetUp() { return maximum_capacity_ != 0; }

  bool Commit();
  bool Uncommit();
  bool is_committed() { return committed_; }

  // Grow the semispace to the new capacity.  The new capacity requested must
  // be larger than the current capacity and less than the maximum capacity.
  bool GrowTo(size_t new_capacity);

  // Shrinks the semispace to the new capacity.  The new capacity requested
  // must be more than the amount of used memory in the semispace and less
  // than the current capacity.
  bool ShrinkTo(size_t new_capacity);

  bool EnsureCurrentCapacity();

  // Returns the start address of the first page of the space.
  Address space_start() {
    DCHECK_NE(anchor_.next_page(), anchor());
    return anchor_.next_page()->area_start();
  }

  Page* first_page() { return anchor_.next_page(); }
  Page* current_page() { return current_page_; }
  int pages_used() { return pages_used_; }

  // Returns one past the end address of the space.
  Address space_end() { return anchor_.prev_page()->area_end(); }

  // Returns the start address of the current page of the space.
  Address page_low() { return current_page_->area_start(); }

  // Returns one past the end address of the current page of the space.
  Address page_high() { return current_page_->area_end(); }

  bool AdvancePage() {
    Page* next_page = current_page_->next_page();
    // We cannot expand if we reached the maximum number of pages already. Note
    // that we need to account for the next page already for this check as we
    // could potentially fill the whole page after advancing.
    const bool reached_max_pages = (pages_used_ + 1) == max_pages();
    if (next_page == anchor() || reached_max_pages) {
      return false;
    }
    current_page_ = next_page;
    pages_used_++;
    return true;
  }

  // Resets the space to using the first page.
  void Reset();

  void RemovePage(Page* page);
  void PrependPage(Page* page);
  Page* InitializePage(MemoryChunk* chunk, Executability executable);

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark);

  // Returns the current capacity of the semispace.
  size_t current_capacity() { return current_capacity_; }

  // Returns the maximum capacity of the semispace.
  size_t maximum_capacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semispace.
  size_t minimum_capacity() { return minimum_capacity_; }

  SemiSpaceId id() { return id_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  // If we don't have these here then SemiSpace will be abstract.  However
  // they should never be called:

  size_t Size() override {
    UNREACHABLE();
  }

  size_t SizeOfObjects() override { return Size(); }

  size_t Available() override {
    UNREACHABLE();
  }

  iterator begin() { return iterator(anchor_.next_page()); }
  iterator end() { return iterator(anchor()); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

#ifdef DEBUG
  void Print() override;
  // Validate a range of addresses in a SemiSpace.
  // The "from" address must be on a page prior to the "to" address,
  // in the linked page order, or it must be earlier on the same page.
  static void AssertValidRange(Address from, Address to);
#else
  // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
#endif

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

 private:
  void RewindPages(Page* start, int num_pages);

  inline Page* anchor() { return &anchor_; }
  inline int max_pages() {
    return static_cast<int>(current_capacity_ / Page::kPageSize);
  }

  // Copies the flags into the masked positions on all pages in the space.
  void FixPagesFlags(intptr_t flags, intptr_t flag_mask);

  // The currently committed space capacity.
  size_t current_capacity_;

  // The maximum capacity that can be used by this space. A space cannot grow
  // beyond that size.
  size_t maximum_capacity_;

  // The minimum capacity for the space. A space cannot shrink below this size.
  size_t minimum_capacity_;

  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  bool committed_;
  SemiSpaceId id_;

  Page anchor_;
  Page* current_page_;
  int pages_used_;

  friend class NewSpace;
  friend class SemiSpaceIterator;
};


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space.  It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace.  New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the allocated objects in the given to-space.
  explicit SemiSpaceIterator(NewSpace* space);

  inline HeapObject* Next() override;

 private:
  void Initialize(Address start, Address end);

  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
};
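
// Illustrative usage sketch; assumes |heap| is a fully initialized Heap with a
// set-up new space:
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // Visit |obj| in the active semispace.
//   }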

// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces.  It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  typedef PageIterator iterator;

  explicit NewSpace(Heap* heap)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        top_on_previous_step_(0),
        to_space_(heap, kToSpace),
        from_space_(heap, kFromSpace),
        reservation_(),
        allocated_histogram_(nullptr),
        promoted_histogram_(nullptr) {}

  inline bool Contains(HeapObject* o);
  inline bool ContainsSlow(Address a);
  inline bool Contains(Object* o);

  bool SetUp(size_t initial_semispace_capacity, size_t max_semispace_capacity);

  // Tears down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() {
    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces.  Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // Return the allocated bytes in the active semispace.
  size_t Size() override {
    DCHECK_GE(top(), to_space_.page_low());
    return to_space_.pages_used() * Page::kAllocatableMemory +
           static_cast<size_t>(top() - to_space_.page_low());
  }

  size_t SizeOfObjects() override { return Size(); }

  // Return the allocatable capacity of a semispace.
2542
  size_t Capacity() {
mlippautz's avatar
mlippautz committed
2543 2544
    SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return (to_space_.current_capacity() / Page::kPageSize) *
2545
           Page::kAllocatableMemory;
2546 2547
  }

2548 2549
  // Return the current size of a semispace, allocatable and non-allocatable
  // memory.
2550
  size_t TotalCapacity() {
mlippautz's avatar
mlippautz committed
2551 2552
    DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return to_space_.current_capacity();
2553
  }
2554

2555 2556
  // Committed memory for NewSpace is the committed memory of both semi-spaces
  // combined.
2557
  size_t CommittedMemory() override {
2558
    return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2559 2560
  }

2561
  size_t MaximumCommittedMemory() override {
2562 2563
    return from_space_.MaximumCommittedMemory() +
           to_space_.MaximumCommittedMemory();
2564 2565
  }

2566
  // Approximate amount of physical memory committed for this space.
2567
  size_t CommittedPhysicalMemory() override;
2568

2569
  // Return the available bytes without growing.
2570 2571 2572 2573
  size_t Available() override {
    DCHECK_GE(Capacity(), Size());
    return Capacity() - Size();
  }
2574

2575
  size_t AllocatedSinceLastGC() {
2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586
    const Address age_mark = to_space_.age_mark();
    DCHECK_NOT_NULL(age_mark);
    DCHECK_NOT_NULL(top());
    Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
    Page* const last_page = Page::FromAllocationAreaAddress(top());
    Page* current_page = age_mark_page;
    size_t allocated = 0;
    if (current_page != last_page) {
      DCHECK_EQ(current_page, age_mark_page);
      DCHECK_GE(age_mark_page->area_end(), age_mark);
      allocated += age_mark_page->area_end() - age_mark;
2587
      current_page = current_page->next_page();
2588 2589 2590
    } else {
      DCHECK_GE(top(), age_mark);
      return top() - age_mark;
2591 2592
    }
    while (current_page != last_page) {
2593
      DCHECK_NE(current_page, age_mark_page);
2594 2595 2596
      allocated += Page::kAllocatableMemory;
      current_page = current_page->next_page();
    }
2597
    DCHECK_GE(top(), current_page->area_start());
2598 2599
    allocated += top() - current_page->area_start();
    DCHECK_LE(allocated, Size());
2600
    return allocated;
2601
  }
2602

  void MovePageFromSpaceToSpace(Page* page) {
    DCHECK(page->InFromSpace());
    from_space_.RemovePage(page);
    to_space_.PrependPage(page);
  }

  bool Rebalance();

  // Return the maximum capacity of a semispace.
  size_t MaximumCapacity() {
    DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
    return to_space_.maximum_capacity();
  }

  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }

  // Returns the initial capacity of a semispace.
  size_t InitialTotalCapacity() {
    DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
    return to_space_.minimum_capacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
    return allocation_info_.top();
  }

  // Return the address of the allocation pointer limit in the active semispace.
  Address limit() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
    return allocation_info_.limit();
  }

  void ResetOriginalTop() {
    DCHECK_GE(top(), original_top());
    DCHECK_LE(top(), original_limit());
    original_top_.SetValue(top());
  }

  Address original_top() { return original_top_.Value(); }
  Address original_limit() { return original_limit_.Value(); }

  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The allocation top and limit address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT INLINE(
      AllocationResult AllocateRawUnaligned(int size_in_bytes));

  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
      int size_in_bytes, AllocationAlignment alignment);

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();

  // When inline allocation stepping is active, either because of incremental
  // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
  // inline allocation every once in a while. This is done by setting
  // allocation_info_.limit to be lower than the actual limit and increasing
  // it in steps to guarantee that the observers are notified periodically.
  void UpdateInlineAllocationLimit(int size_in_bytes);

  void DisableInlineAllocationSteps() {
    top_on_previous_step_ = 0;
    UpdateInlineAllocationLimit(0);
  }

  // Get the extent of the inactive semispace (for use as a marking stack,
  // or to zap it). Notice: space-addresses are not necessarily on the
  // same page, so FromSpaceStart() might be above FromSpaceEnd().
  Address FromSpacePageLow() { return from_space_.page_low(); }
  Address FromSpacePageHigh() { return from_space_.page_high(); }
  Address FromSpaceStart() { return from_space_.space_start(); }
  Address FromSpaceEnd() { return from_space_.space_end(); }

  // Get the extent of the active semispace's pages' memory.
  Address ToSpaceStart() { return to_space_.space_start(); }
  Address ToSpaceEnd() { return to_space_.space_end(); }

  inline bool ToSpaceContainsSlow(Address a);
  inline bool FromSpaceContainsSlow(Address a);
  inline bool ToSpaceContains(Object* o);
  inline bool FromSpaceContains(Object* o);

  // Try to switch the active semispace to a new, empty, page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();
  bool AddFreshPageSynchronized();

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify();
#endif

#ifdef DEBUG
  // Print the active semispace.
  void Print() override { to_space_.Print(); }
#endif

  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object.  Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }

  SemiSpace* active_space() { return &to_space_; }

  void PauseAllocationObservers() override;
  void ResumeAllocationObservers() override;

  iterator begin() { return to_space_.begin(); }
  iterator end() { return to_space_.end(); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

  SemiSpace& from_space() { return from_space_; }
  SemiSpace& to_space() { return to_space_; }

 private:
  // Update allocation info to match the current to-space page.
  void UpdateAllocationInfo();

  base::Mutex mutex_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;
  Address top_on_previous_step_;
  // The top and the limit at the time of setting the allocation info.
  // These values can be accessed by background tasks.
  base::AtomicValue<Address> original_top_;
  base::AtomicValue<Address> original_limit_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  VirtualMemory reservation_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);

  // If we are doing inline allocation in steps, this method performs the
  // 'step' operation. top is the memory address of the bump pointer at the
  // last inline allocation (i.e. it determines the number of bytes actually
  // allocated since the last step.) new_top is the address of the bump pointer
  // where the next byte is going to be allocated from. top and new_top may be
  // different when we cross a page boundary or reset the space.
  void InlineAllocationStep(Address top, Address new_top, Address soon_object,
                            size_t size);
  void StartNextInlineAllocationStep() override;

  friend class SemiSpaceIterator;
};

class PauseAllocationObserversScope {
 public:
  explicit PauseAllocationObserversScope(Heap* heap);
  ~PauseAllocationObserversScope();

 private:
  Heap* heap_;
  DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
2814 2815 2816 2817
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}

2818
  bool is_local() override { return true; }
2819

2820 2821
 protected:
  // The space is temporary and not included in any snapshots.
2822
  bool snapshotable() override { return false; }
2823

2824
  MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
2825

2826
  MUST_USE_RESULT bool SlowAllocateRaw(int size_in_bytes) override;
2827 2828
};

2829

2830 2831 2832 2833 2834
// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;
};
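
// Usage sketch (illustrative): each compaction task owns one collection and
// allocates through the per-space CompactionSpace it hands out, e.g.
//
//   CompactionSpaceCollection compaction_spaces(heap);
//   CompactionSpace* old_space = compaction_spaces.Get(OLD_SPACE);
//   CompactionSpace* code_space = compaction_spaces.Get(CODE_SPACE);
//
// Requesting any other AllocationSpace hits UNREACHABLE().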


// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  OldSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}
};


// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())
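
// Usage sketch (illustrative): invoked on a semispace together with its
// allocation info, e.g.
//
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
//
// which slow-checks that top and limit lie within the current page of
// to_space_.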


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  MapSpace(Heap* heap, AllocationSpace id)
      : PagedSpace(heap, id, NOT_EXECUTABLE) {}

  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
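
  // Worked example (hypothetical value): if Map::kSize were 88 bytes, a
  // request of 1000 bytes would be rounded down to (1000 / 88) * 88 = 968,
  // the largest multiple of Map::kSize that fits; for a power-of-two
  // Map::kSize, RoundDown computes the same result with a mask instead of a
  // division.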

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject* obj) override;
#endif
};


// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space. A large object is allocated from the OS
// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset within a page.
// Large objects do not move during garbage collections.

class LargeObjectSpace : public Space {
 public:
  typedef LargePageIterator iterator;

  LargeObjectSpace(Heap* heap, AllocationSpace id);
  virtual ~LargeObjectSpace();

  // Initializes internal data structures.
  bool SetUp();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  static size_t ObjectSizeFor(size_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
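
  // Worked example (hypothetical numbers): for a chunk of 1 MB,
  // ObjectSizeFor returns 1 MB - Page::kPageSize - Page::kObjectStartOffset,
  // i.e. the chunk minus the per-chunk padding; chunks no larger than that
  // padding yield 0.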

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT AllocationResult
      AllocateRaw(int object_size, Executability executable);

  // Available bytes for objects in this space.
  inline size_t Available() override;

  size_t Size() override { return size_; }

  size_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Finds an object for a given address, returns a Smi if it is not found.
  // The function iterates through all objects in this space and may be slow.
  Object* FindObject(Address a);

  // Takes the chunk_map_mutex_ and calls FindPage after that.
  LargePage* FindPageThreadSafe(Address a);

  // Finds a large object page containing the given address, returns nullptr
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page, Address free_start);

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);
  // Checks whether an address is in the object area in this space. Iterates
  // all objects in the space. May be slow.
  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == nullptr; }

  LargePage* first_page() { return first_page_; }

  // Collect code statistics.
  void CollectCodeStatistics();

  iterator begin() { return iterator(first_page_); }
  iterator end() { return iterator(nullptr); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  void Print() override;
  void ReportStatistics();
#endif

 private:
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  size_t size_;            // allocated bytes
  int page_count_;         // number of chunks
  size_t objects_size_;    // size of objects
  // The chunk_map_mutex_ has to be used when the chunk map is accessed
  // concurrently.
  base::Mutex chunk_map_mutex_;
  // Page-aligned addresses to their corresponding LargePage.
  std::unordered_map<Address, LargePage*> chunk_map_;

  friend class LargeObjectIterator;
};


class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);

  HeapObject* Next() override;

 private:
  LargePage* current_;
};
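
// Usage sketch (illustrative): Next() yields the objects of the space one by
// one and returns nullptr when the iteration is exhausted, e.g.
//
//   LargeObjectIterator it(heap->lo_space());
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // ... inspect obj ...
//   }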

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class MemoryChunkIterator BASE_EMBEDDED {
 public:
  inline explicit MemoryChunkIterator(Heap* heap);

  // Returns nullptr when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kFinishedState
  };
  Heap* heap_;
  State state_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  LargePageIterator lo_iterator_;
};
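
// Usage sketch (illustrative): next() visits the chunks of the paged spaces
// and the large object space in turn (cf. the State enum above), returning
// nullptr when all chunks have been seen, e.g.
//
//   for (MemoryChunkIterator it(heap); MemoryChunk* chunk = it.next();) {
//     // ... process chunk ...
//   }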

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_