// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <atomic>
#include <memory>
#include <vector>

#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/base-space.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
#include "src/heap/heap.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/objects.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {
namespace internal {

namespace heap {
class HeapTester;
class TestCodePageAllocatorScope;
}  // namespace heap
class AllocationObserver;
class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
class LinearAllocationArea;
class Page;
class PagedSpace;
class SemiSpace;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects, the rest of old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references.  See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage.  Garbage sections are marked with a special map.  These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries.  Garbage sections are chained
// together to form a free list after a GC.  Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain.  Very
// small free spaces are ignored, they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section.  The start of this
// section is denoted by the top field in the space.  The end of the section
// is denoted by the limit field in the space.  This special garbage section
// is not marked with a free space map in the data.  The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created.  The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

// Some assertion macros used in debug mode.

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space)                          \
  DCHECK((0 < size) &&                                                    \
         (size <= std::min(MemoryChunkLayout::MaxRegularCodeObjectSize(), \
                           code_space->AreaSize())))

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces that are not
// sealed after startup (i.e. not ReadOnlySpace).
class V8_EXPORT_PRIVATE Space : public BaseSpace {
 public:
  Space(Heap* heap, AllocationSpace id, FreeList* free_list)
      : BaseSpace(heap, id),
        free_list_(std::unique_ptr<FreeList>(free_list)) {
    external_backing_store_bytes_ =
        new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
    external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
    external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
        0;
  }

  Space(const Space&) = delete;
  Space& operator=(const Space&) = delete;

  static inline void MoveExternalBackingStoreBytes(
      ExternalBackingStoreType type, Space* from, Space* to, size_t amount);

  ~Space() override {
    delete[] external_backing_store_bytes_;
    external_backing_store_bytes_ = nullptr;
  }

  virtual void AddAllocationObserver(AllocationObserver* observer);

  virtual void RemoveAllocationObserver(AllocationObserver* observer);

  virtual void PauseAllocationObservers();

  virtual void ResumeAllocationObservers();

  virtual void StartNextInlineAllocationStep() {}

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see OldLargeObjectSpace).
  virtual size_t SizeOfObjects() { return Size(); }

  // Return the available bytes without growing.
  virtual size_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kTaggedSize);
    }
  }

  virtual std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) = 0;

  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  // Returns amount of off-heap memory in-use by objects in this Space.
  virtual size_t ExternalBackingStoreBytes(
      ExternalBackingStoreType type) const {
    return external_backing_store_bytes_[type];
  }
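
  // A hedged usage sketch (not taken from a call site): when off-heap memory
  // such as an ArrayBuffer backing store is attached to or detached from an
  // object in this space, the accounting above is updated via
  //
  //   space->IncrementExternalBackingStoreBytes(
  //       ExternalBackingStoreType::kArrayBuffer, backing_store_size);
  //   ...
  //   space->DecrementExternalBackingStoreBytes(
  //       ExternalBackingStoreType::kArrayBuffer, backing_store_size);
  //
  // where |backing_store_size| is illustrative.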

  MemoryChunk* first_page() { return memory_chunk_list_.front(); }
  MemoryChunk* last_page() { return memory_chunk_list_.back(); }

  const MemoryChunk* first_page() const { return memory_chunk_list_.front(); }
  const MemoryChunk* last_page() const { return memory_chunk_list_.back(); }

  heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }

  FreeList* free_list() { return free_list_.get(); }

  Address FirstPageAddress() const { return first_page()->address(); }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  AllocationCounter allocation_counter_;

  // The List manages the pages that belong to the given space.
  heap::List<MemoryChunk> memory_chunk_list_;

  // Tracks off-heap memory used by this space.
  std::atomic<size_t>* external_backing_store_bytes_;

  std::unique_ptr<FreeList> free_list_;
};

STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 256K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationAreaAddress(address);
class Page : public MemoryChunk {
 public:
  static const intptr_t kCopyAllFlags = ~0;

  // Page flags copied from from-space to to-space when flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
  static const intptr_t kCopyAllFlags = ~0;

  // Page flags copied from from-space to to-space when flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
      static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);

  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the object
  // is in fact in a page.
  static Page* FromAddress(Address addr) {
    return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
  }
  static Page* FromHeapObject(HeapObject o) {
    return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
  }

  // Returns the page containing the address provided. The address can
  // potentially point right after the page. To also be safe for tagged values
  // we subtract one word. The valid address range is
  // [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
  static Page* FromAllocationAreaAddress(Address address) {
    return Page::FromAddress(address - kTaggedSize);
  }
  }
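
  // A hedged note with an illustrative example: when an allocation top sits
  // exactly at the end of its page, FromAddress(top) would already resolve to
  // the next page, whereas FromAllocationAreaAddress(top) subtracts
  // kTaggedSize first and therefore still resolves to the page that owns the
  // allocation area:
  //
  //   Address top = ...;  // current allocation top, possibly == page end
  //   Page* p = Page::FromAllocationAreaAddress(top);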

  // Checks if address1 and address2 are on the same new space page.
  static bool OnSamePage(Address address1, Address address2) {
    return Page::FromAddress(address1) == Page::FromAddress(address2);
  }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address addr) {
    return (addr & kPageAlignmentMask) == 0;
  }

  static Page* ConvertNewToOld(Page* old_page);

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

  Page* next_page() { return static_cast<Page*>(list_node_.next()); }
  Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }

  const Page* next_page() const {
    return static_cast<const Page*>(list_node_.next());
  }
  const Page* prev_page() const {
    return static_cast<const Page*>(list_node_.prev());
  }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory;
         i < owner()->free_list()->number_of_categories(); i++) {
      callback(categories_[i]);
    }
  }
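
  // A hedged usage sketch for the iteration helper above, assuming
  // FreeListCategory exposes an available() accessor (names here are
  // illustrative, not taken from a call site):
  //
  //   size_t available = 0;
  //   page->ForAllFreeListCategories([&](FreeListCategory* category) {
  //     available += category->available();
  //   });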

  size_t AvailableInFreeList();

  size_t AvailableInFreeListFromAllocatedBytes() {
    DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
    return area_size() - wasted_memory() - allocated_bytes();
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return categories_[type];
  }

  size_t ShrinkToHighWaterMark();

  V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
  V8_EXPORT_PRIVATE void CreateBlackAreaBackground(Address start, Address end);
  void DestroyBlackArea(Address start, Address end);
  void DestroyBlackAreaBackground(Address start, Address end);

  void InitializeFreeListCategories();
  void AllocateFreeListCategories();
  void ReleaseFreeListCategories();

  void MoveOldToNewRememberedSetForSweeping();
  void MergeOldToNewRememberedSets();

 private:
  friend class MemoryAllocator;
};

// Validate our estimates on the header size.
STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);

// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.

class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() = default;
  virtual HeapObject Next() = 0;
};

template <class PAGE_TYPE>
class PageIteratorImpl
    : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
 public:
  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
  PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
  PAGE_TYPE* operator*() { return p_; }
  bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ == p_;
  }
  bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ != p_;
  }
  inline PageIteratorImpl<PAGE_TYPE>& operator++();
  inline PageIteratorImpl<PAGE_TYPE> operator++(int);

 private:
  PAGE_TYPE* p_;
};

using PageIterator = PageIteratorImpl<Page>;
using ConstPageIterator = PageIteratorImpl<const Page>;
using LargePageIterator = PageIteratorImpl<LargePage>;

class PageRange {
 public:
  using iterator = PageIterator;
  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
  inline PageRange(Address start, Address limit);

  iterator begin() { return iterator(begin_); }
  iterator end() { return iterator(end_); }

 private:
  Page* begin_;
  Page* end_;
};
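
// A hedged usage sketch for PageRange (the |first| and |last| pages are
// illustrative and assumed to come from the owning space):
//
//   for (Page* p : PageRange(first, last->next_page())) {
//     // visit p
//   }
//
// PageRange(page) is shorthand for the range that contains just |page|.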

// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page().

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class LinearAllocationArea {
 public:
  LinearAllocationArea()
      : start_(kNullAddress), top_(kNullAddress), limit_(kNullAddress) {}
  LinearAllocationArea(Address top, Address limit)
      : start_(top), top_(top), limit_(limit) {}

  void Reset(Address top, Address limit) {
    start_ = top;
    set_top(top);
    set_limit(limit);
  }

  void MoveStartToTop() { start_ = top_; }

  V8_INLINE Address start() const { return start_; }

  V8_INLINE void set_top(Address top) {
    SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  V8_INLINE Address top() const {
    SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  V8_INLINE void set_limit(Address limit) { limit_ = limit; }

  V8_INLINE Address limit() const { return limit_; }

  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationAreaAddress(top_) ==
            Page::FromAllocationAreaAddress(limit_)) &&
           (top_ <= limit_);
  }
#endif

 private:
  // Start of the current allocation area.
  Address start_;
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};
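
// A minimal sketch (not V8's real allocation fast path) of how a bump-pointer
// allocation uses a LinearAllocationArea; |area_start|, |area_end| and
// |size_in_bytes| are illustrative:
//
//   LinearAllocationArea lab(area_start, area_end);
//   Address result = lab.top();
//   if (result + size_in_bytes <= lab.limit()) {
//     lab.set_top(result + size_in_bytes);
//     // |result| now addresses size_in_bytes of usable memory.
//   }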

// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
//
// The buffer is properly closed upon destruction and reassignment.
// Example:
//   {
//     AllocationResult result = ...;
//     LocalAllocationBuffer a(heap, result, size);
//     LocalAllocationBuffer b = a;
//     CHECK(!a.IsValid());
//     CHECK(b.IsValid());
//     // {a} is invalid now and cannot be used for further allocations.
//   }
//   // Since {b} went out of scope, the LAB is closed, resulting in creating a
//   // filler object for the remaining area.
class LocalAllocationBuffer {
 public:
  // Indicates that a buffer cannot be used for allocations anymore. Can result
  // from either reassigning a buffer, or trying to construct it from an
  // invalid {AllocationResult}.
  static LocalAllocationBuffer InvalidBuffer() {
    return LocalAllocationBuffer(
        nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
  }

  // Creates a new LAB from a given {AllocationResult}. Results in
  // InvalidBuffer if the result indicates a retry.
  static inline LocalAllocationBuffer FromResult(Heap* heap,
                                                 AllocationResult result,
                                                 intptr_t size);

  ~LocalAllocationBuffer() { CloseAndMakeIterable(); }

  LocalAllocationBuffer(const LocalAllocationBuffer& other) = delete;
  V8_EXPORT_PRIVATE LocalAllocationBuffer(LocalAllocationBuffer&& other)
      V8_NOEXCEPT;

  LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other) = delete;
  V8_EXPORT_PRIVATE LocalAllocationBuffer& operator=(
      LocalAllocationBuffer&& other) V8_NOEXCEPT;

  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  inline bool IsValid() { return allocation_info_.top() != kNullAddress; }

  // Try to merge LABs, which is only possible when they are adjacent in memory.
  // Returns true if the merge was successful, false otherwise.
  inline bool TryMerge(LocalAllocationBuffer* other);

  inline bool TryFreeLast(HeapObject object, int object_size);

  // Close a LAB, effectively invalidating it. Returns the unused area.
  V8_EXPORT_PRIVATE LinearAllocationArea CloseAndMakeIterable();
  void MakeIterable();

  Address top() const { return allocation_info_.top(); }
  Address limit() const { return allocation_info_.limit(); }

 private:
  V8_EXPORT_PRIVATE LocalAllocationBuffer(
      Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT;

  Heap* heap_;
  LinearAllocationArea allocation_info_;
};
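
// A hedged usage sketch for obtaining and using a LAB (|lab_size| and
// |object_size| are illustrative):
//
//   AllocationResult result = ...;  // raw allocation of lab_size bytes
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, result, lab_size);
//   if (lab.IsValid()) {
//     AllocationResult object =
//         lab.AllocateRawAligned(object_size, kWordAligned);
//   }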

class SpaceWithLinearArea : public Space {
 public:
  SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
      : Space(heap, id, free_list) {
    allocation_info_.Reset(kNullAddress, kNullAddress);
  }

  virtual bool SupportsAllocationObserver() = 0;

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  // Methods needed for allocation observers.
  V8_EXPORT_PRIVATE void AddAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void RemoveAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
  V8_EXPORT_PRIVATE void PauseAllocationObservers() override;

  V8_EXPORT_PRIVATE void AdvanceAllocationObservers();
  V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
                                                   size_t size_in_bytes,
                                                   size_t aligned_size_in_bytes,
                                                   size_t allocation_size);

  void MarkLabStartInitialized();

  // When allocation observers are active we may use a lower limit to allow the
  // observers to 'interrupt' earlier than the natural limit. Given a linear
  // area bounded by [start, end), this function computes the limit to use to
  // allow proper observation based on existing observers. min_size specifies
  // the minimum size that the limited area should have.
  Address ComputeLimit(Address start, Address end, size_t min_size);
  V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
      size_t min_size) = 0;
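
  // A rough sketch of the intent (not the exact implementation): with
  // observers active and the nearest observer step |step_bytes| away, the
  // limit handed out for a linear area [start, end) is approximately
  //
  //   limit = min(end, start + min_size + step_bytes)
  //
  // so the fast path is left roughly when the next observer is due to run.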

  V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);

  void PrintAllocationsOrigins();

 protected:
  // TODO(ofrobots): make these private after refactoring is complete.
  LinearAllocationArea allocation_info_;

  size_t allocations_origins_[static_cast<int>(
      AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_