// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_

#include <atomic>
#include <memory>
#include <utility>

#include "src/base/bounds.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

class CompactionSpace;
class Heap;
class HeapObject;
class Isolate;
class ObjectVisitor;

// -----------------------------------------------------------------------------
// Heap object iterator in paged spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects.  The caller must create a new
// iterator in order to be sure to visit these new objects.
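//
// Illustrative use (a sketch only, given a Heap* `heap` and a paged space
// `space`):
//
//   PagedSpaceObjectIterator it(heap, space);
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // ... visit obj ...
//   }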
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  PagedSpaceObjectIterator(Heap* heap, const PagedSpaceBase* space);
  PagedSpaceObjectIterator(Heap* heap, const PagedSpaceBase* space,
                           const Page* page);
  PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space,
                           const Page* page, Address start_address);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns nullptr when the iteration has ended.
  inline HeapObject Next() override;

  // The pointer compression cage base value used for decompression of all
  // tagged values except references to Code objects.
  PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
    return cage_base_;
#else
    return PtrComprCageBase{};
#endif  // V8_COMPRESS_POINTERS
  }

 private:
  // Fast (inlined) path of next().
  inline HeapObject FromCurrentPage();

  // Slow path of next(), goes into the next page.  Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  const PagedSpaceBase* const space_;
  ConstPageRange page_range_;
  ConstPageRange::iterator current_page_;
#if V8_COMPRESS_POINTERS
  const PtrComprCageBase cage_base_;
#endif  // V8_COMPRESS_POINTERS
};

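// -----------------------------------------------------------------------------
// Base class of all paged spaces.
//
// A paged space manages a set of pages, the free list used to allocate into
// them, a linear allocation area and the space's accounting stats. The
// concrete spaces declared below (OldSpace, CodeSpace, MapSpace and the
// temporary CompactionSpace) derive from it via PagedSpace.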
class V8_EXPORT_PRIVATE PagedSpaceBase
    : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 public:
  using iterator = PageIterator;
  using const_iterator = ConstPageIterator;

  static const size_t kCompactionMemoryWanted = 500 * KB;

  // Creates a space with an id.
  PagedSpaceBase(
      Heap* heap, AllocationSpace id, Executability executable,
      FreeList* free_list, LinearAllocationArea* allocation_info,
      LinearAreaOriginalData& linear_area_original_data,
      CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);

  ~PagedSpaceBase() override { TearDown(); }

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a) const;
  inline bool Contains(Object o) const;
  bool ContainsSlow(Address addr) const;

  // Does the space need executable memory?
  Executability executable() const { return executable_; }

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  size_t Capacity() const { return accounting_stats_.Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() const override;

#if DEBUG
  void VerifyCommittedPhysicalMemory() const;
#endif  // DEBUG

  void IncrementCommittedPhysicalMemory(size_t increment_value);
  void DecrementCommittedPhysicalMemory(size_t decrement_value);

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered.  As free spaces are
  // discovered during the sweeping they are subtracted from the size and added
  // to the available and wasted totals. The free list is cleared as well.
  void ClearAllocatorState() {
    accounting_stats_.ClearSize();
    free_list_->Reset();
  }

  // Available bytes without growing.  These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation.  New pages are
  // immediately added to the free list so they show up here.
  size_t Available() const override;

  // Allocated bytes in this space.  Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated!  The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  size_t Size() const override { return accounting_stats_.Size(); }

  // Wasted bytes in this space.  These are just the bytes that were thrown away
  // due to being too small to use for allocation.
  virtual size_t Waste() const { return free_list_->wasted_bytes(); }

  // Allocate the requested number of bytes in the space from a background
  // thread.
  V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
  RawRefillLabBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
                         size_t max_size_in_bytes,
                         AllocationAlignment alignment,
                         AllocationOrigin origin);
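
  // Illustrative caller sketch (an assumption, not the actual call site):
  //
  //   if (auto area = space->RawRefillLabBackground(
  //           local_heap, min_size, max_size, alignment, origin)) {
  //     Address base = area->first;  // start of the reserved area
  //     size_t size = area->second;  // usable size, at least min_size
  //   }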

  size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
    if (size_in_bytes == 0) return 0;
    heap()->CreateFillerObjectAtBackground(start,
                                           static_cast<int>(size_in_bytes));
    if (mode == SpaceAccountingMode::kSpaceAccounted) {
      return AccountedFree(start, size_in_bytes);
    } else {
      return UnaccountedFree(start, size_in_bytes);
    }
  }

  // Gives a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.
  // AccountedFree() additionally updates the space's accounting stats, while
  // UnaccountedFree() leaves the stats untouched and does not link the
  // block's free-list category.
  size_t AccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
    Page* page = Page::FromAddress(start);
    accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }
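
  // Worked example (illustrative; 256 is an assumed object size): calling
  // Free(start, 256, SpaceAccountingMode::kSpaceAccounted) fills the range
  // with a filler object, hands it to AccountedFree(), and returns 256 minus
  // whatever the free list classified as waste (pieces too small to link).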

  inline bool TryFreeLast(Address object_address, int object_size);

  void ResetFreeList();

  // Empty space linear allocation area, returning unused area to free list.
  void FreeLinearAllocationArea() override;

  void MakeLinearAllocationAreaIterable();

  void MarkLinearAllocationAreaBlack();
  void UnmarkLinearAllocationArea();

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.DecreaseAllocatedBytes(bytes, page);
  }
  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.IncreaseAllocatedBytes(bytes, page);
  }
  void DecreaseCapacity(size_t bytes) {
    accounting_stats_.DecreaseCapacity(bytes);
  }
  void IncreaseCapacity(size_t bytes) {
    accounting_stats_.IncreaseCapacity(bytes);
  }

  void RefineAllocatedBytesAfterSweeping(Page* page);

  Page* InitializePage(MemoryChunk* chunk) override;

  void ReleasePage(Page* page);

  // Adds the page to this space and returns the number of bytes added to the
  // free list of the space.
  size_t AddPage(Page* page);
  void RemovePage(Page* page);
  // Remove a page if it has at least |size_in_bytes| bytes available that can
  // be used for allocation.
  Page* RemovePageSafe(int size_in_bytes);

  void SetReadable();
  void SetReadAndExecutable();
  void SetCodeModificationPermissions();

  void SetDefaultCodePermissions() {
    if (FLAG_jitless) {
      SetReadable();
    } else {
      SetReadAndExecutable();
    }
  }

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(Isolate* isolate, ObjectVisitor* visitor) const;

  void VerifyLiveBytes() const;

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject obj) const {}
#endif

#ifdef DEBUG
  void VerifyCountersAfterSweeping(Heap* heap) const;
  void VerifyCountersBeforeConcurrentSweeping() const;
  // Print meta info and objects in this space.
  void Print() override;

  // Report code object related statistics
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  bool CanExpand(size_t size) const;

  // Returns the total number of pages in this space.
  int CountTotalPages() const;

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() const { return static_cast<int>(area_size_); }

  bool is_compaction_space() const {
    return compaction_space_kind_ != CompactionSpaceKind::kNone;
  }

  CompactionSpaceKind compaction_space_kind() const {
    return compaction_space_kind_;
  }

  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeCompactionSpace(CompactionSpace* other);

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

  base::Mutex* mutex() { return &space_mutex_; }

  inline void UnlinkFreeListCategories(Page* page);
  inline size_t RelinkFreeListCategories(Page* page);

  Page* first_page() override {
    return reinterpret_cast<Page*>(memory_chunk_list_.front());
  }
  const Page* first_page() const override {
    return reinterpret_cast<const Page*>(memory_chunk_list_.front());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  const_iterator begin() const { return const_iterator(first_page()); }
  const_iterator end() const { return const_iterator(nullptr); }

  // Shrink immortal immovable pages of the space to be exactly the size needed
  // using the high water mark.
  void ShrinkImmortalImmovablePages();

  size_t ShrinkPageToHighWaterMark(Page* page);

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  void SetLinearAllocationArea(Address top, Address limit);

  void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
  void ReduceActiveSystemPages(Page* page,
                               ActiveSystemPages active_system_pages);

 private:
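  // Scoped helper that takes the space mutex only when the space supports
  // concurrent allocation (i.e. it is not a compaction space).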
  class ConcurrentAllocationMutex {
   public:
    explicit ConcurrentAllocationMutex(const PagedSpaceBase* space) {
      if (space->SupportsConcurrentAllocation()) {
        guard_.emplace(&space->space_mutex_);
      }
    }

    base::Optional<base::MutexGuard> guard_;
  };

  bool SupportsConcurrentAllocation() const { return !is_compaction_space(); }

  // Set space linear allocation area.
  void SetTopAndLimit(Address top, Address limit);
  void DecreaseLimit(Address new_limit);
  void UpdateInlineAllocationLimit(size_t min_size) override;
  bool SupportsAllocationObserver() const override {
    return !is_compaction_space();
  }

 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() const { return true; }

  bool HasPages() const { return first_page() != nullptr; }

  // Returns whether sweeping of this space is safe on this thread. Code space
  // sweeping is only allowed on the main thread.
  bool IsSweepingAllowedOnThread(LocalHeap* local_heap) const;

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Expands the space by allocating a fixed number of pages. Returns nullptr
  // if it cannot allocate the requested number of pages from the OS, or if
  // the hard heap size limit has been hit.
  virtual Page* Expand();

  // Expands the space by a single page from a background thread and allocates
  // a memory area of the given size in it. If successful the method returns
  // the address and size of the area.
  base::Optional<std::pair<Address, size_t>> ExpandBackground(
      size_t size_in_bytes);

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
                        AllocationOrigin origin,
                        int* out_max_aligned_size) final;

  V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
      size_t size_in_bytes, AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(int required_freed_bytes,
                                                      int max_pages,
                                                      int size_in_bytes,
                                                      AllocationOrigin origin);

  // Refills LAB for EnsureLabMain. This function is space-dependent. Returns
  // false if there is not enough space and the caller has to retry after
  // collecting garbage.
  V8_WARN_UNUSED_RESULT virtual bool RefillLabMain(int size_in_bytes,
                                                   AllocationOrigin origin);

  // Actual implementation of refilling LAB. Returns false if there is not
  // enough space and the caller has to retry after collecting garbage.
  V8_WARN_UNUSED_RESULT bool RawRefillLabMain(int size_in_bytes,
                                              AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
  TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
                                      size_t max_size_in_bytes,
                                      AllocationAlignment alignment,
                                      AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
                                       AllocationOrigin origin);

  size_t committed_physical_memory() const {
    return committed_physical_memory_.load(std::memory_order_relaxed);
  }

  Executability executable_;

  CompactionSpaceKind compaction_space_kind_;

  size_t area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // Mutex guarding any concurrent access to the space.
  mutable base::Mutex space_mutex_;

  std::atomic<size_t> committed_physical_memory_{0};

  friend class IncrementalMarking;
  friend class MarkCompactCollector;

  // Used in cctest.
  friend class heap::HeapTester;
};

class V8_EXPORT_PRIVATE PagedSpace : public PagedSpaceBase {
 public:
  // Creates a space with an id.
  PagedSpace(
      Heap* heap, AllocationSpace id, Executability executable,
      FreeList* free_list, LinearAllocationArea* allocation_info,
      CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone)
      : PagedSpaceBase(heap, id, executable, free_list, allocation_info,
                       linear_area_original_data_, compaction_space_kind) {}

 private:
  LinearAreaOriginalData linear_area_original_data_;
};

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class V8_EXPORT_PRIVATE CompactionSpace final : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
                  CompactionSpaceKind compaction_space_kind)
      : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
                   &allocation_info_, compaction_space_kind) {
    DCHECK(is_compaction_space());
  }

  const std::vector<Page*>& GetNewPages() { return new_pages_; }

 private:
  LinearAllocationArea allocation_info_;

 protected:
  V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
                                           AllocationOrigin origin) override;

  Page* Expand() override;
  // The space is temporary and not included in any snapshots.
  bool snapshotable() const override { return false; }
  // Pages that were allocated in this local space and need to be merged
  // to the main space.
  std::vector<Page*> new_pages_;
};

// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap,
                                     CompactionSpaceKind compaction_space_kind)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
                   compaction_space_kind),
        map_space_(heap, MAP_SPACE, Executability::NOT_EXECUTABLE,
                   compaction_space_kind),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
                    compaction_space_kind) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case MAP_SPACE:
        return &map_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace map_space_;
  CompactionSpace code_space_;
};
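
// Illustrative use by a single compaction task (a sketch, not the actual call
// sites; `kind` stands for any CompactionSpaceKind other than kNone):
//
//   CompactionSpaceCollection compaction_spaces(heap, kind);
//   CompactionSpace* target = compaction_spaces.Get(OLD_SPACE);
//   // ... evacuate live objects into `target` ...
//   heap->old_space()->MergeCompactionSpace(target);  // hand pages back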

// -----------------------------------------------------------------------------
// Old generation regular object space.

class OldSpace final : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  explicit OldSpace(Heap* heap, LinearAllocationArea* allocation_info)
      : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
                   allocation_info) {}

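  // True iff `addr` has, within its page, the offset of the first allocatable
  // object on a regular data page, i.e. it points just past the page header.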
  static bool IsAtPageStart(Address addr) {
    return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
           MemoryChunkLayout::ObjectStartOffsetInDataPage();
  }

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
    if (type == ExternalBackingStoreType::kArrayBuffer)
      return heap()->OldArrayBufferBytes();
    return external_backing_store_bytes_[type];
  }
};

// -----------------------------------------------------------------------------
// Old generation code object space.

class CodeSpace final : public PagedSpace {
 public:
  // Creates a code space object. The constructor does not allocate pages
  // from the OS.
  explicit CodeSpace(Heap* heap)
      : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList(),
                   &paged_allocation_info_) {}

 private:
  LinearAllocationArea paged_allocation_info_;
};

// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace final : public PagedSpace {
 public:
  // Creates a map space object.
  explicit MapSpace(Heap* heap)
      : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
                   &paged_allocation_info_) {}

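  // Rounds `size` down to a multiple of Map::kSize; the RoundDown() fast path
  // only applies when Map::kSize is a power of two. Worked example with
  // assumed values (not the real constants): for Map::kSize == 40 a size of
  // 100 rounds down to 80, for Map::kSize == 32 it rounds down to 96.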
  int RoundSizeDownToObjectAlignment(int size) const override {
    if (base::bits::IsPowerOfTwo(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }

  void SortFreeList();

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject obj) const override;
#endif

 private:
  LinearAllocationArea paged_allocation_info_;
};

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
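//
// Illustrative use (sketch):
//
//   OldGenerationMemoryChunkIterator it(heap);
//   for (MemoryChunk* chunk = it.next(); chunk != nullptr; chunk = it.next()) {
//     // ... visit chunk ...
//   }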
class OldGenerationMemoryChunkIterator {
 public:
  inline explicit OldGenerationMemoryChunkIterator(Heap* heap);

  // Return nullptr when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kCodeLargeObjectState,
    kFinishedState
  };
  Heap* const heap_;
  State state_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  const PageIterator map_iterator_end_;
  LargePageIterator lo_iterator_;
  LargePageIterator code_lo_iterator_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_PAGED_SPACES_H_