// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_

#include <atomic>
#include <memory>
#include <utility>

#include "src/base/bounds.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

class CompactionSpace;
class Heap;
class HeapObject;
class Isolate;
class ObjectVisitor;

// -----------------------------------------------------------------------------
// Heap object iterator in old/map spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects.  The caller must create a new
// iterator in order to be sure to visit these new objects.
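//
// A minimal usage sketch (assuming the conventional is_null() check on the
// returned HeapObject):
//
//   PagedSpaceObjectIterator it(heap, space);
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // ... visit obj ...
//   }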
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
  PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns a null HeapObject when the iteration has ended.
  inline HeapObject Next() override;

  // The pointer compression cage base value used for decompression of all
  // tagged values except references to Code objects.
  PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
    return cage_base_;
#else
    return PtrComprCageBase{};
#endif  // V8_COMPRESS_POINTERS
  }

 private:
  // Fast (inlined) path of next().
  inline HeapObject FromCurrentPage();

  // Slow path of next(), goes into the next page.  Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  PagedSpace* space_;
  PageRange page_range_;
  PageRange::iterator current_page_;
#if V8_COMPRESS_POINTERS
  const PtrComprCageBase cage_base_;
#endif  // V8_COMPRESS_POINTERS
};

class V8_EXPORT_PRIVATE PagedSpace
    : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 public:
  using iterator = PageIterator;
  using const_iterator = ConstPageIterator;

  static const size_t kCompactionMemoryWanted = 500 * KB;

  // Creates a space with an id.
  PagedSpace(
      Heap* heap, AllocationSpace id, Executability executable,
      FreeList* free_list, LinearAllocationArea* allocation_info_,
      CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);

  ~PagedSpace() override { TearDown(); }

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a) const;
  inline bool Contains(Object o) const;
  bool ContainsSlow(Address addr) const;

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  size_t Capacity() { return accounting_stats_.Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

#if DEBUG
  void VerifyCommittedPhysicalMemory();
#endif  // DEBUG

  void IncrementCommittedPhysicalMemory(size_t increment_value);
  void DecrementCommittedPhysicalMemory(size_t decrement_value);

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered.  As free spaces are
  // discovered during the sweeping they are subtracted from the size and added
  // to the available and wasted totals. The free list is cleared as well.
  void ClearAllocatorState() {
    accounting_stats_.ClearSize();
    free_list_->Reset();
  }

  // Available bytes without growing.  These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation.  New pages are
  // immediately added to the free list so they show up here.
  size_t Available() override;

  // Allocated bytes in this space.  Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated!  The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  size_t Size() override { return accounting_stats_.Size(); }

  // Wasted bytes in this space.  These are just the bytes that were thrown away
  // due to being too small to use for allocation.
  virtual size_t Waste() { return free_list_->wasted_bytes(); }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocate the requested number of bytes in the space double aligned if
  // possible, return a failure object if not.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment,
      AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocate the requested number of bytes in the space and consider allocation
  // alignment if needed.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment,
      AllocationOrigin origin = AllocationOrigin::kRuntime);
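
  // A minimal allocation sketch; alignment enumerator names vary across V8
  // revisions, so kWordAligned is only an assumption here:
  //
  //   AllocationResult result = space->AllocateRaw(size, kWordAligned);
  //   HeapObject object;
  //   if (!result.To(&object)) {
  //     // Not enough space: the caller has to collect garbage and retry.
  //   }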

  // Allocate the requested number of bytes in the space from a background
  // thread.
  V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
  RawRefillLabBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
                         size_t max_size_in_bytes,
                         AllocationAlignment alignment,
                         AllocationOrigin origin);

  size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
    if (size_in_bytes == 0) return 0;
    heap()->CreateFillerObjectAtBackground(
        start, static_cast<int>(size_in_bytes),
        ClearFreedMemoryMode::kDontClearFreedMemory);
    if (mode == SpaceAccountingMode::kSpaceAccounted) {
      return AccountedFree(start, size_in_bytes);
    } else {
      return UnaccountedFree(start, size_in_bytes);
    }
  }
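
  // For example, returning an unused linear allocation area tail to the free
  // list could look like this (a sketch, with top/limit standing in for the
  // current LAB bounds):
  //
  //   Free(top, limit - top, SpaceAccountingMode::kSpaceAccounted);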

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste. AccountedFree additionally updates
  // the accounting stats of the space; UnaccountedFree leaves them untouched.
  size_t AccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
    Page* page = Page::FromAddress(start);
    accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  inline bool TryFreeLast(Address object_address, int object_size);

  void ResetFreeList();

  // Empty space linear allocation area, returning unused area to free list.
  void FreeLinearAllocationArea() override;

  void MakeLinearAllocationAreaIterable();

  void MarkLinearAllocationAreaBlack();
  void UnmarkLinearAllocationArea();

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.DecreaseAllocatedBytes(bytes, page);
  }
  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.IncreaseAllocatedBytes(bytes, page);
  }
  void DecreaseCapacity(size_t bytes) {
    accounting_stats_.DecreaseCapacity(bytes);
  }
  void IncreaseCapacity(size_t bytes) {
    accounting_stats_.IncreaseCapacity(bytes);
  }

  void RefineAllocatedBytesAfterSweeping(Page* page);

  Page* InitializePage(MemoryChunk* chunk) override;

  void ReleasePage(Page* page);

  // Adds the page to this space and returns the number of bytes added to the
  // free list of the space.
  size_t AddPage(Page* page);
  void RemovePage(Page* page);
  // Remove a page if it has at least |size_in_bytes| bytes available that can
  // be used for allocation.
  Page* RemovePageSafe(int size_in_bytes);

  void SetReadable();
  void SetReadAndExecutable();
  void SetCodeModificationPermissions();

  void SetDefaultCodePermissions() {
    if (FLAG_jitless) {
      SetReadable();
    } else {
      SetReadAndExecutable();
    }
  }

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);

  void VerifyLiveBytes();

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject obj) {}
#endif

#ifdef DEBUG
  void VerifyCountersAfterSweeping(Heap* heap);
  void VerifyCountersBeforeConcurrentSweeping();
  // Print meta info and objects in this space.
  void Print() override;

  // Report code object related statistics
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  bool CanExpand(size_t size);

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() { return static_cast<int>(area_size_); }

  bool is_compaction_space() {
    return compaction_space_kind_ != CompactionSpaceKind::kNone;
  }

  CompactionSpaceKind compaction_space_kind() { return compaction_space_kind_; }

  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeCompactionSpace(CompactionSpace* other);

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

  base::Mutex* mutex() { return &space_mutex_; }

  inline void UnlinkFreeListCategories(Page* page);
  inline size_t RelinkFreeListCategories(Page* page);

  Page* first_page() override {
    return reinterpret_cast<Page*>(memory_chunk_list_.front());
  }
  const Page* first_page() const override {
    return reinterpret_cast<const Page*>(memory_chunk_list_.front());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  const_iterator begin() const { return const_iterator(first_page()); }
  const_iterator end() const { return const_iterator(nullptr); }

  // Shrink immortal immovable pages of the space to be exactly the size needed
  // using the high water mark.
  void ShrinkImmortalImmovablePages();

  size_t ShrinkPageToHighWaterMark(Page* page);

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  void SetLinearAllocationArea(Address top, Address limit);

  Address original_top() { return original_top_; }

  Address original_limit() { return original_limit_; }

  void MoveOriginalTopForward() {
    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
    DCHECK_GE(top(), original_top_);
    DCHECK_LE(top(), original_limit_);
    original_top_ = top();
  }

  base::SharedMutex* pending_allocation_mutex() {
    return &pending_allocation_mutex_;
  }
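
  // A reader that needs a consistent snapshot of the pending allocation area
  // is expected to hold the shared side of the lock, e.g. (sketch):
  //
  //   base::SharedMutexGuard<base::kShared> guard(
  //       space->pending_allocation_mutex());
  //   Address top = space->original_top();
  //   Address limit = space->original_limit();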

  void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
  void ReduceActiveSystemPages(Page* page,
                               ActiveSystemPages active_system_pages);

 private:
  class ConcurrentAllocationMutex {
   public:
    explicit ConcurrentAllocationMutex(PagedSpace* space) {
      if (space->SupportsConcurrentAllocation()) {
        guard_.emplace(&space->space_mutex_);
      }
    }

    base::Optional<base::MutexGuard> guard_;
  };

  bool SupportsConcurrentAllocation() { return !is_compaction_space(); }

  // Set space linear allocation area.
  void SetTopAndLimit(Address top, Address limit);
  void DecreaseLimit(Address new_limit);
  void UpdateInlineAllocationLimit(size_t min_size) override;
  bool SupportsAllocationObserver() override { return !is_compaction_space(); }

  // Slow path of the allocation function.
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
                  AllocationOrigin origin);

 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() { return true; }

  bool HasPages() { return first_page() != nullptr; }

  // Returns whether sweeping of this space is safe on this thread. Code space
  // sweeping is only allowed on the main thread.
  bool IsSweepingAllowedOnThread(LocalHeap* local_heap);

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Expands the space by allocating a fixed number of pages. Returns nullptr
  // if it cannot allocate the requested number of pages from the OS, or if
  // the hard heap size limit has been hit.
  virtual Page* Expand();

  // Expands the space by a single page from a background thread and allocates
  // a memory area of the given size in it. If successful the method returns
  // the address and size of the area.
  base::Optional<std::pair<Address, size_t>> ExpandBackground(
      size_t size_in_bytes);

  Page* AllocatePage();

  // Sets up a linear allocation area that fits the given number of bytes.
  // Returns false if there is not enough space and the caller has to retry
  // after collecting garbage.
  inline bool EnsureLabMain(int size_in_bytes, AllocationOrigin origin);
  // Allocates an object from the linear allocation area. Assumes that the
  // linear allocation area is large enough to fit the object.
  inline AllocationResult AllocateFastUnaligned(int size_in_bytes);
  // Tries to allocate an aligned object from the linear allocation area.
  // Returns a failure result if the linear allocation area does not fit the
  // object. Otherwise, returns the object and writes the allocation size
  // (object size + alignment filler size) to *aligned_size_in_bytes.
  inline AllocationResult AllocateFastAligned(int size_in_bytes,
                                              int* aligned_size_in_bytes,
                                              AllocationAlignment alignment);

  V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
      size_t size_in_bytes, AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(int required_freed_bytes,
                                                      int max_pages,
                                                      int size_in_bytes,
                                                      AllocationOrigin origin);

  // Refills LAB for EnsureLabMain. This function is space-dependent. Returns
  // false if there is not enough space and the caller has to retry after
  // collecting garbage.
  V8_WARN_UNUSED_RESULT virtual bool RefillLabMain(int size_in_bytes,
                                                   AllocationOrigin origin);

  // Actual implementation of refilling LAB. Returns false if there is not
  // enough space and the caller has to retry after collecting garbage.
  V8_WARN_UNUSED_RESULT bool RawRefillLabMain(int size_in_bytes,
                                              AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
  TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
                                      size_t max_size_in_bytes,
                                      AllocationAlignment alignment,
                                      AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
                                       AllocationOrigin origin);

  size_t committed_physical_memory() const {
    return committed_physical_memory_.load(std::memory_order_relaxed);
  }

  Executability executable_;

  CompactionSpaceKind compaction_space_kind_;

  size_t area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // Mutex guarding any concurrent access to the space.
  base::Mutex space_mutex_;

  // The top and the limit at the time of setting the linear allocation area.
  // These values are protected by pending_allocation_mutex_.
  Address original_top_;
  Address original_limit_;

  // Protects original_top_ and original_limit_.
  base::SharedMutex pending_allocation_mutex_;

  std::atomic<size_t> committed_physical_memory_{0};

  friend class IncrementalMarking;
  friend class MarkCompactCollector;

  // Used in cctest.
  friend class heap::HeapTester;
};

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
                  CompactionSpaceKind compaction_space_kind)
      : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
                   &allocation_info_, compaction_space_kind) {
    DCHECK(is_compaction_space());
  }

  const std::vector<Page*>& GetNewPages() { return new_pages_; }

 private:
  LinearAllocationArea allocation_info_;

 protected:
  V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
                                           AllocationOrigin origin) override;

  Page* Expand() override;
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }
  // Pages that were allocated in this local space and need to be merged
  // to the main space.
  std::vector<Page*> new_pages_;
};

// A collection of |CompactionSpace|s used by a single compaction task.
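//
// Typical use in a compaction task (a sketch; the enumerator
// CompactionSpaceKind::kCompactionSpaceForMarkCompact is assumed):
//
//   CompactionSpaceCollection spaces(
//       heap, CompactionSpaceKind::kCompactionSpaceForMarkCompact);
//   CompactionSpace* old_space = spaces.Get(OLD_SPACE);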
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap,
                                     CompactionSpaceKind compaction_space_kind)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
                   compaction_space_kind),
        map_space_(heap, MAP_SPACE, Executability::NOT_EXECUTABLE,
                   compaction_space_kind),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
                    compaction_space_kind) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case MAP_SPACE:
        return &map_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace map_space_;
  CompactionSpace code_space_;
};

// -----------------------------------------------------------------------------
// Old generation regular object space.

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  explicit OldSpace(Heap* heap, LinearAllocationArea* allocation_info)
      : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
                   allocation_info) {}

  static bool IsAtPageStart(Address addr) {
    return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
           MemoryChunkLayout::ObjectStartOffsetInDataPage();
  }

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
    if (type == ExternalBackingStoreType::kArrayBuffer)
      return heap()->OldArrayBufferBytes();
    return external_backing_store_bytes_[type];
  }
};

// -----------------------------------------------------------------------------
// Old generation code object space.

class CodeSpace : public PagedSpace {
 public:
  // Creates a code space object. The constructor does not allocate pages
  // from the OS.
  explicit CodeSpace(Heap* heap)
      : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList(),
                   &paged_allocation_info_) {}

 private:
  LinearAllocationArea paged_allocation_info_;
};

// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  explicit MapSpace(Heap* heap)
      : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
                   &paged_allocation_info_) {}

  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
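
  // For instance, if Map::kSize were 80 (a hypothetical, non-power-of-two
  // value), a request of 200 bytes would round down to
  // (200 / 80) * 80 == 160 bytes, i.e. room for exactly two maps.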

  void SortFreeList();

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject obj) override;
#endif

 private:
  LinearAllocationArea paged_allocation_info_;
};

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
 public:
  inline explicit OldGenerationMemoryChunkIterator(Heap* heap);

  // Return nullptr when the iterator is done.
  inline MemoryChunk* next();
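
  // Typical use (sketch):
  //
  //   OldGenerationMemoryChunkIterator it(heap);
  //   while (MemoryChunk* chunk = it.next()) {
  //     // ... process chunk ...
  //   }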

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kCodeLargeObjectState,
    kFinishedState
  };
  Heap* heap_;
  State state_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  LargePageIterator lo_iterator_;
  LargePageIterator code_lo_iterator_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_PAGED_SPACES_H_