// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <atomic>
#include <cmath>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8-callbacks.h"
#include "include/v8-embedder-heap.h"
#include "include/v8-internal.h"
#include "include/v8-isolate.h"
#include "src/base/atomic-utils.h"
#include "src/base/enum-set.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/builtins/accessors.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/allocation-result.h"
#include "src/init/heap-symbols.h"
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/hash-table.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/objects.h"
#include "src/objects/smi.h"
#include "src/objects/visitors.h"
#include "src/roots/roots.h"
#include "src/utils/allocation.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {

namespace debug {
using OutOfMemoryCallback = void (*)(void* data);
}  // namespace debug

namespace internal {

namespace heap {
class HeapTester;
class TestMemoryAllocatorScope;
}  // namespace heap

namespace third_party_heap {
class Heap;
class Impl;
}  // namespace third_party_heap

class IncrementalMarking;
class BackingStore;
class JSArrayBuffer;
class JSPromise;
class NativeContext;

using v8::MemoryPressureLevel;

class ArrayBufferCollector;
class ArrayBufferSweeper;
class BasicMemoryChunk;
class CodeLargeObjectSpace;
class CodeRange;
class CollectionBarrier;
class ConcurrentAllocator;
class ConcurrentMarking;
class CppHeap;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
template <typename T>
class GlobalHandleVector;
class IsolateSafepoint;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
class Isolate;
class JSFinalizationRegistry;
class LinearAllocationArea;
class LocalEmbedderHeapTracer;
class LocalHeap;
class MarkingBarrier;
class MemoryAllocator;
class MemoryChunk;
class MemoryMeasurement;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
class ReadOnlyHeap;
class RootVisitor;
class SafepointScope;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
class SharedReadOnlySpace;
class Space;
class StressScavengeObserver;
class TimedHistogram;
class WeakObjectRetainer;

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

enum class ClearRecordedSlots { kYes, kNo };

enum class InvalidateRecordedSlots { kYes, kNo };

enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };

enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };

enum class RetainingPathOption { kDefault, kTrackEphemeronPath };

enum class AllocationOrigin {
  kGeneratedCode = 0,
  kRuntime = 1,
  kGC = 2,
  kFirstAllocationOrigin = kGeneratedCode,
  kLastAllocationOrigin = kGC,
  kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
};

// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused. If you add new items here, update
// src/tools/metrics/histograms/enums.xml in chromium.
enum class GarbageCollectionReason : int {
  kUnknown = 0,
  kAllocationFailure = 1,
  kAllocationLimit = 2,
  kContextDisposal = 3,
  kCountersExtension = 4,
  kDebugger = 5,
  kDeserializer = 6,
  kExternalMemoryPressure = 7,
  kFinalizeMarkingViaStackGuard = 8,
  kFinalizeMarkingViaTask = 9,
  kFullHashtable = 10,
  kHeapProfiler = 11,
  kTask = 12,
  kLastResort = 13,
  kLowMemoryNotification = 14,
  kMakeHeapIterable = 15,
  kMemoryPressure = 16,
  kMemoryReducer = 17,
  kRuntime = 18,
  kSamplingProfiler = 19,
  kSnapshotCreator = 20,
  kTesting = 21,
  kExternalFinalize = 22,
  kGlobalAllocationLimit = 23,
  kMeasureMemory = 24,
  kBackgroundAllocationFailure = 25,

  kLastReason = kBackgroundAllocationFailure,
};

static_assert(kGarbageCollectionReasonMaxValue ==
                  static_cast<int>(GarbageCollectionReason::kLastReason),
              "The value of kGarbageCollectionReasonMaxValue is inconsistent.");

enum class YoungGenerationHandling {
  kRegularScavenge = 0,
  kFastPromotionDuringScavenge = 1,
  // Histogram::InspectConstructionArguments in chromium requires us to have at
  // least three buckets.
  kUnusedBucket = 2,
  // If you add new items here, then update the young_generation_handling in
  // counters.h.
  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
};

enum class GCIdleTimeAction : uint8_t;

enum class SkipRoot {
  kExternalStringTable,
  kGlobalHandles,
  kOldGeneration,
  kStack,
  kMainThreadHandles,
  kUnserializable,
  kWeak
};

enum UnprotectMemoryOrigin {
  kMainThread,
  kMaybeOffMainThread,
};

class StrongRootsEntry final {
  explicit StrongRootsEntry(const char* label) : label(label) {}

  // Label that identifies the roots in tooling.
  const char* label;
  FullObjectSlot start;
  FullObjectSlot end;
  StrongRootsEntry* prev;
  StrongRootsEntry* next;

  friend class Heap;
};

#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = nullptr;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif

using EphemeronRememberedSet =
    std::unordered_map<EphemeronHashTable, std::unordered_set<int>,
                       Object::Hasher>;

class Heap {
 public:
  // Stores ephemeron entries where the EphemeronHashTable is in old-space,
  // and the key of the entry is in new-space. Such keys do not appear in the
  // usual OLD_TO_NEW remembered set.
  EphemeronRememberedSet ephemeron_remembered_set_;
  enum FindMementoMode { kForRuntime, kForGC };

  enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };

  enum HeapState {
    NOT_IN_GC,
    SCAVENGE,
    MARK_COMPACT,
    MINOR_MARK_COMPACT,
    TEAR_DOWN
  };

  // Emits GC events for DevTools timeline.
  class V8_NODISCARD DevToolsTraceEventScope {
   public:
    DevToolsTraceEventScope(Heap* heap, const char* event_name,
                            const char* event_type);
    ~DevToolsTraceEventScope();

   private:
    Heap* heap_;
    const char* event_name_;
  };

  class ExternalMemoryAccounting {
   public:
    int64_t total() { return total_.load(std::memory_order_relaxed); }
    int64_t limit() { return limit_.load(std::memory_order_relaxed); }
    int64_t low_since_mark_compact() {
      return low_since_mark_compact_.load(std::memory_order_relaxed);
    }

    void ResetAfterGC() {
      set_low_since_mark_compact(total());
      set_limit(total() + kExternalAllocationSoftLimit);
    }

    int64_t Update(int64_t delta) {
      const int64_t amount =
          total_.fetch_add(delta, std::memory_order_relaxed) + delta;
      if (amount < low_since_mark_compact()) {
        set_low_since_mark_compact(amount);
        set_limit(amount + kExternalAllocationSoftLimit);
      }
      return amount;
    }

    int64_t AllocatedSinceMarkCompact() {
      int64_t total_bytes = total();
      int64_t low_since_mark_compact_bytes = low_since_mark_compact();

      if (total_bytes <= low_since_mark_compact_bytes) {
        return 0;
      }
      return static_cast<uint64_t>(total_bytes - low_since_mark_compact_bytes);
    }

   private:
    void set_total(int64_t value) {
      total_.store(value, std::memory_order_relaxed);
    }

    void set_limit(int64_t value) {
      limit_.store(value, std::memory_order_relaxed);
    }

    void set_low_since_mark_compact(int64_t value) {
      low_since_mark_compact_.store(value, std::memory_order_relaxed);
    }

    // The amount of external memory registered through the API.
    std::atomic<int64_t> total_{0};

    // The limit when to trigger memory pressure from the API.
    std::atomic<int64_t> limit_{kExternalAllocationSoftLimit};

    // Caches the amount of external memory registered at the last MC.
    std::atomic<int64_t> low_since_mark_compact_{0};
  };
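
  // Illustrative example (not part of the class above; the 64 MB figure for
  // kExternalAllocationSoftLimit is an assumption): starting from total = 0
  // and a low watermark of 0, Update(+32 * MB) raises total to 32 MB and
  // leaves the limit at 64 MB; a later Update(-40 * MB) drops total to -8 MB,
  // which is below the watermark, so low_since_mark_compact and the limit
  // (now -8 MB + 64 MB) are lowered with it. AllocatedSinceMarkCompact()
  // always reports total() - low_since_mark_compact(), clamped at zero.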

  using PretenuringFeedbackMap =
      std::unordered_map<AllocationSite, size_t, Object::Hasher>;

  // Taking this mutex prevents the GC from entering a phase that relocates
  // object references.
  base::Mutex* relocation_mutex() { return &relocation_mutex_; }

  // Support for context snapshots.  After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  using Reservation = std::vector<Chunk>;

#if V8_OS_ANDROID
  // Don't apply pointer multiplier on Android since it has no swap space and
  // should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
  static const int kHeapLimitMultiplier = 1;
#else
  static const int kPointerMultiplier = kTaggedSize / 4;
  // The heap limit needs to be computed based on the system pointer size
  // because we want a pointer-compressed heap to have a larger limit than an
  // ordinary 32-bit heap, which is constrained by the 2GB virtual address
  // space.
  static const int kHeapLimitMultiplier = kSystemPointerSize / 4;
#endif

  static const size_t kMaxInitialOldGenerationSize =
      256 * MB * kHeapLimitMultiplier;

  // These constants control heap configuration based on the physical memory.
  static constexpr size_t kPhysicalMemoryToOldGenerationRatio = 4;
  // Young generation size is the same for compressed heaps and 32-bit heaps.
  static constexpr size_t kOldGenerationToSemiSpaceRatio =
      128 * kHeapLimitMultiplier / kPointerMultiplier;
  static constexpr size_t kOldGenerationToSemiSpaceRatioLowMemory =
      256 * kHeapLimitMultiplier / kPointerMultiplier;
  static constexpr size_t kOldGenerationLowMemory =
      128 * MB * kHeapLimitMultiplier;
  static constexpr size_t kNewLargeObjectSpaceToSemiSpaceRatio = 1;
#if ENABLE_HUGEPAGE
  static constexpr size_t kMinSemiSpaceSize =
      kHugePageSize * kPointerMultiplier;
  static constexpr size_t kMaxSemiSpaceSize =
      kHugePageSize * 16 * kPointerMultiplier;
#else
  static constexpr size_t kMinSemiSpaceSize = 512 * KB * kPointerMultiplier;
  static constexpr size_t kMaxSemiSpaceSize = 8192 * KB * kPointerMultiplier;
#endif

  STATIC_ASSERT(kMinSemiSpaceSize % (1 << kPageSizeBits) == 0);
  STATIC_ASSERT(kMaxSemiSpaceSize % (1 << kPageSizeBits) == 0);

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  // GCs that are forced, either through testing configurations (requiring
  // --expose-gc) or through DevTools (using LowMemoryNotification).
  static const int kForcedGC = 2;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInTaggedWords = 2;

  static const int kMinPromotedPercentForFastPromotionMode = 90;

  STATIC_ASSERT(static_cast<int>(RootIndex::kUndefinedValue) ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kTheHoleValue) ==
                Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kNullValue) ==
                Internals::kNullValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kTrueValue) ==
                Internals::kTrueValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kFalseValue) ==
                Internals::kFalseValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kempty_string) ==
                Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  V8_EXPORT_PRIVATE static int GetMaximumFillToAlign(
      AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  V8_EXPORT_PRIVATE static int GetFillToAlign(Address address,
                                              AllocationAlignment alignment);
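
  // Illustrative sketch of how these helpers are typically combined (the call
  // site below is hypothetical): a caller that needs an aligned object
  // reserves size + GetMaximumFillToAlign(alignment) bytes and then uses
  // GetFillToAlign() to decide how much filler to place before the object:
  //
  //   int filler = Heap::GetFillToAlign(start, kDoubleAligned);
  //   if (filler > 0) {
  //     heap->CreateFillerObjectAt(start, filler, ClearRecordedSlots::kNo);
  //   }
  //   Address object_start = start + filler;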

  // Returns the size of the initial area of a code-range, which is marked
  // writable and reserved to contain unwind information.
  static size_t GetCodeRangeReservedAreaSize();

  [[noreturn]] void FatalProcessOutOfMemory(const char* location);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  // Helper function to get the bytecode flushing mode based on the flags. This
  // is required because it is not safe to access flags in the concurrent
  // marker.
  static inline base::EnumSet<CodeFlushMode> GetCodeFlushMode(Isolate* isolate);

  static uintptr_t ZapValue() {
    return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
  }

  static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
    return collector == GarbageCollector::SCAVENGER ||
           collector == GarbageCollector::MINOR_MARK_COMPACTOR;
  }

  static inline GarbageCollector YoungGenerationCollector() {
    return (FLAG_minor_mc) ? GarbageCollector::MINOR_MARK_COMPACTOR
                           : GarbageCollector::SCAVENGER;
  }

  static inline const char* CollectorName(GarbageCollector collector) {
    switch (collector) {
      case GarbageCollector::SCAVENGER:
        return "Scavenger";
      case GarbageCollector::MARK_COMPACTOR:
        return "Mark-Compact";
      case GarbageCollector::MINOR_MARK_COMPACTOR:
        return "Minor Mark-Compact";
    }
    return "Unknown collector";
  }

  static inline const char* CollectorName(v8::GCType gc_type) {
    switch (gc_type) {
      case kGCTypeScavenge:
        return "Scavenger";
      case kGCTypeMarkSweepCompact:
        return "Mark-Compact";
      case kGCTypeMinorMarkCompact:
        return "Minor Mark-Compact";
      default:
        break;
    }
    return "Unknown collector";
  }

  // Copies a block of memory from src to dst. The block size should be
  // pointer-size aligned.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Executes generational and/or marking write barrier for a [start, end) range
  // of non-weak slots inside |object|.
  template <typename TSlot>
  V8_EXPORT_PRIVATE void WriteBarrierForRange(HeapObject object, TSlot start,
                                              TSlot end);
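
  // Illustrative sketch (hypothetical call site): after bulk-initializing a
  // range of tagged slots without per-slot write barriers, the whole range is
  // reported in one call so that the generational and marking barriers see
  // the new references:
  //
  //   ObjectSlot start = first_slot;        // hypothetical slot of element 0
  //   ObjectSlot end = start + length;
  //   heap->WriteBarrierForRange(array, start, end);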

  V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);

  V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
                                                        Address slot,
                                                        HeapObject value);
  V8_EXPORT_PRIVATE inline void RecordEphemeronKeyWrite(
      EphemeronHashTable table, Address key_slot);
  V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
      Address raw_object, Address address, Isolate* isolate);
  V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
      Code host, RelocInfo* rinfo, HeapObject value);
  V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject object);

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  void NotifyBootstrapComplete();

  void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);

  inline Address* NewSpaceAllocationTopAddress();
  inline Address* NewSpaceAllocationLimitAddress();
  inline Address* OldSpaceAllocationTopAddress();
  inline Address* OldSpaceAllocationLimitAddress();

  size_t NewSpaceSize();
  size_t NewSpaceCapacity();

  // Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
  // The source and destination memory ranges can overlap.
  V8_EXPORT_PRIVATE void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
                                   ObjectSlot src_slot, int len,
                                   WriteBarrierMode mode);

  // Copy len non-weak tagged elements from src_slot to dst_slot of dst_object.
  // The source and destination memory ranges must not overlap.
  template <typename TSlot>
  void CopyRange(HeapObject dst_object, TSlot dst_slot, TSlot src_slot, int len,
                 WriteBarrierMode mode);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages. If slots could have been recorded in
  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
  // pass ClearRecordedSlots::kNo. Clears memory if clearing slots.
  V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
      Address addr, int size, ClearRecordedSlots clear_slots_mode);

  void CreateFillerObjectAtBackground(Address addr, int size,
                                      ClearFreedMemoryMode clear_memory_mode);

  template <typename T>
  void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);

  bool CanMoveObjectStart(HeapObject object);

  bool IsImmovable(HeapObject object);

  V8_EXPORT_PRIVATE static bool IsLargeObject(HeapObject object);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  V8_EXPORT_PRIVATE FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj,
                                                      int elements_to_trim);

  // Trim the given array from the right.
  V8_EXPORT_PRIVATE void RightTrimFixedArray(FixedArrayBase obj,
                                             int elements_to_trim);
  void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);
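
  // Illustrative example: RightTrimFixedArray(array, 2) shrinks |array| by two
  // elements in place, leaving a filler object over the freed tail, while
  // LeftTrimFixedArray(array, 2) moves the object start forward by two element
  // slots and returns the new FixedArrayBase; the old reference must not be
  // used afterwards.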

  // Converts the given boolean condition to a JavaScript boolean value.
  inline Oddball ToBoolean(bool condition);

  // Notify the heap that a context has been disposed.
  V8_EXPORT_PRIVATE int NotifyContextDisposed(bool dependant_context);

  void set_native_contexts_list(Object object) {
    native_contexts_list_.store(object.ptr(), std::memory_order_release);
  }

  Object native_contexts_list() const {
    return Object(native_contexts_list_.load(std::memory_order_acquire));
  }

  void set_allocation_sites_list(Object object) {
    allocation_sites_list_ = object;
  }
  Object allocation_sites_list() { return allocation_sites_list_; }

  void set_dirty_js_finalization_registries_list(Object object) {
    dirty_js_finalization_registries_list_ = object;
  }
  Object dirty_js_finalization_registries_list() {
    return dirty_js_finalization_registries_list_;
  }
  void set_dirty_js_finalization_registries_list_tail(Object object) {
    dirty_js_finalization_registries_list_tail_ = object;
  }
  Object dirty_js_finalization_registries_list_tail() {
    return dirty_js_finalization_registries_list_tail_;
  }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Address allocation_sites_list_address() {
    return reinterpret_cast<Address>(&allocation_sites_list_);
  }

  // Traverse all the allocation sites [nested_site and weak_next] in the list
  // and, for each, call the visitor.
  void ForeachAllocationSite(
      Object list, const std::function<void(AllocationSite)>& visitor);

  // Number of mark-sweeps.
  int ms_count() const { return ms_count_; }

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  bool AllowedToBeMigrated(Map map, HeapObject object, AllocationSpace dest);

  void CheckHandleCount();

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  // Print statistics of freelists of old_space:
  //  with FLAG_trace_gc_freelists: summary of each FreeListCategory.
  //  with FLAG_trace_gc_freelists_verbose: also prints the statistics of each
  //  FreeListCategory of each page.
  void PrintFreeListsStats();

  // Dump heap statistics in JSON format.
  void DumpJSONHeapStatistics(std::stringstream& stream);

  bool write_protect_code_memory() const { return write_protect_code_memory_; }

  uintptr_t code_space_memory_modification_scope_depth() {
    return code_space_memory_modification_scope_depth_;
  }

  void increment_code_space_memory_modification_scope_depth() {
    code_space_memory_modification_scope_depth_++;
  }

  void decrement_code_space_memory_modification_scope_depth() {
    code_space_memory_modification_scope_depth_--;
  }

  void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
                                       UnprotectMemoryOrigin origin);
  V8_EXPORT_PRIVATE void UnprotectAndRegisterMemoryChunk(
      HeapObject object, UnprotectMemoryOrigin origin);
  void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
  V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();

  void IncrementCodePageCollectionMemoryModificationScopeDepth() {
    code_page_collection_memory_modification_scope_depth_++;
  }

  void DecrementCodePageCollectionMemoryModificationScopeDepth() {
    code_page_collection_memory_modification_scope_depth_--;
  }

  uintptr_t code_page_collection_memory_modification_scope_depth() {
    return code_page_collection_memory_modification_scope_depth_;
  }

  inline HeapState gc_state() const {
    return gc_state_.load(std::memory_order_relaxed);
  }
  void SetGCState(HeapState state);
  bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
  bool force_oom() const { return force_oom_; }

  bool ignore_local_gc_requests() const {
    return ignore_local_gc_requests_depth_ > 0;
  }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  bool IsGCWithoutStack() const;

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return a null AllocationMemento.
  template <FindMementoMode mode>
  inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);

  // Performs GC after background allocation failure.
  void CollectGarbageForBackground(LocalHeap* local_heap);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  V8_EXPORT_PRIVATE void MemoryPressureNotification(MemoryPressureLevel level,
                                                    bool is_isolate_locked);
  void CheckMemoryPressure();

  V8_EXPORT_PRIVATE void AddNearHeapLimitCallback(v8::NearHeapLimitCallback,
                                                  void* data);
  V8_EXPORT_PRIVATE void RemoveNearHeapLimitCallback(
      v8::NearHeapLimitCallback callback, size_t heap_limit);
  V8_EXPORT_PRIVATE void AutomaticallyRestoreInitialHeapLimit(
      double threshold_percent);
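
  // Illustrative sketch of a near-heap-limit callback an embedder might
  // register (normally via the public v8::Isolate API, which routes here).
  // The signature is assumed to match v8::NearHeapLimitCallback:
  //
  //   size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
  //                          size_t initial_heap_limit) {
  //     // Grant 50% more headroom instead of letting the allocation fail.
  //     return current_heap_limit + current_heap_limit / 2;
  //   }
  //   heap->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);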

  void AppendArrayBufferExtension(JSArrayBuffer object,
                                  ArrayBufferExtension* extension);
  void DetachArrayBufferExtension(JSArrayBuffer object,
                                  ArrayBufferExtension* extension);

  IsolateSafepoint* safepoint() { return safepoint_.get(); }

  V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs() const;

  void VerifyNewSpaceTop();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  bool MeasureMemory(std::unique_ptr<v8::MeasureMemoryDelegate> delegate,
                     v8::MeasureMemoryExecution execution);

  std::unique_ptr<v8::MeasureMemoryDelegate> MeasureMemoryDelegate(
      Handle<NativeContext> context, Handle<JSPromise> promise,
      v8::MeasureMemoryMode mode);

  // Checks the new space expansion criteria and expands semispaces if they
  // were hit.
  void CheckNewSpaceExpansionCriteria();

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address);

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  inline int NextScriptId();
  inline int NextDebuggingId();
  inline int GetNextTemplateSerialNumber();

  void SetSerializedObjects(FixedArray objects);
  void SetSerializedGlobalProxySizes(FixedArray sizes);

  void SetBasicBlockProfilingData(Handle<ArrayList> list);

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  int64_t external_memory_hard_limit() { return max_old_generation_size() / 2; }

  V8_INLINE int64_t external_memory();
  V8_EXPORT_PRIVATE int64_t external_memory_limit();
  V8_INLINE int64_t update_external_memory(int64_t delta);

  V8_EXPORT_PRIVATE size_t YoungArrayBufferBytes();
  V8_EXPORT_PRIVATE size_t OldArrayBufferBytes();

  uint64_t backing_store_bytes() const {
    return backing_store_bytes_.load(std::memory_order_relaxed);
  }

  void CompactWeakArrayLists();

  V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
                                        Handle<Map> map);

  // This event is triggered after an object is moved to a new place.
  void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);

  inline bool CanAllocateInReadOnlySpace();
  bool deserialization_complete() const { return deserialization_complete_; }

  // We can only invoke Safepoint() on the main thread local heap after
  // deserialization is complete. Before that, main_thread_local_heap_ might be
  // null.
  V8_INLINE bool CanSafepoint() const { return deserialization_complete(); }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(size_t used, size_t committed);

  void ActivateMemoryReducerIfNeeded();

  V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage();

  bool HighMemoryPressure() {
    return memory_pressure_level_.load(std::memory_order_relaxed) !=
           MemoryPressureLevel::kNone;
  }

  bool CollectionRequested();

  void CheckCollectionRequested();

  void RestoreHeapLimit(size_t heap_limit) {
    // Do not set the limit lower than the live size + some slack.
    size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
    set_max_old_generation_size(
        std::min(max_old_generation_size(), std::max(heap_limit, min_limit)));
  }
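
  // Worked example: if SizeOfObjects() currently reports 400 MB, the slack
  // term makes min_limit 500 MB, so RestoreHeapLimit(300 * MB) will not shrink
  // the limit below 500 MB, and RestoreHeapLimit(2 * GB) never raises it above
  // the current max_old_generation_size().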

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  void ConfigureHeap(const v8::ResourceConstraints& constraints);
  void ConfigureHeapDefault();

  // Prepares the heap, setting up for deserialization.
  void SetUp(LocalHeap* main_thread_local_heap);

  // Sets read-only heap and space.
  void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);

  void ReplaceReadOnlySpace(SharedReadOnlySpace* shared_ro_space);

  // Sets up the heap memory without creating any objects.
  void SetUpSpaces(LinearAllocationArea* new_allocation_info,
                   LinearAllocationArea* old_allocation_info);

  // Prepares the heap, setting up for deserialization.
  void InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap);

  // (Re-)Initialize hash seed from flag or RNG.
  void InitializeHashSeed();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
  void CreateObjectStats();

  // Sets the TearDown state, so no new GC tasks get posted.
  void StartTearDown();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp() const;

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  inline Address NewSpaceTop();

  NewSpace* new_space() { return new_space_; }
  OldSpace* old_space() { return old_space_; }
  OldSpace* shared_old_space() { return shared_old_space_; }
  CodeSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  inline PagedSpace* space_for_maps();
  OldLargeObjectSpace* lo_space() { return lo_space_; }
  CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
  NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
  ReadOnlySpace* read_only_space() { return read_only_space_; }

  inline PagedSpace* paged_space(int idx);
  inline Space* space(int idx);

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  GCTracer* tracer() { return tracer_.get(); }

  MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
  const MemoryAllocator* memory_allocator() const {
    return memory_allocator_.get();
  }

  inline ConcurrentAllocator* concurrent_allocator_for_maps();

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_.get();
  }

  MinorMarkCompactCollector* minor_mark_compact_collector() {
    return minor_mark_compact_collector_.get();
  }

  ArrayBufferSweeper* array_buffer_sweeper() {
    return array_buffer_sweeper_.get();
  }

  // The potentially overreserved address space region reserved by the code
  // range if it exists, or an empty region otherwise.
  const base::AddressRegion& code_region();

  CodeRange* code_range() { return code_range_.get(); }

  // The base of the code range if it exists, or the null address otherwise.
  inline Address code_range_base();

  LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }

  Heap* AsHeap() { return this; }

  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================

  // Shortcut to the roots table stored in the Isolate.
  V8_INLINE RootsTable& roots_table();

// Heap root getters.
#define ROOT_ACCESSOR(type, name, CamelName) inline type name();
  MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  V8_INLINE void SetRootMaterializedObjects(FixedArray objects);
  V8_INLINE void SetRootScriptList(Object value);
  V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object value);
  V8_INLINE void SetMessageListeners(TemplateList value);
  V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);

  StrongRootsEntry* RegisterStrongRoots(const char* label, FullObjectSlot start,
                                        FullObjectSlot end);
  void UnregisterStrongRoots(StrongRootsEntry* entry);
  void UpdateStrongRoots(StrongRootsEntry* entry, FullObjectSlot start,
                         FullObjectSlot end);
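
  // Illustrative lifecycle sketch (hypothetical caller): a component keeping
  // tagged pointers in off-heap storage registers them so the GC treats them
  // as strong roots, updates the entry when the buffer is reallocated, and
  // unregisters it on shutdown:
  //
  //   StrongRootsEntry* entry =
  //       heap->RegisterStrongRoots("MyComponent", FullObjectSlot(begin),
  //                                 FullObjectSlot(end));
  //   heap->UpdateStrongRoots(entry, FullObjectSlot(new_begin),
  //                           FullObjectSlot(new_end));
  //   heap->UnregisterStrongRoots(entry);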

  void SetBuiltinsConstantsTable(FixedArray cache);
  void SetDetachedContexts(WeakArrayList detached_contexts);

  // A full copy of the interpreter entry trampoline, used as a template to
  // create copies of the builtin at runtime. The copies are used to create
  // better profiling information for ticks in bytecode execution. Note that
  // this is always a copy of the full builtin, i.e. not the off-heap
  // trampoline.
  // See also: FLAG_interpreted_frames_native_stack.
  void SetInterpreterEntryTrampolineForProfiling(Code code);

  void EnqueueDirtyJSFinalizationRegistry(
      JSFinalizationRegistry finalization_registry,
      std::function<void(HeapObject object, ObjectSlot slot, Object target)>
          gc_notify_updated_slot);

  MaybeHandle<JSFinalizationRegistry> DequeueDirtyJSFinalizationRegistry();

  // Called from Heap::NotifyContextDisposed to remove all
  // FinalizationRegistries with {context} from the dirty list when the context
  // e.g. navigates away or is detached. If the dirty list is empty afterwards,
  // the cleanup task is aborted if needed.
  void RemoveDirtyFinalizationRegistriesOnContext(NativeContext context);

  inline bool HasDirtyJSFinalizationRegistries();

  void PostFinalizationRegistryCleanupTaskIfNeeded();

  void set_is_finalization_registry_cleanup_task_posted(bool posted) {
    is_finalization_registry_cleanup_task_posted_ = posted;
  }

  bool is_finalization_registry_cleanup_task_posted() {
    return is_finalization_registry_cleanup_task_posted_;
  }

  V8_EXPORT_PRIVATE void KeepDuringJob(Handle<JSReceiver> target);
  void ClearKeptObjects();

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Switch whether inline bump-pointer allocation should be used.
  V8_EXPORT_PRIVATE void EnableInlineAllocation();
  V8_EXPORT_PRIVATE void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  V8_EXPORT_PRIVATE bool CollectGarbage(
      AllocationSpace space, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection.
  V8_EXPORT_PRIVATE void CollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last-resort GC: tries to squeeze out as much memory as possible.
  V8_EXPORT_PRIVATE void CollectAllAvailableGarbage(
      GarbageCollectionReason gc_reason);

  // Precise garbage collection that potentially finalizes already running
  // incremental marking before performing an atomic garbage collection.
  // Only use if absolutely necessary or in tests to avoid floating garbage!
  V8_EXPORT_PRIVATE void PreciseCollectAllGarbage(
      int flags, GarbageCollectionReason gc_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
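
  // Illustrative call (a pattern commonly seen in tests; treat the exact
  // invocation as an assumption rather than the canonical one):
  //
  //   heap->CollectAllGarbage(Heap::kNoGCFlags,
  //                           GarbageCollectionReason::kTesting);
  //
  // Passing kReduceMemoryFootprintMask instead of kNoGCFlags additionally asks
  // the collector to shrink the heap.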

  // Performs a garbage collection operation for the shared heap.
  V8_EXPORT_PRIVATE void CollectSharedGarbage(
      GarbageCollectionReason gc_reason);

  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure();

  using GetExternallyAllocatedMemoryInBytesCallback =
      v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback;

  void SetGetExternallyAllocatedMemoryInBytesCallback(
      GetExternallyAllocatedMemoryInBytesCallback callback) {
    external_memory_callback_ = callback;
  }

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  // None of these methods iterate over the read-only roots. To do this use
  // ReadOnlyRoots::Iterate. Read-only root iteration is not necessary for
  // garbage collection and is usually only performed as part of
  // (de)serialization or heap verification.

  // Iterates over the strong roots and the weak roots.
  void IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
  void IterateRootsIncludingClients(RootVisitor* v,
                                    base::EnumSet<SkipRoot> options);
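
  // Illustrative call (assuming base::EnumSet can be built from a list of
  // enumerators): visit all strong roots but skip weak roots and the stack,
  // e.g. when the caller knows the stack holds no heap pointers:
  //
  //   heap->IterateRoots(&visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak,
  //                                                        SkipRoot::kStack});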

  // Iterates over entries in the smi roots list.  Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(RootVisitor* v);
  // Iterates over weak string tables.
  void IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
  void IterateWeakGlobalHandles(RootVisitor* v);
  void IterateBuiltins(RootVisitor* v);
  void IterateStackRoots(RootVisitor* v);

  // ===========================================================================
  // Remembered set API. =======================================================
  // ===========================================================================

  // Used to query the incremental marking status in generated code.
  Address* IsMarkingFlagAddress() {
    return reinterpret_cast<Address*>(&is_marking_flag_);
  }

  void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }

  void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
  void ClearRecordedSlotRange(Address start, Address end);
  static int InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot);

#ifdef DEBUG
  void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
  void VerifySlotRangeHasNoRecordedSlots(Address start, Address end);
#endif

  // ===========================================================================
  // Incremental marking API. ==================================================
  // ===========================================================================

  int GCFlagsForIncrementalMarking() {
    return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
                                          : kNoGCFlags;
  }

  // Start incremental marking and ensure that idle time handler can perform
  // incremental steps.
  V8_EXPORT_PRIVATE void StartIdleIncrementalMarking(
      GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  // Starts incremental marking assuming incremental marking is currently
  // stopped.
  V8_EXPORT_PRIVATE void StartIncrementalMarking(
      int gc_flags, GarbageCollectionReason gc_reason,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

  void StartIncrementalMarkingIfAllocationLimitIsReached(
      int gc_flags,
      GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
  void StartIncrementalMarkingIfAllocationLimitIsReachedBackground();

  void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
  // Synchronously finalizes incremental marking.
  V8_EXPORT_PRIVATE void FinalizeIncrementalMarkingAtomically(
      GarbageCollectionReason gc_reason);

  void CompleteSweepingFull();
  void CompleteSweepingYoung(GarbageCollector collector);

  // Ensures that sweeping is finished for that object's page.
  void EnsureSweepingCompleted(HeapObject object);

  IncrementalMarking* incremental_marking() const {
    return incremental_marking_.get();
  }

  MarkingBarrier* marking_barrier() const { return marking_barrier_.get(); }

  // ===========================================================================
  // Concurrent marking API. ===================================================
  // ===========================================================================

  ConcurrentMarking* concurrent_marking() const {
    return concurrent_marking_.get();
  }

  // The runtime uses this function to notify the GC of potentially unsafe
  // object layout changes that require special synchronization with the
  // concurrent marker. The old size is the size of the object before the
  // layout change. By default, recorded slots in the object are invalidated.
  // Pass InvalidateRecordedSlots::kNo if this is not necessary or to perform
  // this manually.
  void NotifyObjectLayoutChange(
      HeapObject object, const DisallowGarbageCollection&,
      InvalidateRecordedSlots invalidate_recorded_slots =
          InvalidateRecordedSlots::kYes);
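
  // Illustrative usage pattern (sketch): the caller announces the layout
  // change while GC is disallowed, then performs the transition:
  //
  //   DisallowGarbageCollection no_gc;
  //   heap->NotifyObjectLayoutChange(object, no_gc,
  //                                  InvalidateRecordedSlots::kYes);
  //   object.set_map(new_map);  // illustrative; actual transition code varies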

#ifdef VERIFY_HEAP
  // This function checks that either
  // - the map transition is safe,
  // - or it was communicated to GC using NotifyObjectLayoutChange.
  V8_EXPORT_PRIVATE void VerifyObjectLayoutChange(HeapObject object,
                                                  Map new_map);
  // Checks that this is a safe map transition.
  V8_EXPORT_PRIVATE void VerifySafeMapTransition(HeapObject object,
                                                 Map new_map);
#endif

  // ===========================================================================
  // Deoptimization support API. ===============================================
  // ===========================================================================

  // Setters for code offsets of well-known deoptimization targets.
  void SetConstructStubCreateDeoptPCOffset(int pc_offset);
  void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
  void SetInterpreterEntryReturnPCOffset(int pc_offset);

  // Invalidates references in the given {code} object that are referenced
  // transitively from the deoptimization data. Mutates write-protected code.
  void InvalidateCodeDeoptimizationData(Code code);

  void DeoptMarkedAllocationSites();

  bool DeoptMaybeTenuredAllocationSites();

  // ===========================================================================
  // Embedder heap tracer support. =============================================
  // ===========================================================================

  LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
    return local_embedder_heap_tracer_.get();
  }

  V8_EXPORT_PRIVATE void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
  EmbedderHeapTracer* GetEmbedderHeapTracer() const;

  void RegisterExternallyReferencedObject(Address* location);
  V8_EXPORT_PRIVATE void SetEmbedderStackStateForNextFinalization(
      EmbedderHeapTracer::EmbedderStackState stack_state);

  EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const;

  // ===========================================================================
  // Unified heap (C++) support. ===============================================
  // ===========================================================================

  V8_EXPORT_PRIVATE void AttachCppHeap(v8::CppHeap* cpp_heap);
  V8_EXPORT_PRIVATE void DetachCppHeap();

  v8::CppHeap* cpp_heap() const { return cpp_heap_; }

  const cppgc::EmbedderStackState* overriden_stack_state() const;

  // ===========================================================================
  // Embedder roots optimizations. =============================================
  // ===========================================================================

  V8_EXPORT_PRIVATE void SetEmbedderRootsHandler(EmbedderRootsHandler* handler);

  EmbedderRootsHandler* GetEmbedderRootsHandler() const;

  // ===========================================================================
  // External string table API. ================================================
  // ===========================================================================

  // Registers an external string.
  inline void RegisterExternalString(String string);

  // Called when a string's resource is changed. The size of the payload is
  // passed as an argument of the method.
  V8_EXPORT_PRIVATE void UpdateExternalString(String string, size_t old_payload,
                                              size_t new_payload);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String string);

  static String UpdateYoungReferenceInExternalStringTableEntry(
      Heap* heap, FullObjectSlot pointer);

  // ===========================================================================
  // Methods checking/returning the space of a given object/address. ===========
  // ===========================================================================

  // Returns whether the object resides in new space.
  static inline bool InYoungGeneration(Object object);
  static inline bool InYoungGeneration(MaybeObject object);
  static inline bool InYoungGeneration(HeapObject heap_object);
  static inline bool InFromPage(Object object);
  static inline bool InFromPage(MaybeObject object);
  static inline bool InFromPage(HeapObject heap_object);
  static inline bool InToPage(Object object);
  static inline bool InToPage(MaybeObject object);
  static inline bool InToPage(HeapObject heap_object);

  // Returns whether the object resides in old space.
  inline bool InOldSpace(Object object);

  // Checks whether an address/object is in the non-read-only heap (including
  // auxiliary area and unused area). Use IsValidHeapObject if checking both
  // heaps is required.
  V8_EXPORT_PRIVATE bool Contains(HeapObject value) const;
  // Same as above, but checks whether the object resides in any of the code
  // spaces.
  V8_EXPORT_PRIVATE bool ContainsCode(HeapObject value) const;

  // Checks whether an address/object is in the non-read-only heap (including
  // auxiliary area and unused area). Use IsValidHeapObject if checking both
  // heaps is required.
  V8_EXPORT_PRIVATE bool SharedHeapContains(HeapObject value) const;

  // Returns whether the object should be in the shared old space.
  V8_EXPORT_PRIVATE bool ShouldBeInSharedOldSpace(HeapObject value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space) const;

  // Returns true when this heap is shared.
  V8_EXPORT_PRIVATE bool IsShared();

  // Slow methods that can be used for verification as they can also be used
  // with off-heap Addresses.
  V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space) const;

  static inline Heap* FromWritableHeapObject(HeapObject obj);

1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266
  // ===========================================================================
  // Object statistics tracking. ===============================================
  // ===========================================================================

  // Returns the number of buckets used by object statistics tracking during a
  // major GC. Note that the following methods fail gracefully when the bounds
  // are exceeded though.
  size_t NumberOfTrackedHeapObjectTypes();

  // Returns object statistics about count and size at the last major GC.
  // Objects are being grouped into buckets that roughly resemble existing
  // instance types.
  size_t ObjectCountAtLastGC(size_t index);
  size_t ObjectSizeAtLastGC(size_t index);

  // Retrieves names of buckets used by object statistics tracking.
  bool GetObjectTypeName(size_t index, const char** object_type,
                         const char** object_sub_type);

1267 1268 1269 1270 1271 1272
  // The total number of native contexts object on the heap.
  size_t NumberOfNativeContexts();
  // The total number of native contexts that were detached but were not
  // garbage collected yet.
  size_t NumberOfDetachedContexts();

1273 1274 1275 1276 1277 1278 1279
  // ===========================================================================
  // Code statistics. ==========================================================
  // ===========================================================================

  // Collect code (Code and BytecodeArray objects) statistics.
  void CollectCodeStatistics();

1280 1281 1282 1283
  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

1284
  // Returns the maximum amount of memory reserved for the heap.
1285
  V8_EXPORT_PRIVATE size_t MaxReserved();
1286 1287
  size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
  size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
1288
  size_t MaxOldGenerationSize() { return max_old_generation_size(); }
1289

1290 1291 1292
  // Limit on the max old generation size imposed by the underlying allocator.
  V8_EXPORT_PRIVATE static size_t AllocatorLimitOnMaxOldGenerationSize();

1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307
  V8_EXPORT_PRIVATE static size_t HeapSizeFromPhysicalMemory(
      uint64_t physical_memory);
  V8_EXPORT_PRIVATE static void GenerationSizesFromHeapSize(
      size_t heap_size, size_t* young_generation_size,
      size_t* old_generation_size);
  V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromOldGenerationSize(
      size_t old_generation_size);
  V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromSemiSpaceSize(
      size_t semi_space_size);
  V8_EXPORT_PRIVATE static size_t SemiSpaceSizeFromYoungGenerationSize(
      size_t young_generation_size);
  V8_EXPORT_PRIVATE static size_t MinYoungGenerationSize();
  V8_EXPORT_PRIVATE static size_t MinOldGenerationSize();
  V8_EXPORT_PRIVATE static size_t MaxOldGenerationSize(
      uint64_t physical_memory);
1308

1309 1310
  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
1311
  size_t Capacity();
1312

1313
  // Returns the capacity of the old generation.
1314
  V8_EXPORT_PRIVATE size_t OldGenerationCapacity();
1315

1316 1317
  // Returns the amount of memory currently held alive by the unmapper.
  size_t CommittedMemoryOfUnmapper();
1318

1319
  // Returns the amount of memory currently committed for the heap.
1320
  size_t CommittedMemory();
1321

1322
  // Returns the amount of memory currently committed for the old space.
1323
  size_t CommittedOldGenerationMemory();
1324

1325
  // Returns the amount of executable memory currently committed for the heap.
1326
  size_t CommittedMemoryExecutable();
1327

1328 1329
  // Returns the amount of phyical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();
1330

1331
  // Returns the maximum amount of memory ever committed for the heap.
1332
  size_t MaximumCommittedMemory() { return maximum_committed_; }
1333

1334 1335 1336
  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();
1337

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  size_t Available();

  // Returns size of all objects residing in the heap.
  V8_EXPORT_PRIVATE size_t SizeOfObjects();

  // Returns size of all global handles in the heap.
  V8_EXPORT_PRIVATE size_t TotalGlobalHandlesSize();

  // Returns size of all allocated/used global handles in the heap.
  V8_EXPORT_PRIVATE size_t UsedGlobalHandlesSize();

  void UpdateSurvivalStatistics(int start_new_space_size);

  inline void IncrementPromotedObjectsSize(size_t object_size) {
    promoted_objects_size_ += object_size;
  }
  inline size_t promoted_objects_size() { return promoted_objects_size_; }

  inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
    semi_space_copied_object_size_ += object_size;
  }
  inline size_t semi_space_copied_object_size() {
    return semi_space_copied_object_size_;
  }

  inline size_t SurvivedYoungObjectSize() {
    return promoted_objects_size_ + semi_space_copied_object_size_;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(size_t survived) {
    survived_last_scavenge_ = survived;
    survived_since_last_expansion_ += survived;
  }

  void UpdateNewSpaceAllocationCounter();

  V8_EXPORT_PRIVATE size_t NewSpaceAllocationCounter();

  // This should be used only for testing.
  void set_new_space_allocation_counter(size_t new_value) {
    new_space_allocation_counter_ = new_value;
  }

  void UpdateOldGenerationAllocationCounter() {
    old_generation_allocation_counter_at_last_gc_ =
        OldGenerationAllocationCounter();
    old_generation_size_at_last_gc_ = 0;
  }

  size_t OldGenerationAllocationCounter() {
    return old_generation_allocation_counter_at_last_gc_ +
           PromotedSinceLastGC();
  }

  size_t EmbedderAllocationCounter() const;

  // This should be used only for testing.
  void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
    old_generation_allocation_counter_at_last_gc_ = new_value;
  }

  size_t PromotedSinceLastGC() {
    size_t old_generation_size = OldGenerationSizeOfObjects();
    return old_generation_size > old_generation_size_at_last_gc_
               ? old_generation_size - old_generation_size_at_last_gc_
               : 0;
  }
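
  // Worked example of the bookkeeping above (illustrative numbers only): if
  // old_generation_size_at_last_gc_ was 10 MB and the old generation now
  // holds 12 MB of objects, PromotedSinceLastGC() returns 2 MB and
  // OldGenerationAllocationCounter() returns the counter recorded at the
  // last GC plus those 2 MB.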

  int gc_count() const { return gc_count_; }

  bool is_current_gc_forced() const { return is_current_gc_forced_; }

  // Returns whether the currently in-progress GC should avoid increasing the
  // ages on any objects that live for a set number of collections.
  bool ShouldCurrentGCKeepAgesUnchanged() const {
    return is_current_gc_forced_ || is_current_gc_for_heap_profiler_;
  }

  // Returns the size of objects residing in non-new spaces.
  // Excludes external memory held by those objects.
  V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();

  // Returns the size of objects held by the EmbedderHeapTracer.
  V8_EXPORT_PRIVATE size_t EmbedderSizeOfObjects() const;

  // Returns the global size of objects (embedder + V8 non-new spaces).
  V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();

  // We allow incremental marking to overshoot the V8 and global allocation
  // limit for performance reasons. If the overshoot is too large then we are
  // more eager to finalize incremental marking.
  bool AllocationLimitOvershotByLargeMargin();

  // Return the maximum size objects can be before having to allocate them as
  // large objects. This takes into account allocating in the code space for
  // which the size of the allocatable space per V8 page may depend on the OS
  // page size at runtime. You may use kMaxRegularHeapObjectSize as a constant
  // instead if you know the allocation isn't in the code spaces.
  inline V8_EXPORT_PRIVATE int MaxRegularHeapObjectSize(
      AllocationType allocation);
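
  // Usage sketch (illustrative; |heap| and |size_in_bytes| are assumed to be
  // in scope):
  //
  //   bool needs_large_object_space =
  //       size_in_bytes >
  //       heap->MaxRegularHeapObjectSize(AllocationType::kCode);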

  // ===========================================================================
  // Prologue/epilogue callback methods. =======================================
  // ===========================================================================

  void AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                             GCType gc_type_filter, void* data);
  void RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                                void* data);

  void AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                             GCType gc_type_filter, void* data);
  void RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                                void* data);

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
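
  // Usage sketch (illustrative; MyState and |my_state| are hypothetical):
  //
  //   static void OnGC(v8::Isolate* isolate, v8::GCType type,
  //                    v8::GCCallbackFlags flags, void* data) {
  //     static_cast<MyState*>(data)->prologue_count++;
  //   }
  //   ...
  //   heap->AddGCPrologueCallback(OnGC, kGCTypeAll, &my_state);
  //   ...
  //   heap->RemoveGCPrologueCallback(OnGC, &my_state);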

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Creates a filler object and returns a heap object immediately after it.
  V8_EXPORT_PRIVATE HeapObject PrecedeWithFiller(HeapObject object,
                                                 int filler_size);

  // Creates a filler object if needed for alignment and returns a heap object
  // immediately after it. If any space is left after the returned object,
  // another filler object is created so the over-allocated memory is iterable.
  V8_WARN_UNUSED_RESULT HeapObject
  AlignWithFiller(HeapObject object, int object_size, int allocation_size,
                  AllocationAlignment alignment);

  // Allocate an external backing store with the given allocation callback.
  // If the callback fails (indicated by a nullptr result) then this function
  // will retry the allocation after performing GCs. This is useful for
  // external backing stores that may be retained by (unreachable) V8 objects
  // such as ArrayBuffers, ExternalStrings, etc.
  //
  // The function may also proactively trigger GCs even if the allocation
  // callback does not fail to keep the memory usage low.
  V8_EXPORT_PRIVATE void* AllocateExternalBackingStore(
      const std::function<void*(size_t)>& allocate, size_t byte_length);
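
  // Usage sketch (illustrative only; |byte_length| assumed, error handling
  // elided):
  //
  //   void* memory = heap->AllocateExternalBackingStore(
  //       [](size_t length) { return malloc(length); }, byte_length);
  //   // |memory| may still be nullptr if the callback keeps failing even
  //   // after the retry GCs.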

  // ===========================================================================
  // Allocation site tracking. =================================================
  // ===========================================================================

  // Updates the AllocationSite of a given {object}. The entry (including the
  // count) is cached on the local pretenuring feedback.
  inline void UpdateAllocationSite(
      Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);

  // Merges local pretenuring feedback into the global one. Note that this
  // method needs to be called after evacuation, as allocation sites may be
  // evacuated and this method resolves forward pointers accordingly.
  void MergeAllocationSitePretenuringFeedback(
      const PretenuringFeedbackMap& local_pretenuring_feedback);

  // Adds an allocation site to the list of sites to be pretenured during the
  // next collection. Added allocation sites are pretenured independent of
  // their feedback.
  V8_EXPORT_PRIVATE void PretenureAllocationSiteOnNextCollection(
      AllocationSite site);

  // ===========================================================================
  // Allocation tracking. ======================================================
  // ===========================================================================

  // Adds {new_space_observer} to new space and {observer} to any other space.
  void AddAllocationObserversToAllSpaces(
      AllocationObserver* observer, AllocationObserver* new_space_observer);

  // Removes {new_space_observer} from new space and {observer} from any other
  // space.
  void RemoveAllocationObserversFromAllSpaces(
      AllocationObserver* observer, AllocationObserver* new_space_observer);
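
  // Illustrative observer sketch; see src/heap/allocation-observer.h for the
  // authoritative interface (the Step() signature below is assumed from it):
  //
  //   class SampleObserver : public AllocationObserver {
  //    public:
  //     SampleObserver() : AllocationObserver(1024) {}  // step size in bytes
  //     void Step(int bytes_allocated, Address soon_object,
  //               size_t size) override {
  //       // Invoked approximately every 1024 bytes of allocation.
  //     }
  //   };
  //
  //   SampleObserver observer, new_space_observer;
  //   heap->AddAllocationObserversToAllSpaces(&observer, &new_space_observer);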

  // Check if the given object was recently allocated and its fields may appear
  // as uninitialized to background threads.
  // This predicate may be invoked from a background thread.
  inline bool IsPendingAllocation(HeapObject object);
  inline bool IsPendingAllocation(Object object);

  // Notifies that all previously allocated objects are properly initialized
  // and ensures that IsPendingAllocation returns false for them. This function
  // may be invoked only on the main thread.
  V8_EXPORT_PRIVATE void PublishPendingAllocations();
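
  // Illustrative use from a background thread (sketch only):
  //
  //   if (heap->IsPendingAllocation(object)) {
  //     // The fields of |object| may still be uninitialized; retry later or
  //     // bail out instead of reading them.
  //   }
  //
  // The main thread calls PublishPendingAllocations() once the previously
  // allocated objects are fully initialized.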

  // ===========================================================================
  // Heap object allocation tracking. ==========================================
  // ===========================================================================

  V8_EXPORT_PRIVATE void AddHeapObjectAllocationTracker(
      HeapObjectAllocationTracker* tracker);
  V8_EXPORT_PRIVATE void RemoveHeapObjectAllocationTracker(
      HeapObjectAllocationTracker* tracker);
  bool has_heap_object_allocation_tracker() const {
    return !allocation_trackers_.empty();
  }

  // ===========================================================================
  // Retaining path tracking. ==================================================
  // ===========================================================================

  // Adds the given object to the weak table of retaining path targets.
  // On each GC if the marker discovers the object, it will print the retaining
  // path. This requires the --track-retaining-path flag.
  void AddRetainingPathTarget(Handle<HeapObject> object,
                              RetainingPathOption option);

  // ===========================================================================
  // Stack frame support. ======================================================
  // ===========================================================================

  // Returns the Code object for a given interior pointer.
  Code GcSafeFindCodeForInnerPointer(Address inner_pointer);

  // Returns true if {addr} is contained within {code} and false otherwise.
  // Mostly useful for debugging.
  bool GcSafeCodeContains(Code code, Address addr);

  // Casts a heap object to a code object and checks if the inner_pointer is
  // within the object.
  Code GcSafeCastToCode(HeapObject object, Address inner_pointer);

  // Returns the map of an object. Can be used during garbage collection, i.e.
  // it supports a forwarded map. Fails if the map is not the code map.
  Map GcSafeMapOfCodeSpaceObject(HeapObject object);

  // ===========================================================================

#ifdef VERIFY_HEAP
  // Verify the heap is in its normal state before or after a GC.
  V8_EXPORT_PRIVATE void Verify();
  // Verify the read-only heap after all read-only heap objects have been
  // created.
  void VerifyReadOnlyHeap();
  void VerifyRememberedSetFor(HeapObject object);
#endif

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
#endif

#ifdef DEBUG
  void VerifyCountersAfterSweeping();
  void VerifyCountersBeforeConcurrentSweeping();

  void Print();
  void PrintHandles();

  // Report code statistics.
  void ReportCodeStatistics(const char* title);
#endif

  void* GetRandomMmapAddr() {
    void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
#if V8_OS_DARWIN
    // The Darwin kernel [as of macOS 10.12.5] does not clean up page
    // directory entries [PDE] created from mmap or mach_vm_allocate, even
    // after the region is destroyed. Using a virtual address space that is
    // too large causes a leak of about 1 wired [can never be paged out] page
    // per call to mmap(). The page is only reclaimed when the process is
    // killed. Confine the hint to a 32-bit section of the virtual address
    // space. See crbug.com/700928.
    uintptr_t offset = reinterpret_cast<uintptr_t>(result) & kMmapRegionMask;
    result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif  // V8_OS_DARWIN
#endif  // V8_TARGET_ARCH_X64
    return result;
  }

  void RegisterCodeObject(Handle<Code> code);

  static const char* GarbageCollectionReasonToString(
      GarbageCollectionReason gc_reason);

  // Calculates the number of entries for the full-sized number-to-string
  // cache.
  inline int MaxNumberToStringCacheSize() const;

  static Isolate* GetIsolateFromWritableObject(HeapObject object);

  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects.
  void MakeHeapIterable();

 private:
1634 1635
  class AllocationTrackerForDebugging;

1636 1637
  using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
                                                        FullObjectSlot pointer);
1638

1639 1640 1641 1642 1643
  // External strings table is a place where all external strings are
  // registered.  We need to keep track of such strings to properly
  // finalize them.
  class ExternalStringTable {
   public:
1644
    explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
1645 1646
    ExternalStringTable(const ExternalStringTable&) = delete;
    ExternalStringTable& operator=(const ExternalStringTable&) = delete;
1647

1648
    // Registers an external string.
1649 1650
    inline void AddString(String string);
    bool Contains(String string);
1651

1652
    void IterateAll(RootVisitor* v);
1653 1654
    void IterateYoung(RootVisitor* v);
    void PromoteYoung();
1655

1656 1657 1658
    // Restores internal invariant and gets rid of collected strings. Must be
    // called after each Iterate*() that modified the strings.
    void CleanUpAll();
1659
    void CleanUpYoung();
1660

1661
    // Finalize all registered external strings and clear tables.
1662 1663
    void TearDown();

1664
    void UpdateYoungReferences(
1665 1666 1667
        Heap::ExternalStringTableUpdaterCallback updater_func);
    void UpdateReferences(
        Heap::ExternalStringTableUpdaterCallback updater_func);
1668

1669
   private:
1670
    void Verify();
1671
    void VerifyYoung();
1672

1673
    Heap* const heap_;
1674

    // To speed up scavenge collections young strings are kept separate from
    // old strings.
    std::vector<Object> young_strings_;
    std::vector<Object> old_strings_;
  };

1681 1682 1683
  struct StringTypeTable {
    InstanceType type;
    int size;
1684
    RootIndex index;
1685 1686
  };

1687
  struct ConstantStringTable {
1688
    const char* contents;
1689
    RootIndex index;
1690 1691 1692 1693 1694
  };

  struct StructTable {
    InstanceType type;
    int size;
1695
    RootIndex index;
1696 1697
  };

1698 1699 1700 1701
  struct GCCallbackTuple {
    GCCallbackTuple(v8::Isolate::GCCallbackWithData callback, GCType gc_type,
                    void* data)
        : callback(callback), gc_type(gc_type), data(data) {}
1702

1703
    bool operator==(const GCCallbackTuple& other) const;
1704

1705
    v8::Isolate::GCCallbackWithData callback;
1706
    GCType gc_type;
1707
    void* data;
1708
  };
1709

1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724
  static const int kInitialEvalCacheSize = 64;
  static const int kInitialNumberStringCacheSize = 256;

  static const int kRememberedUnmappedPages = 128;

  static const StringTypeTable string_type_table[];
  static const ConstantStringTable constant_string_table[];
  static const StructTable struct_table[];

  static const int kYoungSurvivalRateHighThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;
  static const int kOldSurvivalRateLowThreshold = 10;

  static const int kMaxMarkCompactsInIdleRound = 7;

1725
  static const int kInitialFeedbackCapacity = 256;
1726 1727

  Heap();
1728
  ~Heap();
1729

1730 1731 1732
  Heap(const Heap&) = delete;
  Heap& operator=(const Heap&) = delete;

1733 1734 1735
  static bool IsRegularObjectAllocation(AllocationType allocation) {
    return AllocationType::kYoung == allocation ||
           AllocationType::kOld == allocation;
1736 1737
  }

1738 1739 1740 1741
  static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
    return 0;
  }

1742
#define ROOT_ACCESSOR(type, name, CamelName) inline void set_##name(type value);
1743 1744 1745
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

1746
  void set_current_gc_flags(int flags) { current_gc_flags_ = flags; }
1747 1748

  inline bool ShouldReduceMemory() const {
1749
    return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
1750 1751
  }

1752 1753
  int NumberOfScavengeTasks();

1754
  // Checks whether a global GC is necessary
1755 1756
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);
1757

1758 1759 1760 1761 1762 1763
  // Free all LABs in the heap.
  void FreeLinearAllocationAreas();

  // Free all shared LABs.
  void FreeSharedLinearAllocationAreas();

1764 1765 1766
  // Free all shared LABs of main thread.
  void FreeMainThreadSharedLinearAllocationAreas();

1767 1768 1769
  // Performs garbage collection in a safepoint.
  // Returns the number of freed global handles.
  size_t PerformGarbageCollection(
1770 1771
      GarbageCollector collector, GarbageCollectionReason gc_reason,
      const char* collector_reason,
1772
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1773

1774 1775 1776 1777
  // Performs garbage collection in the shared heap.
  void PerformSharedGarbageCollection(Isolate* initiator,
                                      GarbageCollectionReason gc_reason);

1778 1779 1780
  inline void UpdateOldSpaceLimits();

  bool CreateInitialMaps();
1781
  void CreateInternalAccessorInfoObjects();
1782 1783 1784 1785 1786 1787
  void CreateInitialObjects();

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Uncommit unused semi space.
1788
  V8_EXPORT_PRIVATE bool UncommitFromSpace();
1789 1790 1791 1792

  // Fill in bogus values in from space
  void ZapFromSpace();

1793
  // Zaps the memory of a code object.
1794 1795
  V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
                                       int size_in_bytes);
1796

1797 1798 1799 1800
  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages. If the memory after the object header
  // of the filler should be cleared, pass in kClearFreedMemory. The default is
  // kDontClearFreedMemory.
1801 1802 1803 1804
  V8_EXPORT_PRIVATE HeapObject
  CreateFillerObjectAt(Address addr, int size,
                       ClearFreedMemoryMode clear_memory_mode =
                           ClearFreedMemoryMode::kDontClearFreedMemory);
1805

1806
  // Range write barrier implementation.
1807
  template <int kModeMask, typename TSlot>
1808
  V8_INLINE void WriteBarrierForRangeImpl(MemoryChunk* source_page,
1809 1810
                                          HeapObject object, TSlot start_slot,
                                          TSlot end_slot);
1811

  // Deopts all code that contains allocation instructions which are tenured or
  // not tenured. Moreover it clears the pretenuring allocation site statistics.
  void ResetAllAllocationSitesDependentCode(AllocationType allocation);
1815 1816 1817 1818 1819 1820

  // Evaluates local pretenuring for the old space and calls
  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
  // the old space.
  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);

1821
  // Record statistics after garbage collection.
1822 1823 1824 1825 1826
  void ReportStatisticsAfterGC();

  // Flush the number to string cache.
  void FlushNumberStringCache();

1827
  void ConfigureInitialOldGenerationSize();
1828

1829 1830
  double ComputeMutatorUtilization(const char* tag, double mutator_speed,
                                   double gc_speed);
1831 1832
  bool HasLowYoungGenerationAllocationRate();
  bool HasLowOldGenerationAllocationRate();
1833
  bool HasLowEmbedderAllocationRate();
1834 1835 1836

  void ReduceNewSpaceSize();

1837
  GCIdleTimeHeapState ComputeHeapState();
1838 1839

  bool PerformIdleTimeAction(GCIdleTimeAction action,
1840
                             GCIdleTimeHeapState heap_state,
1841 1842 1843
                             double deadline_in_ms);

  void IdleNotificationEpilogue(GCIdleTimeAction action,
1844 1845
                                GCIdleTimeHeapState heap_state, double start_ms,
                                double deadline_in_ms);
1846

1847 1848
  int NextAllocationTimeout(int current_timeout = 0);

1849 1850 1851
  void PrintMaxMarkingLimitReached();
  void PrintMaxNewSpaceSizeReached();

1852 1853
  int NextStressMarkingLimit();

1854 1855 1856
  void AddToRingBuffer(const char* string);
  void GetFromRingBuffer(char* buffer);

1857
  void CompactRetainedMaps(WeakArrayList retained_maps);
1858

1859
  void CollectGarbageOnMemoryPressure();
1860

1861 1862
  void EagerlyFreeExternalMemory();

1863
  bool InvokeNearHeapLimitCallback();
1864

1865
  void ComputeFastPromotionMode();
1866

1867 1868 1869 1870
  // Attempt to over-approximate the weak closure by marking object groups and
  // implicit references from global handles, but don't atomically complete
  // marking. If we continue to mark incrementally, we might have marked
  // objects that die later.
1871 1872
  void FinalizeIncrementalMarkingIncrementally(
      GarbageCollectionReason gc_reason);
1873

1874 1875 1876
  void InvokeIncrementalMarkingPrologueCallbacks();
  void InvokeIncrementalMarkingEpilogueCallbacks();

  // Returns the timer used for a given GC type.
  // - GCScavenger: young generation GC
  // - GCCompactor: full GC
  // - GCFinalizeMC: finalization of incremental full GC
  // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
  // memory reduction
  TimedHistogram* GCTypeTimer(GarbageCollector collector);
  TimedHistogram* GCTypePriorityTimer(GarbageCollector collector);

  // ===========================================================================
  // Pretenuring. ==============================================================
  // ===========================================================================

  // Pretenuring decisions are made based on feedback collected during new space
  // evacuation. Note that between feedback collection and calling this method
  // objects in old space must not move.
  void ProcessPretenuringFeedback();

  // Removes an entry from the global pretenuring storage.
1896
  void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
1897

1898 1899 1900 1901 1902 1903
  // ===========================================================================
  // Actual GC. ================================================================
  // ===========================================================================

  // Code that should be run before and after each GC.  Includes some
  // reporting/verification activities when compiled with DEBUG set.
1904 1905
  void GarbageCollectionPrologue(GarbageCollectionReason gc_reason,
                                 const v8::GCCallbackFlags gc_callback_flags);
1906
  void GarbageCollectionPrologueInSafepoint();
1907
  void GarbageCollectionEpilogue(GarbageCollector collector);
1908
  void GarbageCollectionEpilogueInSafepoint(GarbageCollector collector);
1909 1910 1911

  // Performs a major collection in the whole heap.
  void MarkCompact();
1912 1913
  // Performs a minor collection of just the young generation.
  void MinorMarkCompact();
1914 1915 1916 1917 1918 1919 1920

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();
  void MarkCompactEpilogue();

  // Performs a minor collection in new generation.
  void Scavenge();
1921
  void EvacuateYoungGeneration();
1922

1923
  void UpdateYoungReferencesInExternalStringTable(
1924 1925 1926 1927 1928 1929 1930 1931 1932
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
  void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
  void ProcessNativeContexts(WeakObjectRetainer* retainer);
  void ProcessAllocationSites(WeakObjectRetainer* retainer);
1933
  void ProcessDirtyJSFinalizationRegistries(WeakObjectRetainer* retainer);
1934
  void ProcessWeakListRoots(WeakObjectRetainer* retainer);
1935 1936 1937 1938 1939

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

1940
  inline size_t OldGenerationSpaceAvailable() {
1941 1942 1943
    uint64_t bytes = OldGenerationSizeOfObjects() +
                     AllocatedExternalMemorySinceMarkCompact();

1944 1945
    if (old_generation_allocation_limit() <= bytes) return 0;
    return old_generation_allocation_limit() - static_cast<size_t>(bytes);
1946
  }
1947

1948
  void UpdateTotalGCTime(double duration);
1949 1950 1951

  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

1952 1953 1954 1955 1956
  bool IsIneffectiveMarkCompact(size_t old_generation_size,
                                double mutator_utilization);
  void CheckIneffectiveMarkCompact(size_t old_generation_size,
                                   double mutator_utilization);

1957 1958 1959 1960 1961 1962
  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

1963 1964 1965 1966
  // ===========================================================================
  // Growing strategy. =========================================================
  // ===========================================================================

1967
  MemoryReducer* memory_reducer() { return memory_reducer_.get(); }
1968

1969 1970 1971 1972
  // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
  // This constant limits the effect of load RAIL mode on GC.
  // The value is arbitrary and chosen as the largest load time observed in
  // v8 browsing benchmarks.
1973
  static const int kMaxLoadTimeMs = 7000;
1974 1975 1976

  bool ShouldOptimizeForLoadTime();

1977
  size_t old_generation_allocation_limit() const {
1978 1979 1980 1981 1982
    return old_generation_allocation_limit_.load(std::memory_order_relaxed);
  }

  void set_old_generation_allocation_limit(size_t newlimit) {
    old_generation_allocation_limit_.store(newlimit, std::memory_order_relaxed);
1983 1984
  }

1985 1986
  size_t global_allocation_limit() const { return global_allocation_limit_; }

1987 1988 1989 1990 1991 1992 1993 1994
  size_t max_old_generation_size() {
    return max_old_generation_size_.load(std::memory_order_relaxed);
  }

  void set_max_old_generation_size(size_t value) {
    max_old_generation_size_.store(value, std::memory_order_relaxed);
  }

1995
  bool always_allocate() { return always_allocate_scope_count_ != 0; }
1996

1997
  V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
1998 1999
  V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(LocalHeap* local_heap,
                                                          size_t size);
2000
  V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(size_t size);
2001

2002 2003 2004
  bool ShouldExpandOldGenerationOnSlowAllocation(
      LocalHeap* local_heap = nullptr);
  bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
2005
  bool IsMainThreadParked(LocalHeap* local_heap);
2006

2007 2008
  HeapGrowingMode CurrentHeapGrowingMode();

2009 2010
  double PercentToOldGenerationLimit();
  double PercentToGlobalMemoryLimit();
2011 2012 2013 2014 2015 2016
  enum class IncrementalMarkingLimit {
    kNoLimit,
    kSoftLimit,
    kHardLimit,
    kFallbackForEmbedderLimit
  };
2017 2018
  IncrementalMarkingLimit IncrementalMarkingLimitReached();

2019 2020
  bool ShouldStressCompaction() const;

2021 2022 2023 2024
  bool UseGlobalMemoryScheduling() const {
    return FLAG_global_gc_scheduling && local_embedder_heap_tracer();
  }

2025
  base::Optional<size_t> GlobalMemoryAvailable();
2026

2027 2028
  void RecomputeLimits(GarbageCollector collector);

2029 2030 2031 2032 2033
  // ===========================================================================
  // Idle notification. ========================================================
  // ===========================================================================

  bool RecentIdleNotificationHappened();
2034 2035 2036 2037 2038 2039

  // ===========================================================================
  // GC Tasks. =================================================================
  // ===========================================================================

  void ScheduleScavengeTaskIfNeeded();
2040

2041 2042 2043 2044 2045
  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Allocates a JS Map in the heap.
2046
  V8_WARN_UNUSED_RESULT AllocationResult
2047
  AllocateMap(InstanceType instance_type, int instance_size,
2048 2049
              ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
              int inobject_properties = 0);
2050

2051
  // Allocate an uninitialized object.  The memory is non-executable if the
2052 2053 2054
  // hardware and OS allow.  This is the single choke-point for allocations
  // performed by the runtime and should not be bypassed (to extend this to
  // inlined allocations, use the Heap::DisableInlineAllocation() support).
2055 2056 2057 2058 2059
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateRaw(int size_in_bytes, AllocationType allocation,
              AllocationOrigin origin = AllocationOrigin::kRuntime,
              AllocationAlignment alignment = kTaggedAligned);

2060 2061 2062 2063 2064 2065 2066
  // Allocates an uninitialized large object. Used as dispatch by
  // `AllocateRaw()` for large objects. Do not call this from anywhere else.
  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRawLargeInternal(int size_in_bytes, AllocationType allocation,
                           AllocationOrigin origin = AllocationOrigin::kRuntime,
                           AllocationAlignment alignment = kTaggedAligned);

  // This method will try to allocate objects quickly (AllocationType::kYoung);
  // otherwise it falls back to a slower path indicated by the mode.
  enum AllocationRetryMode { kLightRetry, kRetryOrFail };
  template <AllocationRetryMode mode>
  V8_WARN_UNUSED_RESULT V8_INLINE HeapObject
  AllocateRawWith(int size, AllocationType allocation,
                  AllocationOrigin origin = AllocationOrigin::kRuntime,
                  AllocationAlignment alignment = kTaggedAligned);
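
  // Illustrative sketch of the two retry modes (internal use; |size| assumed):
  //
  //   HeapObject obj =
  //       AllocateRawWith<kRetryOrFail>(size, AllocationType::kOld);
  //   // kRetryOrFail either returns a valid object or ends in a fatal
  //   // out-of-memory error, whereas kLightRetry may return a null
  //   // HeapObject that callers must check for.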

  // Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
  V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
      int size, AllocationType allocation,
      AllocationOrigin origin = AllocationOrigin::kRuntime,
2080
      AllocationAlignment alignment = kTaggedAligned);
2081

2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100
  // This method will try to perform an allocation of a given size of a given
  // AllocationType. If the allocation fails, a regular full garbage collection
  // is triggered and the allocation is retried. This is performed multiple
  // times. If after that retry procedure the allocation still fails nullptr is
  // returned.
  V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
      int size, AllocationType allocation, AllocationOrigin origin,
      AllocationAlignment alignment = kTaggedAligned);

  // This method will try to perform an allocation of a given size of a given
  // AllocationType. If the allocation fails, a regular full garbage collection
  // is triggered and the allocation is retried. This is performed multiple
  // times. If after that retry procedure the allocation still fails a "hammer"
  // garbage collection is triggered which tries to significantly reduce memory.
  // If the allocation still fails after that a fatal error is thrown.
  V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
      int size, AllocationType allocation, AllocationOrigin origin,
      AllocationAlignment alignment = kTaggedAligned);

2101
  // Allocates a heap object based on the map.
2102
  V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
2103
                                                  AllocationType allocation);
2104 2105

  // Allocates a partial map for bootstrapping.
2106 2107
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocatePartialMap(InstanceType instance_type, int instance_size);
2108

2109
  void FinalizePartialMap(Map map);
2110

2111
  void set_force_oom(bool value) { force_oom_ = value; }
2112 2113 2114
  void set_force_gc_on_next_allocation() {
    force_gc_on_next_allocation_ = true;
  }
2115

2116 2117 2118
  // Helper for IsPendingAllocation.
  inline bool IsPendingAllocationInternal(HeapObject object);

2119 2120
  // ===========================================================================
  // Retaining path tracing ====================================================
2121 2122
  // ===========================================================================

2123 2124 2125
  void AddRetainer(HeapObject retainer, HeapObject object);
  void AddEphemeronRetainer(HeapObject retainer, HeapObject object);
  void AddRetainingRoot(Root root, HeapObject object);
2126 2127
  // Returns true if the given object is a target of retaining path tracking.
  // Stores the option corresponding to the object in the provided *option.
2128 2129
  bool IsRetainingPathTarget(HeapObject object, RetainingPathOption* option);
  void PrintRetainingPath(HeapObject object, RetainingPathOption option);
2130
  void UpdateRetainersAfterScavenge();
2131

2132
#ifdef DEBUG
2133
  V8_EXPORT_PRIVATE void IncrementObjectCounters();
2134 2135
#endif  // DEBUG

2136
  std::vector<Handle<NativeContext>> FindAllNativeContexts();
2137
  std::vector<WeakArrayList> FindAllRetainedMaps();
2138 2139
  MemoryMeasurement* memory_measurement() { return memory_measurement_.get(); }

2140 2141 2142 2143
  AllocationType allocation_type_for_in_place_internalizable_strings() const {
    return allocation_type_for_in_place_internalizable_strings_;
  }

2144 2145
  bool IsStressingScavenge();

2146
  ExternalMemoryAccounting external_memory_;
2147

2148 2149
  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
2150
  Isolate* isolate_ = nullptr;
2151

2152 2153
  // These limits are initialized in Heap::ConfigureHeap based on the resource
  // constraints and flags.
2154
  size_t code_range_size_ = 0;
2155 2156
  size_t max_semi_space_size_ = 0;
  size_t initial_semispace_size_ = 0;
2157 2158 2159 2160 2161
  // Full garbage collections can be skipped if the old generation size
  // is below this threshold.
  size_t min_old_generation_size_ = 0;
  // If the old generation size exceeds this limit, then V8 will
  // crash with out-of-memory error.
2162
  std::atomic<size_t> max_old_generation_size_{0};
2163
  // TODO(mlippautz): Clarify whether this should take some embedder
2164
  // configurable limit into account.
2165
  size_t min_global_memory_size_ = 0;
2166 2167 2168 2169 2170
  size_t max_global_memory_size_ = 0;

  size_t initial_max_old_generation_size_ = 0;
  size_t initial_max_old_generation_size_threshold_ = 0;
  size_t initial_old_generation_size_ = 0;
2171 2172
  bool old_generation_size_configured_ = false;
  size_t maximum_committed_ = 0;
2173
  size_t old_generation_capacity_after_bootstrap_ = 0;
2174

2175
  // Backing store bytes (array buffers and external strings).
2176 2177 2178
  // Use uint64_t counter since the counter could overflow the 32-bit range
  // temporarily on 32-bit.
  std::atomic<uint64_t> backing_store_bytes_{0};
2179

2180 2181
  // For keeping track of how much data has survived
  // scavenge since last new space expansion.
2182
  size_t survived_since_last_expansion_ = 0;
2183

2184
  // ... and since the last scavenge.
2185
  size_t survived_last_scavenge_ = 0;
2186

2187 2188
  // This is not the depth of nested AlwaysAllocateScope's but rather a single
  // count, as scopes can be acquired from multiple tasks (read: threads).
2189
  std::atomic<size_t> always_allocate_scope_count_{0};
2190

2191 2192
  // Stores the memory pressure level that set by MemoryPressureNotification
  // and reset by a mark-compact garbage collection.
2193
  std::atomic<MemoryPressureLevel> memory_pressure_level_;
2194

2195
  std::vector<std::pair<v8::NearHeapLimitCallback, void*>>
2196
      near_heap_limit_callbacks_;
2197

2198
  // For keeping track of context disposals.
2199
  int contexts_disposed_ = 0;
2200

2201 2202 2203 2204
  NewSpace* new_space_ = nullptr;
  OldSpace* old_space_ = nullptr;
  CodeSpace* code_space_ = nullptr;
  MapSpace* map_space_ = nullptr;
2205
  OldLargeObjectSpace* lo_space_ = nullptr;
2206
  CodeLargeObjectSpace* code_lo_space_ = nullptr;
2207 2208
  NewLargeObjectSpace* new_lo_space_ = nullptr;
  ReadOnlySpace* read_only_space_ = nullptr;
2209

2210 2211 2212 2213 2214 2215
  OldSpace* shared_old_space_ = nullptr;
  MapSpace* shared_map_space_ = nullptr;

  std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
  std::unique_ptr<ConcurrentAllocator> shared_map_allocator_;

2216 2217
  // Map from the space id to the space.
  Space* space_[LAST_SPACE + 1];
2218

2219 2220
  LocalHeap* main_thread_local_heap_ = nullptr;

2221
  // List for tracking ArrayBufferExtensions
2222 2223
  ArrayBufferExtension* old_array_buffer_extensions_ = nullptr;
  ArrayBufferExtension* young_array_buffer_extensions_ = nullptr;
2224

2225 2226
  // Determines whether code space is write-protected. This is essentially a
  // race-free copy of the {FLAG_write_protect_code_memory} flag.
2227
  bool write_protect_code_memory_ = false;
2228

2229
  // Holds the number of open CodeSpaceMemoryModificationScopes.
2230
  uintptr_t code_space_memory_modification_scope_depth_ = 0;
2231

2232
  // Holds the number of open CodePageCollectionMemoryModificationScopes.
2233 2234
  std::atomic<uintptr_t> code_page_collection_memory_modification_scope_depth_{
      0};
2235

2236
  std::atomic<HeapState> gc_state_{NOT_IN_GC};
2237 2238

  int gc_post_processing_depth_ = 0;
2239

2240
  // Returns the amount of external memory registered since last global gc.
2241
  V8_EXPORT_PRIVATE uint64_t AllocatedExternalMemorySinceMarkCompact();
2242

2243 2244
  // Starts marking when stress_marking_percentage_% of the marking start limit
  // is reached.
2245
  std::atomic<int> stress_marking_percentage_{0};
2246

2247 2248
  // Observer that causes more frequent checks for reached incremental
  // marking limit.
2249
  AllocationObserver* stress_marking_observer_ = nullptr;
2250

2251
  // Observer that can cause early scavenge start.
2252
  StressScavengeObserver* stress_scavenge_observer_ = nullptr;
2253

  // The maximum percent of the marking limit reached without causing marking.
  // This is tracked when specifying --fuzzer-gc-analysis.
  double max_marking_limit_reached_ = 0.0;
2257

2258
  // How many mark-sweep collections happened.
2259
  unsigned int ms_count_ = 0;
2260

2261
  // How many gc happened.
2262
  unsigned int gc_count_ = 0;
2263

2264 2265
  // The number of Mark-Compact garbage collections that are considered as
  // ineffective. See IsIneffectiveMarkCompact() predicate.
2266
  int consecutive_ineffective_mark_compacts_ = 0;
2267

2268
  static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
2269
  uintptr_t mmap_region_base_ = 0;
2270

2271
  // For post mortem debugging.
2272
  int remembered_unmapped_pages_index_ = 0;
2273
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];
2274

2275 2276 2277 2278
  // Limit that triggers a global GC on the next (normally caused) GC.  This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke, before expanding a paged space in the old
  // generation and on every allocation in large object space.
2279
  std::atomic<size_t> old_generation_allocation_limit_{0};
2280
  size_t global_allocation_limit_ = 0;
2281

2282 2283
  // Weak list heads, threaded through the objects.
  // List heads are initialized lazily and contain the undefined_value at start.
2284 2285 2286
  // {native_contexts_list_} is an Address instead of an Object to allow the use
  // of atomic accessors.
  std::atomic<Address> native_contexts_list_;
2287
  Object allocation_sites_list_;
2288
  Object dirty_js_finalization_registries_list_;
2289
  // Weak list tails.
2290
  Object dirty_js_finalization_registries_list_tail_;
2291

2292 2293
  std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
  std::vector<GCCallbackTuple> gc_prologue_callbacks_;
2294

2295 2296
  GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_;

2297 2298
  int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];

2299 2300 2301 2302 2303 2304 2305 2306 2307
  size_t promoted_objects_size_ = 0;
  double promotion_ratio_ = 0.0;
  double promotion_rate_ = 0.0;
  size_t semi_space_copied_object_size_ = 0;
  size_t previous_semi_space_copied_object_size_ = 0;
  double semi_space_copied_rate_ = 0.0;
  int nodes_died_in_new_space_ = 0;
  int nodes_copied_in_new_space_ = 0;
  int nodes_promoted_ = 0;
2308

  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size we deoptimize
  // the code that belongs to the allocation site and derive the lifetime
  // of the allocation site.
  unsigned int maximum_size_scavenges_ = 0;
2314

2315
  // Total time spent in GC.
2316
  double total_gc_time_ms_ = 0.0;
2317

2318
  // Last time an idle notification happened.
2319
  double last_idle_notification_time_ = 0.0;
2320

2321
  // Last time a garbage collection happened.
2322 2323
  double last_gc_time_ = 0.0;

2324 2325
  std::unique_ptr<GCTracer> tracer_;
  std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
  std::unique_ptr<MinorMarkCompactCollector> minor_mark_compact_collector_;
2327
  std::unique_ptr<ScavengerCollector> scavenger_collector_;
2328 2329
  std::unique_ptr<ArrayBufferSweeper> array_buffer_sweeper_;

2330 2331 2332 2333
  std::unique_ptr<MemoryAllocator> memory_allocator_;
  std::unique_ptr<IncrementalMarking> incremental_marking_;
  std::unique_ptr<ConcurrentMarking> concurrent_marking_;
  std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
2334
  std::unique_ptr<MemoryMeasurement> memory_measurement_;
2335 2336 2337 2338
  std::unique_ptr<MemoryReducer> memory_reducer_;
  std::unique_ptr<ObjectStats> live_object_stats_;
  std::unique_ptr<ObjectStats> dead_object_stats_;
  std::unique_ptr<ScavengeJob> scavenge_job_;
2339
  std::unique_ptr<AllocationObserver> scavenge_task_observer_;
2340
  std::unique_ptr<AllocationObserver> stress_concurrent_allocation_observer_;
2341
  std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
2342
  std::unique_ptr<MarkingBarrier> marking_barrier_;
2343 2344
  std::unique_ptr<AllocationTrackerForDebugging>
      allocation_tracker_for_debugging_;
2345

2346 2347 2348 2349 2350 2351 2352
  // This object controls virtual space reserved for code on the V8 heap. This
  // is only valid for 64-bit architectures where kRequiresCodeRange.
  //
  // Owned by the heap when !V8_COMPRESS_POINTERS_IN_SHARED_CAGE, otherwise is
  // process-wide.
  std::shared_ptr<CodeRange> code_range_;

2353 2354
  // The embedder owns the C++ heap.
  v8::CppHeap* cpp_heap_ = nullptr;
2355

2356 2357
  EmbedderRootsHandler* embedder_roots_handler_ = nullptr;

2358
  StrongRootsEntry* strong_roots_head_ = nullptr;
2359
  base::Mutex strong_roots_mutex_;
2360

2361 2362
  bool need_to_remove_stress_concurrent_allocation_observer_ = false;

2363 2364 2365
  // This counter is increased before each GC and never reset.
  // To account for the bytes allocated since the last GC, use the
  // NewSpaceAllocationCounter() function.
2366
  size_t new_space_allocation_counter_ = 0;
2367

2368 2369 2370
  // This counter is increased before each GC and never reset. To
  // account for the bytes allocated since the last GC, use the
  // OldGenerationAllocationCounter() function.
2371
  size_t old_generation_allocation_counter_at_last_gc_ = 0;
2372 2373

  // The size of objects in old generation after the last MarkCompact GC.
2374
  size_t old_generation_size_at_last_gc_{0};
2375

2376 2377 2378
  // The size of global memory after the last MarkCompact GC.
  size_t global_memory_at_last_gc_ = 0;

  // The feedback storage is used to store allocation sites (keys) and how often
  // they have been visited (values) by finding a memento behind an object. The
  // storage is only alive temporarily during a GC. The invariant is that all
  // pointers in this map are already fixed, i.e., they do not point to
  // forwarding pointers.
  PretenuringFeedbackMap global_pretenuring_feedback_;
2385

2386 2387 2388
  std::unique_ptr<GlobalHandleVector<AllocationSite>>
      allocation_sites_to_pretenure_;

2389
  char trace_ring_buffer_[kTraceRingBufferSize];
2390 2391

  // Used as boolean.
2392
  uint8_t is_marking_flag_ = 0;
2393

2394 2395 2396
  // If it's not full then the data is from 0 to ring_buffer_end_.  If it's
  // full then the data is from ring_buffer_end_ to the end of the buffer and
  // from 0 to ring_buffer_end_.
2397 2398
  bool ring_buffer_full_ = false;
  size_t ring_buffer_end_ = 0;
2399

2400
  // Flag is set when the heap has been configured.  The heap can be repeatedly
2401
  // configured through the API until it is set up.
2402
  bool configured_ = false;
2403

2404
  // Currently set GC flags that are respected by all GC components.
2405
  int current_gc_flags_ = Heap::kNoGCFlags;
2406

2407 2408
  // Currently set GC callback flags that are used to pass information between
  // the embedder and V8's GC.
2409 2410
  GCCallbackFlags current_gc_callback_flags_ =
      GCCallbackFlags::kNoGCCallbackFlags;
2411

2412
  std::unique_ptr<IsolateSafepoint> safepoint_;
2413

2414
  bool is_current_gc_forced_ = false;
2415
  bool is_current_gc_for_heap_profiler_ = false;
2416

2417 2418
  ExternalStringTable external_string_table_;

2419 2420
  const AllocationType allocation_type_for_in_place_internalizable_strings_;

2421
  base::Mutex relocation_mutex_;
2422

2423
  std::unique_ptr<CollectionBarrier> collection_barrier_;
2424

2425 2426
  int ignore_local_gc_requests_depth_ = 0;

2427
  int gc_callbacks_depth_ = 0;
2428

2429
  bool deserialization_complete_ = false;
2430

2431 2432
  int max_regular_code_object_size_ = 0;

2433
  bool fast_promotion_mode_ = false;
2434

2435
  // Used for testing purposes.
2436
  bool force_oom_ = false;
2437
  bool force_gc_on_next_allocation_ = false;
2438
  bool delay_sweeper_tasks_for_testing_ = false;
2439

2440
  HeapObject pending_layout_change_object_;
2441

2442
  base::Mutex unprotected_memory_chunks_mutex_;
2443 2444
  std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;

2445 2446 2447 2448 2449 2450 2451
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // remain until the next failure and garbage collection.
  int allocation_timeout_ = 0;
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

2452 2453
  std::unordered_map<HeapObject, HeapObject, Object::Hasher> retainer_;
  std::unordered_map<HeapObject, Root, Object::Hasher> retaining_root_;
2454 2455
  // If an object is retained by an ephemeron, then the retaining key of the
  // ephemeron is stored in this map.
2456 2457
  std::unordered_map<HeapObject, HeapObject, Object::Hasher>
      ephemeron_retainer_;
  // For each index in the retaining_path_targets_ array this map
  // stores the option of the corresponding target.
  std::unordered_map<int, RetainingPathOption> retaining_path_target_option_;
2461

2462 2463
  std::vector<HeapObjectAllocationTracker*> allocation_trackers_;

2464
  bool is_finalization_registry_cleanup_task_posted_ = false;
2465

2466 2467
  std::unique_ptr<third_party_heap::Heap> tp_heap_;

  // Classes in "heap" can be friends.
  friend class AlwaysAllocateScope;
  friend class ArrayBufferCollector;
  friend class ArrayBufferSweeper;
  friend class ConcurrentMarking;
  friend class EvacuateVisitorBase;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class HeapObjectIterator;
  friend class ScavengeTaskObserver;
  friend class IgnoreLocalGCRequests;
  friend class IncrementalMarking;
  friend class IncrementalMarkingJob;
  friend class LargeObjectSpace;
  friend class LocalHeap;
  friend class MarkingBarrier;
  friend class OldLargeObjectSpace;
  friend class OptionalAlwaysAllocateScope;
  template <typename ConcreteVisitor, typename MarkingState>
  friend class MarkingVisitorBase;
  friend class MarkCompactCollector;
  friend class MarkCompactCollectorBase;
  friend class MinorMarkCompactCollector;
  friend class NewLargeObjectSpace;
  friend class NewSpace;
  friend class ObjectStatsCollector;
  friend class Page;
  friend class PagedSpace;
  friend class ReadOnlyRoots;
  friend class Scavenger;
  friend class ScavengerCollector;
  friend class StressConcurrentAllocationObserver;
  friend class Space;
  friend class Sweeper;
  friend class heap::TestMemoryAllocatorScope;
  friend class third_party_heap::Heap;
  friend class third_party_heap::Impl;

  // The allocator interface.
  friend class Factory;
  friend class LocalFactory;
  template <typename IsolateT>
  friend class Deserializer;

  // The Isolate constructs us.
  friend class Isolate;

  // Used in cctest.
  friend class heap::HeapTester;
};

class HeapStats {
 public:
2521 2522 2523
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

2524
  intptr_t* start_marker;                  //  0
2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535
  size_t* ro_space_size;                   //  1
  size_t* ro_space_capacity;               //  2
  size_t* new_space_size;                  //  3
  size_t* new_space_capacity;              //  4
  size_t* old_space_size;                  //  5
  size_t* old_space_capacity;              //  6
  size_t* code_space_size;                 //  7
  size_t* code_space_capacity;             //  8
  size_t* map_space_size;                  //  9
  size_t* map_space_capacity;              // 10
  size_t* lo_space_size;                   // 11
2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551
  size_t* code_lo_space_size;              // 12
  size_t* global_handle_count;             // 13
  size_t* weak_global_handle_count;        // 14
  size_t* pending_global_handle_count;     // 15
  size_t* near_death_global_handle_count;  // 16
  size_t* free_global_handle_count;        // 17
  size_t* memory_allocator_size;           // 18
  size_t* memory_allocator_capacity;       // 19
  size_t* malloced_memory;                 // 20
  size_t* malloced_peak_memory;            // 21
  size_t* objects_per_type;                // 22
  size_t* size_per_type;                   // 23
  int* os_error;                           // 24
  char* last_few_messages;                 // 25
  char* js_stacktrace;                     // 26
  intptr_t* end_marker;                    // 27
2552 2553
};

// Disables GC for all allocations. It should not be used
// outside heap, deserializer, and isolate bootstrap.
// Use AlwaysAllocateScopeForTesting in tests.
class V8_NODISCARD AlwaysAllocateScope {
 public:
  inline ~AlwaysAllocateScope();

 private:
  friend class AlwaysAllocateScopeForTesting;
  friend class Evacuator;
  friend class Heap;
  friend class Isolate;

  explicit inline AlwaysAllocateScope(Heap* heap);
  Heap* heap_;
};

// Like AlwaysAllocateScope if the heap argument to the constructor is
// non-null. No-op otherwise.
//
// This class exists because AlwaysAllocateScope doesn't compose with
// base::Optional, since supporting that composition requires making
// base::Optional a friend class, defeating the purpose of hiding its
// constructor.
class V8_NODISCARD OptionalAlwaysAllocateScope {
 public:
  inline ~OptionalAlwaysAllocateScope();

 private:
  friend class Heap;

  explicit inline OptionalAlwaysAllocateScope(Heap* heap);
  Heap* heap_;
};

class V8_NODISCARD AlwaysAllocateScopeForTesting {
 public:
  explicit inline AlwaysAllocateScopeForTesting(Heap* heap);

 private:
  AlwaysAllocateScope scope_;
};

// The CodeSpaceMemoryModificationScope can only be used by the main thread.
class V8_NODISCARD CodeSpaceMemoryModificationScope {
 public:
  explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
  inline ~CodeSpaceMemoryModificationScope();

 private:
  Heap* heap_;
};

// The CodePageCollectionMemoryModificationScope can only be used by the main
// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
// already active.
class V8_NODISCARD CodePageCollectionMemoryModificationScope {
 public:
  explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
  inline ~CodePageCollectionMemoryModificationScope();

 private:
  Heap* heap_;
};

// The CodePageMemoryModificationScope does not check if transitions to
// writable and back to executable are actually allowed, i.e. the MemoryChunk
// was registered to be executable. It can be used by concurrent threads.
class V8_NODISCARD CodePageMemoryModificationScope {
 public:
  explicit inline CodePageMemoryModificationScope(BasicMemoryChunk* chunk);
  explicit inline CodePageMemoryModificationScope(Code object);
  inline ~CodePageMemoryModificationScope();

 private:
  BasicMemoryChunk* chunk_;
  bool scope_active_;

  // Disallow any GCs inside this scope, as a relocation of the underlying
  // object would change the {MemoryChunk} that this scope targets.
  DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
};

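// Usage sketch (illustrative; {PatchCode} and {code} are hypothetical): the
// scope targets the single MemoryChunk backing the given object and may be
// used from concurrent threads.
//
//   void PatchCode(Code code) {
//     CodePageMemoryModificationScope modification_scope(code);
//     // The chunk holding {code} is writable within this scope; no GC can
//     // move {code} while the scope is active.
//   }
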
class V8_NODISCARD IgnoreLocalGCRequests {
 public:
  explicit inline IgnoreLocalGCRequests(Heap* heap);
  inline ~IgnoreLocalGCRequests();

 private:
  Heap* heap_;
};

// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitorWithCageBases,
                              public RootVisitor {
 public:
  V8_INLINE explicit VerifyPointersVisitor(Heap* heap);
  void VisitPointers(HeapObject host, ObjectSlot start,
                     ObjectSlot end) override;
  void VisitPointers(HeapObject host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override;
  void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override;
  void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override;
  void VisitRootPointers(Root root, const char* description,
                         OffHeapObjectSlot start,
                         OffHeapObjectSlot end) override;

 protected:
  V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
  V8_INLINE void VerifyCodeObjectImpl(HeapObject heap_object);

  template <typename TSlot>
  V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end);

  virtual void VerifyPointers(HeapObject host, MaybeObjectSlot start,
                              MaybeObjectSlot end);

  Heap* heap_;
};

// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
 public:
  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override;
};

// Space iterator for iterating over all the paged spaces of the heap: map
// space, old space and code space. Returns each space in turn, and null when
// it is done.
class V8_EXPORT_PRIVATE PagedSpaceIterator {
 public:
  explicit PagedSpaceIterator(Heap* heap)
      : heap_(heap), counter_(FIRST_GROWABLE_PAGED_SPACE) {}
  PagedSpace* Next();

 private:
  Heap* heap_;
  int counter_;
};

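// Usage sketch (illustrative): Next() hands out each paged space in turn and
// returns null once all spaces have been visited.
//
//   PagedSpaceIterator spaces(heap);
//   for (PagedSpace* space = spaces.Next(); space != nullptr;
//        space = spaces.Next()) {
//     // Inspect {space}.
//   }
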
class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  virtual ~SpaceIterator();

  bool HasNext();
  Space* Next();

 private:
  Heap* heap_;
  int current_space_;  // from enum AllocationSpace.
};

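// Usage sketch (illustrative):
//
//   SpaceIterator spaces(heap);
//   while (spaces.HasNext()) {
//     Space* space = spaces.Next();
//     // Inspect {space}.
//   }
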
// A HeapObjectIterator provides iteration over the entire non-read-only heap.
// It aggregates the specific iterators for the different spaces, as each of
// those can iterate over only one space.
//
// HeapObjectIterator ensures there is no allocation during its lifetime (using
// an embedded DisallowGarbageCollection instance).
//
// HeapObjectIterator can skip free-list nodes (that is, de-allocated heap
// objects that still remain in the heap). Because the filtering of free nodes
// relies on GC marks, it cannot be used during MS/MC GC phases. It is also
// forbidden to interrupt iteration in this mode, as this would leave heap
// objects marked (and thus unusable).
//
// See ReadOnlyHeapObjectIterator if you need to iterate over read-only space
// objects, or CombinedHeapObjectIterator if you need to iterate over both
// heaps.
class V8_EXPORT_PRIVATE HeapObjectIterator {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapObjectIterator(Heap* heap,
                              HeapObjectsFiltering filtering = kNoFiltering);
  ~HeapObjectIterator();

  HeapObject Next();

 private:
  HeapObject NextObject();

  Heap* heap_;
  std::unique_ptr<SafepointScope> safepoint_scope_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  std::unique_ptr<ObjectIterator> object_iterator_;

  DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
};

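// Usage sketch (illustrative): Next() returns a null HeapObject once the
// iteration is exhausted; no allocation may happen while the iterator exists.
//
//   HeapObjectIterator iterator(heap);
//   for (HeapObject obj = iterator.Next(); !obj.is_null();
//        obj = iterator.Next()) {
//     // Process {obj}.
//   }
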
// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() = default;

  // Returns whether this object should be retained. If nullptr is returned,
  // the object has no references. Otherwise the address of the retained
  // object should be returned, as in some GC situations the object may have
  // been moved.
  virtual Object RetainAs(Object object) = 0;
};

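// Sketch of a trivial retainer (illustrative; {RetainEverything} is
// hypothetical): it keeps every object alive and reports the (possibly moved)
// object back to the GC.
//
//   class RetainEverything final : public WeakObjectRetainer {
//    public:
//     Object RetainAs(Object object) override { return object; }
//   };
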
// -----------------------------------------------------------------------------
// Allows observation of heap object allocations.
class HeapObjectAllocationTracker {
 public:
  virtual void AllocationEvent(Address addr, int size) = 0;
  virtual void MoveEvent(Address from, Address to, int size) {}
  virtual void UpdateObjectSizeEvent(Address addr, int size) {}
  virtual ~HeapObjectAllocationTracker() = default;
};

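// Sketch of a tracker that counts allocated bytes (illustrative; the class
// name and counter are hypothetical). Only AllocationEvent must be
// overridden; MoveEvent and UpdateObjectSizeEvent have empty defaults.
//
//   class AllocationByteCounter final : public HeapObjectAllocationTracker {
//    public:
//     void AllocationEvent(Address addr, int size) override {
//       total_bytes_ += size;
//     }
//
//    private:
//     size_t total_bytes_ = 0;
//   };
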
template <typename T>
inline T ForwardingAddress(T heap_obj);

// Address block allocator compatible with standard containers which registers
// its allocated range as strong roots.
class StrongRootBlockAllocator {
 public:
  using pointer = Address*;
  using const_pointer = const Address*;
  using reference = Address&;
  using const_reference = const Address&;
  using value_type = Address;
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  template <class U>
  struct rebind;

  explicit StrongRootBlockAllocator(Heap* heap) : heap_(heap) {}

  Address* allocate(size_t n);
  void deallocate(Address* p, size_t n) noexcept;

 private:
  Heap* heap_;
};

// Rebinding to Address gives another StrongRootBlockAllocator.
template <>
struct StrongRootBlockAllocator::rebind<Address> {
  using other = StrongRootBlockAllocator;
};

// Rebinding to something other than Address gives a std::allocator that
// is copy-constructable from StrongRootBlockAllocator.
template <class U>
struct StrongRootBlockAllocator::rebind {
  class other : public std::allocator<U> {
   public:
    // NOLINTNEXTLINE
    other(const StrongRootBlockAllocator&) {}
  };
};

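// Usage sketch (illustrative; {heap} and {object} are hypothetical): a
// standard container whose backing block is registered as a strong root for
// as long as the block is allocated.
//
//   StrongRootBlockAllocator allocator(heap);
//   std::vector<Address, StrongRootBlockAllocator> strong_roots(allocator);
//   strong_roots.push_back(object.ptr());
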
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_H_