// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_LARGE_SPACES_H_
#define V8_HEAP_LARGE_SPACES_H_

#include <atomic>
#include <functional>
#include <memory>
#include <unordered_map>

#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {

class Isolate;
class LocalHeap;

class LargePage : public MemoryChunk {
 public:
  // A limit to guarantee that we do not overflow the typed slot offset in the
  // old-to-old remembered set. Note that this limit is higher than the limit
  // the assembler already imposes on x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

  static LargePage* FromHeapObject(HeapObject o) {
    return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
  }

  HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }

  LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }

  // Returns the address from which memory that is no longer in use by the
  // object can be uncommitted. Returns 0 if the object cannot be shrunk.
  Address GetAddressToShrink(Address object_address, size_t object_size);

  void ClearOutOfLiveRangeSlots(Address free_start);
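
  // Illustrative only, a sketch of how a GC pass might combine the two
  // methods above (the exact call site in V8 may differ):
  //
  //   Address free_start =
  //       page->GetAddressToShrink(object.address(), object.Size());
  //   if (free_start != 0) {
  //     page->ClearOutOfLiveRangeSlots(free_start);
  //     // ... then release the unused tail of the page.
  //   }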

 private:
  static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                               Executability executable);

  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
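
// Illustrative only: each LargePage holds exactly one object, so mapping an
// object to its page and back is a round trip (a sketch, not code in this
// header):
//
//   LargePage* page = LargePage::FromHeapObject(object);
//   DCHECK_EQ(object, page->GetObject());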

// -----------------------------------------------------------------------------
// Large objects (> kMaxRegularHeapObjectSize) are allocated and managed by
// the large object space. Large objects do not move during garbage
// collections.

class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
 public:
  using iterator = LargePageIterator;

  ~LargeObjectSpace() override { TearDown(); }

  // Releases internal resources and frees all objects in this space.
  void TearDown();

  // Available bytes for objects in this space.
  size_t Available() override;

  size_t Size() override { return size_; }
  size_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Frees unmarked objects.
  virtual void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject obj);
  // Checks whether an address is in the object area of this space. Iterates
  // all objects in the space; may be slow.
  bool ContainsSlow(Address addr);
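
  // Illustrative only: Contains() consults the page header and is O(1),
  // while ContainsSlow() walks every page. A verification pass might assert
  // that the two agree (a sketch):
  //
  //   DCHECK_IMPLIES(space->Contains(object),
  //                  space->ContainsSlow(object.address()));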

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page() == nullptr; }

  virtual void AddPage(LargePage* page, size_t object_size);
  virtual void RemovePage(LargePage* page, size_t object_size);

  LargePage* first_page() {
    return reinterpret_cast<LargePage*>(Space::first_page());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }
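
  // Illustrative only: begin()/end() make the space usable in a range-based
  // for loop; the iterator yields LargePage* (a sketch):
  //
  //   for (LargePage* page : *space) {
  //     HeapObject object = page->GetObject();
  //     // ... process the page's single object.
  //   }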

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  virtual bool is_off_thread() const { return false; }

#ifdef VERIFY_HEAP
  virtual void Verify(Isolate* isolate);
#endif

#ifdef DEBUG
  void Print() override;
#endif

 protected:
  LargeObjectSpace(Heap* heap, AllocationSpace id);

  void AdvanceAndInvokeAllocationObservers(Address soon_object, size_t size);

  LargePage* AllocateLargePage(int object_size, Executability executable);

  std::atomic<size_t> size_;  // allocated bytes
  int page_count_;       // number of chunks
  std::atomic<size_t> objects_size_;  // size of objects
  base::Mutex allocation_mutex_;

 private:
  friend class LargeObjectSpaceObjectIterator;
};

class OldLargeObjectSpace : public LargeObjectSpace {
 public:
  explicit OldLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);
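
  // Illustrative only: AllocateRaw() can fail, so callers must unwrap the
  // result and retry after a GC on failure (a sketch):
  //
  //   AllocationResult result = lo_space->AllocateRaw(object_size);
  //   HeapObject object;
  //   if (!result.To(&object)) {
  //     // ... request a GC and retry.
  //   }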

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRawBackground(LocalHeap* local_heap, int object_size);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  void PromoteNewLargeObject(LargePage* page);

 protected:
  explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);
};

class NewLargeObjectSpace : public LargeObjectSpace {
 public:
  NewLargeObjectSpace(Heap* heap, size_t capacity);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Available bytes for objects in this space.
  size_t Available() override;

  void Flip();

  void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
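
  // Illustrative only: the caller supplies the liveness predicate, e.g. a
  // scavenger could free every object it did not mark (IsMarked here is a
  // hypothetical helper):
  //
  //   space->FreeDeadObjects(
  //       [](HeapObject object) { return !IsMarked(object); });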

  void SetCapacity(size_t capacity);

  // The last allocated object that is not guaranteed to be initialized when the
  // concurrent marker visits it.
  Address pending_object() {
    return pending_object_.load(std::memory_order_relaxed);
  }
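
  // Illustrative only: a concurrent marker can compare an object's address
  // against pending_object() and skip it while it may still be under
  // construction (a sketch):
  //
  //   if (object.address() == space->pending_object()) {
  //     // Do not visit; the object may not be fully initialized yet.
  //   }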

  void ResetPendingObject() { pending_object_.store(0); }

 private:
  std::atomic<Address> pending_object_;
  size_t capacity_;
};

class CodeLargeObjectSpace : public OldLargeObjectSpace {
 public:
  explicit CodeLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Finds the large object page containing the given address; returns nullptr
  // if no such page exists.
  LargePage* FindPage(Address a);
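
  // Illustrative only: FindPage() lets a profiler or stack walker map an
  // arbitrary instruction address back to the large code object containing
  // it (a sketch):
  //
  //   if (LargePage* page = code_lo_space->FindPage(pc)) {
  //     HeapObject code = page->GetObject();
  //     // ...
  //   }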

 protected:
  void AddPage(LargePage* page, size_t object_size) override;
  void RemovePage(LargePage* page, size_t object_size) override;

 private:
  static const size_t kInitialChunkMapCapacity = 1024;
  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);

  // Maps page-aligned addresses to their corresponding LargePage.
  std::unordered_map<Address, LargePage*> chunk_map_;
};

class LargeObjectSpaceObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);

  HeapObject Next() override;

 private:
  LargePage* current_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_LARGE_SPACES_H_