// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_READ_ONLY_SPACES_H_
#define V8_HEAP_READ_ONLY_SPACES_H_

#include <memory>
#include <utility>

#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/base-space.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"

namespace v8 {
namespace internal {

class ReadOnlyDeserializer;
class MemoryAllocator;
class ReadOnlyHeap;
class SharedReadOnlySpace;

class ReadOnlyPage : public BasicMemoryChunk {
 public:
  // Clears any pointers in the header that point outside of the page and
  // would otherwise make the header non-relocatable.
  void MakeHeaderRelocatable();

  size_t ShrinkToHighWaterMark();

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(size_t offset) const {
    Address address_in_page = address() + offset;
    if (V8_SHARED_RO_HEAP_BOOL && COMPRESS_POINTERS_BOOL) {
      // Pointer compression with shared ReadOnlyPages means that area_start
      // and area_end cannot be relied on, since they are stored within the
      // pages, which can be mapped at multiple memory addresses.
      DCHECK_LT(offset, size());
    } else {
      DCHECK_GE(address_in_page, area_start());
      DCHECK_LT(address_in_page, area_end());
    }
    return address_in_page;
  }
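  // Illustrative sketch, not part of the API: OffsetToAddress() simply adds
  // the offset to the page base, so for a hypothetical address `addr` on a
  // ReadOnlyPage* `page`:
  //   size_t offset = addr - page->address();
  //   DCHECK_EQ(page->OffsetToAddress(offset), addr);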

  // Returns the start of the page's area without using area_start(), which
  // cannot return the correct result when the page is remapped multiple times.
  Address GetAreaStart() const {
    return address() +
           MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(RO_SPACE);
  }
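  // Note (illustrative): by construction this is the same value as
  // OffsetToAddress(MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
  //     RO_SPACE)), so it stays valid for every mapping of a shared page.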

 private:
  friend class ReadOnlySpace;
};

// -----------------------------------------------------------------------------
// Artifacts used to construct a new SharedReadOnlySpace
class ReadOnlyArtifacts {
 public:
  virtual ~ReadOnlyArtifacts() = default;

  // Initialize the ReadOnlyArtifacts from an Isolate that has just been created
  // either by serialization or by creating the objects directly.
  virtual void Initialize(Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
                          const AllocationStats& stats) = 0;

  // This replaces the ReadOnlySpace in the given Heap with a newly constructed
  // SharedReadOnlySpace that has pages created from the ReadOnlyArtifacts. This
  // is only called for the first Isolate, where the ReadOnlySpace is created
  // during the bootstrap process.
  virtual void ReinstallReadOnlySpace(Isolate* isolate) = 0;

  // Creates a ReadOnlyHeap for a specific Isolate. This will be populated with
  // a SharedReadOnlySpace object that points to the Isolate's heap. Should
  // only be used when the read-only heap memory is shared, whether or not
  // pointer compression is enabled. This is called for all subsequent Isolates
  // created after the first one.
  virtual ReadOnlyHeap* GetReadOnlyHeapForIsolate(Isolate* isolate) = 0;
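  // Simplified lifecycle sketch, as implied by the comments above; the caller
  // code here is hypothetical:
  //   artifacts->Initialize(first_isolate, std::move(pages), stats);
  //   artifacts->ReinstallReadOnlySpace(first_isolate);
  //   // Every subsequently created Isolate instead only does:
  //   ReadOnlyHeap* ro_heap = artifacts->GetReadOnlyHeapForIsolate(isolate);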

  virtual void VerifyHeapAndSpaceRelationships(Isolate* isolate) = 0;

  std::vector<ReadOnlyPage*>& pages() { return pages_; }

  void set_accounting_stats(const AllocationStats& stats) { stats_ = stats; }
  const AllocationStats& accounting_stats() const { return stats_; }

  void set_shared_read_only_space(
      std::unique_ptr<SharedReadOnlySpace> shared_space) {
    shared_read_only_space_ = std::move(shared_space);
  }
  SharedReadOnlySpace* shared_read_only_space() {
    return shared_read_only_space_.get();
  }

  void set_read_only_heap(std::unique_ptr<ReadOnlyHeap> read_only_heap);
  ReadOnlyHeap* read_only_heap() const { return read_only_heap_.get(); }

  void InitializeChecksum(ReadOnlyDeserializer* des);
  void VerifyChecksum(ReadOnlyDeserializer* des, bool read_only_heap_created);

 protected:
  ReadOnlyArtifacts() = default;

  std::vector<ReadOnlyPage*> pages_;
  AllocationStats stats_;
  std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_;
  std::unique_ptr<ReadOnlyHeap> read_only_heap_;
#ifdef DEBUG
  // The checksum of the blob the read-only heap was deserialized from, if
  // any.
  base::Optional<uint32_t> read_only_blob_checksum_;
#endif  // DEBUG
};

// -----------------------------------------------------------------------------
// Artifacts used to construct a new SharedReadOnlySpace when pointer
// compression is disabled and so there is a single ReadOnlySpace with one set
// of pages shared between all Isolates.
class SingleCopyReadOnlyArtifacts : public ReadOnlyArtifacts {
 public:
  ~SingleCopyReadOnlyArtifacts() override;

  ReadOnlyHeap* GetReadOnlyHeapForIsolate(Isolate* isolate) override;
  void Initialize(Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
                  const AllocationStats& stats) override;
  void ReinstallReadOnlySpace(Isolate* isolate) override;
  void VerifyHeapAndSpaceRelationships(Isolate* isolate) override;
};

// -----------------------------------------------------------------------------
// Artifacts used to construct a new SharedReadOnlySpace when pointer
// compression is enabled and so there is a ReadOnlySpace for each Isolate
// with its own set of pages mapped from the canonical set stored here.
class PointerCompressedReadOnlyArtifacts : public ReadOnlyArtifacts {
 public:
  ReadOnlyHeap* GetReadOnlyHeapForIsolate(Isolate* isolate) override;
  void Initialize(Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
                  const AllocationStats& stats) override;
  void ReinstallReadOnlySpace(Isolate* isolate) override;
  void VerifyHeapAndSpaceRelationships(Isolate* isolate) override;

 private:
  SharedReadOnlySpace* CreateReadOnlySpace(Isolate* isolate);
  Tagged_t OffsetForPage(size_t index) const { return page_offsets_[index]; }
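  // Illustrative, assuming pointer compression: pages are identified by their
  // cage-relative Tagged_t offsets, so materializing page i in an Isolate's
  // cage amounts to (cage_base is a stand-in name, not a real member):
  //   Address addr = cage_base + OffsetForPage(i);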
  void InitializeRootsIn(Isolate* isolate);
  void InitializeRootsFrom(Isolate* isolate);

  std::unique_ptr<v8::PageAllocator::SharedMemoryMapping> RemapPageTo(
      size_t i, Address new_address, ReadOnlyPage*& new_page);

  static constexpr size_t kReadOnlyRootsCount =
      static_cast<size_t>(RootIndex::kReadOnlyRootsCount);

  Address read_only_roots_[kReadOnlyRootsCount];
  std::vector<Tagged_t> page_offsets_;
  std::vector<std::unique_ptr<PageAllocator::SharedMemory>> shared_memory_;
};

// -----------------------------------------------------------------------------
// Read-only space for all immortal, immovable and immutable objects.
class ReadOnlySpace : public BaseSpace {
 public:
  V8_EXPORT_PRIVATE explicit ReadOnlySpace(Heap* heap);

  // Detach the pages and add them to the artifacts for use in creating a
  // SharedReadOnlySpace. Since the current space then no longer has any pages,
  // it should be replaced straight after this in its Heap.
  void DetachPagesAndAddToArtifacts(
      std::shared_ptr<ReadOnlyArtifacts> artifacts);

  V8_EXPORT_PRIVATE ~ReadOnlySpace() override;
  V8_EXPORT_PRIVATE virtual void TearDown(MemoryAllocator* memory_allocator);

  bool IsDetached() const { return heap_ == nullptr; }

  bool writable() const { return !is_marked_read_only_; }

  bool Contains(Address a) = delete;
  bool Contains(Object o) = delete;

  V8_EXPORT_PRIVATE
  AllocationResult AllocateRaw(int size_in_bytes,
                               AllocationAlignment alignment);

  V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();

  enum class SealMode {
    kDetachFromHeap,
    kDetachFromHeapAndUnregisterMemory,
    kDoNotDetachFromHeap
  };

  // Seal the space by marking it read-only, optionally detaching it
  // from the heap and forgetting it for memory bookkeeping purposes (e.g.
  // to prevent the space's memory from being reported as leaked).
  V8_EXPORT_PRIVATE void Seal(SealMode ro_mode);
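  // Usage sketch (hypothetical call site): sealing after bootstrap might be
  //   space->Seal(ReadOnlySpace::SealMode::kDetachFromHeap);
  // after which writable() returns false and IsDetached() returns true.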

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free space nodes that were already created.
  void RepairFreeSpacesAfterDeserialization();

  size_t Size() override { return accounting_stats_.Size(); }
  V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() override;

  const std::vector<ReadOnlyPage*>& pages() const { return pages_; }
  Address top() const { return top_; }
  Address limit() const { return limit_; }
  size_t Capacity() const { return capacity_; }

  bool ContainsSlow(Address addr);
  V8_EXPORT_PRIVATE void ShrinkPages();
#ifdef VERIFY_HEAP
  void Verify(Isolate* isolate);
#ifdef DEBUG
  void VerifyCounters(Heap* heap);
#endif  // DEBUG
#endif  // VERIFY_HEAP

  // Returns the size of the allocatable area on a page in this space.
  int AreaSize() const { return static_cast<int>(area_size_); }

  ReadOnlyPage* InitializePage(BasicMemoryChunk* chunk);

  Address FirstPageAddress() const { return pages_.front()->address(); }

 protected:
  friend class SingleCopyReadOnlyArtifacts;

  void SetPermissionsForPages(MemoryAllocator* memory_allocator,
                              PageAllocator::Permission access);

  bool is_marked_read_only_ = false;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  std::vector<ReadOnlyPage*> pages_;

  Address top_;
  Address limit_;

 private:
  // Unseal the space after it has been sealed by making it writable.
  void Unseal();

  void DetachFromHeap() { heap_ = nullptr; }

  AllocationResult AllocateRawUnaligned(int size_in_bytes);
  AllocationResult AllocateRawAligned(int size_in_bytes,
                                      AllocationAlignment alignment);
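  // Note: the public AllocateRaw() presumably dispatches to one of the two
  // helpers above based on the requested AllocationAlignment; this is an
  // assumption, not verified against the implementation.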

  HeapObject TryAllocateLinearlyAligned(int size_in_bytes,
                                        AllocationAlignment alignment);
  void EnsureSpaceForAllocation(int size_in_bytes);
  void FreeLinearAllocationArea();

  // String padding must be cleared just before serialization and therefore
  // the string padding in the space will already have been cleared if the
  // space was deserialized.
  bool is_string_padding_cleared_;

  size_t capacity_;
  const size_t area_size_;
};

class SharedReadOnlySpace : public ReadOnlySpace {
 public:
  explicit SharedReadOnlySpace(Heap* heap) : ReadOnlySpace(heap) {
    is_marked_read_only_ = true;
  }

  SharedReadOnlySpace(Heap* heap,
                      PointerCompressedReadOnlyArtifacts* artifacts);
  SharedReadOnlySpace(
      Heap* heap, std::vector<ReadOnlyPage*>&& new_pages,
      std::vector<std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>>&&
          mappings,
      AllocationStats&& new_stats);
  SharedReadOnlySpace(Heap* heap, SingleCopyReadOnlyArtifacts* artifacts);
  SharedReadOnlySpace(const SharedReadOnlySpace&) = delete;

  void TearDown(MemoryAllocator* memory_allocator) override;

  // Holds any shared memory mapping that must be freed when the space is
  // deallocated.
  std::vector<std::unique_ptr<v8::PageAllocator::SharedMemoryMapping>>
      shared_memory_mappings_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_READ_ONLY_SPACES_H_