Commit c7d22c49 authored by Dan Elphick, committed by Commit Bot

[heap] Share RO_SPACE pages with pointer compression

This allows v8_enable_shared_ro_heap and v8_enable_pointer_compression
to be enabled together on Linux and Android, although the combination
still defaults to off.

When pointer compression and read-only heap sharing are both enabled,
sharing is achieved by allocating the ReadOnlyPages in shared memory and
retaining them in the shared ReadOnlyArtifacts object. These pages are
then remapped into each Isolate's address space, ultimately using
mremap.
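
A minimal sketch of that mechanism, assuming the v8::PageAllocator
shared-memory hooks used elsewhere in this CL (CanAllocateSharedPages,
AllocateSharedPages, SharedMemory::RemapTo) and the internal
GetPlatformPageAllocator() helper; page_size, page_contents and
address_in_cage are illustrative placeholders, not names from this CL:

  v8::PageAllocator* allocator = GetPlatformPageAllocator();
  if (allocator->CanAllocateSharedPages()) {
    // Back the finalized read-only page with shared memory seeded from
    // its current contents.
    std::unique_ptr<v8::PageAllocator::SharedMemory> shared =
        allocator->AllocateSharedPages(page_size, page_contents);
    // For each Isolate, map the same physical pages at the address
    // reserved inside that Isolate's pointer-compression cage (on Linux
    // this bottoms out in mremap).
    std::unique_ptr<v8::PageAllocator::SharedMemoryMapping> mapping =
        shared->RemapTo(reinterpret_cast<void*>(address_in_cage));
  }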

To simplify the creation process, the ReadOnlySpace memory for the first
Isolate is created as before, without any sharing. Only once the
ReadOnlySpace memory has been finalized is the shared memory allocated
and the contents copied into it. The original memory is then released
(with pointer compression this just means returning it to the
BoundedPageAllocator) and immediately re-allocated as a shared mapping.
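
A hedged sketch of that first-Isolate flow, using the
DetachPagesAndAddToArtifacts and ReinstallReadOnlySpace entry points
added in this CL; the wrapper function itself is illustrative and not
part of the change:

  // Illustrative glue, not a function in this CL.
  void ShareReadOnlySpaceOfFirstIsolate(
      Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts) {
    ReadOnlySpace* ro_space = isolate->heap()->read_only_space();
    // Seal the finalized space and hand its pages to the artifacts,
    // which (with pointer compression) copy them into freshly allocated
    // shared memory.
    ro_space->DetachPagesAndAddToArtifacts(artifacts);
    // Replace the first Isolate's ReadOnlySpace with a
    // SharedReadOnlySpace built over the shared copies.
    artifacts->ReinstallReadOnlySpace(isolate);
  }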

Because we would like v8_enable_shared_ro_heap to default to true at
some point, and a build flag cannot depend on a value returned by code
that has not yet been compiled, the code required for sharing mostly
uses runtime checks of ReadOnlyHeap::IsReadOnlySpaceShared() rather than
#ifdefs, except where a compile error would result from class members
that only exist when sharing is enabled. With pointer compression and
sharing enabled, IsReadOnlySpaceShared() evaluates
CanAllocateSharedPages on the platform PageAllocator once and caches the
result, so sharing cannot be toggled during the lifetime of the process.
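
The check itself appears further down in read-only-heap.h; the body of
IsSharedMemoryAvailable() below is an illustrative guess at the caching,
assuming the internal GetPlatformPageAllocator() helper:

  // Illustrative: evaluate CanAllocateSharedPages once per process and
  // cache the result, so sharing cannot be toggled after startup.
  bool ReadOnlyHeap::IsSharedMemoryAvailable() {
    static bool can_allocate_shared_pages =
        GetPlatformPageAllocator()->CanAllocateSharedPages();
    return can_allocate_shared_pages;
  }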

Bug: v8:10454
Change-Id: I0236d752047ecce71bd64c159430517a712bc1e2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2267300
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69174}
parent 0ae4ef05
......@@ -339,9 +339,11 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
"Control-flow integrity is only supported on arm64")
assert(
!v8_enable_pointer_compression || !v8_enable_shared_ro_heap,
"Pointer compression is not supported with shared read-only heap enabled")
if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
assert(
is_linux || is_android,
"Sharing read-only heap with pointer compression is only supported on Linux or Android")
}
assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
"V8 Heap Sandbox requires pointer compression")
......
......@@ -8499,12 +8499,13 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_global_handles_size_ = heap->TotalGlobalHandlesSize();
heap_statistics->used_global_handles_size_ = heap->UsedGlobalHandlesSize();
#ifndef V8_SHARED_RO_HEAP
i::ReadOnlySpace* ro_space = heap->read_only_space();
heap_statistics->total_heap_size_ += ro_space->CommittedMemory();
heap_statistics->total_physical_size_ += ro_space->CommittedPhysicalMemory();
heap_statistics->used_heap_size_ += ro_space->Size();
#endif // V8_SHARED_RO_HEAP
if (!i::ReadOnlyHeap::IsReadOnlySpaceShared()) {
i::ReadOnlySpace* ro_space = heap->read_only_space();
heap_statistics->total_heap_size_ += ro_space->CommittedMemory();
heap_statistics->total_physical_size_ +=
ro_space->CommittedPhysicalMemory();
heap_statistics->used_heap_size_ += ro_space->Size();
}
heap_statistics->total_heap_size_executable_ =
heap->CommittedMemoryExecutable();
......@@ -8542,7 +8543,7 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
space_statistics->space_name_ = i::BaseSpace::GetSpaceName(allocation_space);
if (allocation_space == i::RO_SPACE) {
if (V8_SHARED_RO_HEAP_BOOL) {
if (i::ReadOnlyHeap::IsReadOnlySpaceShared()) {
// RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared.
space_statistics->space_size_ = 0;
space_statistics->space_used_size_ = 0;
......
......@@ -59,6 +59,26 @@ bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
return true;
}
bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr,
size_t size) {
Address address = reinterpret_cast<Address>(ptr);
CHECK(IsAligned(address, allocate_page_size_));
CHECK(IsAligned(size, commit_page_size_));
CHECK(region_allocator_.contains(address, size));
// Region allocator requires page size rather than commit size so just over-
// allocate there since any extra space couldn't be used anyway.
size_t region_size = RoundUp(size, allocate_page_size_);
if (!region_allocator_.AllocateRegionAt(
address, region_size, RegionAllocator::RegionState::kExcluded)) {
return false;
}
CHECK(page_allocator_->SetPermissions(ptr, size,
PageAllocator::Permission::kNoAccess));
return true;
}
bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
MutexGuard guard(&mutex_);
......
......@@ -56,6 +56,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
void* AllocatePages(void* hint, size_t size, size_t alignment,
Permission access) override;
bool ReserveForSharedMemoryMapping(void* address, size_t size) override;
// Allocates pages at given address, returns true on success.
bool AllocatePagesAt(Address address, size_t size, Permission access);
......
......@@ -80,6 +80,10 @@ STATIC_ASSERT(kPtrComprIsolateRootAlignment ==
V8_INLINE Tagged_t CompressTagged(Address tagged) { UNREACHABLE(); }
V8_INLINE Address GetIsolateRoot(Address on_heap_addr) { UNREACHABLE(); }
V8_INLINE Address GetIsolateRoot(const Isolate* isolate) { UNREACHABLE(); }
V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) { UNREACHABLE(); }
template <typename TOnHeapAddress>
......
......@@ -2933,14 +2933,17 @@ void Isolate::Delete(Isolate* isolate) {
}
void Isolate::SetUpFromReadOnlyArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts) {
artifacts_ = artifacts;
DCHECK_NOT_NULL(artifacts);
ReadOnlyHeap* ro_heap = artifacts->read_only_heap();
std::shared_ptr<ReadOnlyArtifacts> artifacts, ReadOnlyHeap* ro_heap) {
if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
DCHECK_NOT_NULL(artifacts);
artifacts_ = artifacts;
} else {
DCHECK_NULL(artifacts);
}
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_heap_ != nullptr, read_only_heap_ == ro_heap);
read_only_heap_ = ro_heap;
heap_.SetUpFromReadOnlyHeap(ro_heap);
heap_.SetUpFromReadOnlyHeap(read_only_heap_);
}
v8::PageAllocator* Isolate::page_allocator() {
......@@ -3221,6 +3224,13 @@ Isolate::~Isolate() {
default_microtask_queue_ == default_microtask_queue_->next());
delete default_microtask_queue_;
default_microtask_queue_ = nullptr;
// The ReadOnlyHeap should not be destroyed when sharing without pointer
// compression as the object itself is shared.
if (read_only_heap_->IsOwnedByIsolate()) {
delete read_only_heap_;
read_only_heap_ = nullptr;
}
}
void Isolate::InitializeThreadLocal() {
......
......@@ -533,7 +533,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// for legacy API reasons.
static void Delete(Isolate* isolate);
void SetUpFromReadOnlyArtifacts(std::shared_ptr<ReadOnlyArtifacts> artifacts);
void SetUpFromReadOnlyArtifacts(std::shared_ptr<ReadOnlyArtifacts> artifacts,
ReadOnlyHeap* ro_heap);
void set_read_only_heap(ReadOnlyHeap* ro_heap) { read_only_heap_ = ro_heap; }
// Returns allocation mode of this isolate.
V8_INLINE IsolateAllocationMode isolate_allocation_mode();
......
......@@ -3062,6 +3062,8 @@ bool Heap::InOffThreadSpace(HeapObject heap_object) {
return false; // currently unsupported
#else
BaseSpace* owner = BasicMemoryChunk::FromHeapObject(heap_object)->owner();
// Detached RO_SPACE chunks have no owner set.
if (owner == nullptr) return false;
if (owner->identity() == OLD_SPACE) {
// TODO(leszeks): Should we exclude compaction spaces here?
return static_cast<PagedSpace*>(owner)->is_off_thread_space();
......@@ -5309,7 +5311,10 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
CHECK(V8_SHARED_RO_HEAP_BOOL);
delete read_only_space_;
if (read_only_space_) {
read_only_space_->TearDown(memory_allocator());
delete read_only_space_;
}
read_only_space_ = space;
}
......@@ -5611,13 +5616,14 @@ void Heap::TearDown() {
tracer_.reset();
isolate()->read_only_heap()->OnHeapTearDown();
read_only_space_ = nullptr;
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
space_[i] = nullptr;
}
isolate()->read_only_heap()->OnHeapTearDown(this);
read_only_space_ = nullptr;
memory_allocator()->TearDown();
StrongRootsList* next = nullptr;
......
......@@ -15,6 +15,7 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
#include "src/logging/log.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
......@@ -536,6 +537,14 @@ void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
size_ -= released_bytes;
}
void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_, static_cast<size_t>(size));
size_ -= size;
}
void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
Executability executable) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
......@@ -543,6 +552,7 @@ void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_, static_cast<size_t>(size));
size_ -= size;
if (executable == EXECUTABLE) {
DCHECK_GE(size_executable_, size);
......@@ -559,15 +569,19 @@ void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
UnregisterMemory(chunk);
chunk->SetFlag(MemoryChunk::PRE_FREED);
UnregisterSharedMemory(chunk);
v8::PageAllocator* allocator = page_allocator(NOT_EXECUTABLE);
VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
reservation->Free();
reservation->FreeReadOnly();
} else {
// Only read-only pages can have non-initialized reservation object.
FreeMemory(page_allocator(NOT_EXECUTABLE), chunk->address(), chunk->size());
// Only read-only pages can have a non-initialized reservation object. This
// happens when the pages are remapped to multiple locations and where the
// reservation would therefore be invalid.
FreeMemory(allocator, chunk->address(),
RoundUp(chunk->size(), allocator->AllocatePageSize()));
}
}
......@@ -671,6 +685,12 @@ ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
return owner->InitializePage(chunk);
}
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
MemoryAllocator::RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address) {
return shared_memory->RemapTo(reinterpret_cast<void*>(new_address));
}
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
Executability executable) {
......
......@@ -11,6 +11,7 @@
#include <unordered_set>
#include <vector>
#include "include/v8-platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/macros.h"
......@@ -195,6 +196,9 @@ class MemoryAllocator {
ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
template <MemoryAllocator::FreeMode mode = kFull>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void Free(MemoryChunk* chunk);
......@@ -303,6 +307,9 @@ class MemoryAllocator {
void UnregisterMemory(MemoryChunk* chunk);
void UnregisterMemory(BasicMemoryChunk* chunk,
Executability executable = NOT_EXECUTABLE);
void UnregisterSharedMemory(BasicMemoryChunk* chunk);
void RegisterReadOnlyMemory(ReadOnlyPage* page);
private:
void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
......
......@@ -21,8 +21,9 @@ ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
#ifdef V8_SHARED_RO_HEAP
// This fails if we are creating heap objects and the roots haven't yet been
// copied into the read-only heap.
if (shared_ro_heap_ != nullptr && shared_ro_heap_->init_complete_) {
return ReadOnlyRoots(shared_ro_heap_->read_only_roots_);
auto* shared_ro_heap = SoleReadOnlyHeap::shared_ro_heap_;
if (shared_ro_heap != nullptr && shared_ro_heap->init_complete_) {
return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
}
#endif // V8_SHARED_RO_HEAP
return ReadOnlyRoots(GetHeapFromWritableObject(object));
......
......@@ -28,14 +28,17 @@ class ReadOnlyArtifacts;
class ReadOnlyDeserializer;
class ReadOnlyPage;
class ReadOnlySpace;
class SharedReadOnlySpace;
// This class transparently manages read-only space, roots and cache creation
// and destruction.
class ReadOnlyHeap final {
class ReadOnlyHeap {
public:
static constexpr size_t kEntriesCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
virtual ~ReadOnlyHeap() = default;
// If necessary creates read-only heap and initializes its artifacts (if the
// deserializer is provided). Then attaches the read-only heap to the isolate.
// If the deserializer is not provided, then the read-only heap will be only
......@@ -52,7 +55,7 @@ class ReadOnlyHeap final {
void OnCreateHeapObjectsComplete(Isolate* isolate);
// Indicates that the current isolate no longer requires the read-only heap
// and it may be safely disposed of.
void OnHeapTearDown();
virtual void OnHeapTearDown(Heap* heap);
// If the read-only heap is shared, then populate |statistics| with its stats,
// otherwise the read-only heap stats are set to 0.
static void PopulateReadOnlySpaceStatistics(
......@@ -77,9 +80,24 @@ class ReadOnlyHeap final {
ReadOnlySpace* read_only_space() const { return read_only_space_; }
private:
// Creates a new read-only heap and attaches it to the provided isolate.
static ReadOnlyHeap* CreateAndAttachToIsolate(
// Returns whether the ReadOnlySpace will actually be shared taking into
// account whether shared memory is available with pointer compression.
static bool IsReadOnlySpaceShared() {
return V8_SHARED_RO_HEAP_BOOL &&
(!COMPRESS_POINTERS_BOOL || IsSharedMemoryAvailable());
}
virtual void InitializeIsolateRoots(Isolate* isolate) {}
virtual void InitializeFromIsolateRoots(Isolate* isolate) {}
virtual bool IsOwnedByIsolate() { return true; }
protected:
friend class ReadOnlyArtifacts;
friend class PointerCompressedReadOnlyArtifacts;
// Creates a new read-only heap and attaches it to the provided isolate. Only
// used the first time when creating a ReadOnlyHeap for sharing.
static ReadOnlyHeap* CreateInitalHeapForBootstrapping(
Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts);
// Runs the read-only deserializer and calls InitFromIsolate to complete
// read-only heap initialization.
......@@ -94,21 +112,33 @@ class ReadOnlyHeap final {
ReadOnlySpace* read_only_space_ = nullptr;
std::vector<Object> read_only_object_cache_;
#ifdef V8_SHARED_RO_HEAP
#ifdef DEBUG
// The checksum of the blob the read-only heap was deserialized from, if any.
base::Optional<uint32_t> read_only_blob_checksum_;
#endif // DEBUG
Address read_only_roots_[kEntriesCount];
V8_EXPORT_PRIVATE static ReadOnlyHeap* shared_ro_heap_;
#endif // V8_SHARED_RO_HEAP
// Returns whether shared memory can be allocated and then remapped to
// additional addresses.
static bool IsSharedMemoryAvailable();
explicit ReadOnlyHeap(ReadOnlySpace* ro_space) : read_only_space_(ro_space) {}
ReadOnlyHeap(ReadOnlyHeap* ro_heap, ReadOnlySpace* ro_space);
DISALLOW_COPY_AND_ASSIGN(ReadOnlyHeap);
};
// This is used without pointer compression when there is just a single
// ReadOnlyHeap object shared between all Isolates.
class SoleReadOnlyHeap : public ReadOnlyHeap {
public:
void InitializeIsolateRoots(Isolate* isolate) override;
void InitializeFromIsolateRoots(Isolate* isolate) override;
void OnHeapTearDown(Heap* heap) override;
bool IsOwnedByIsolate() override { return false; }
private:
friend class ReadOnlyHeap;
explicit SoleReadOnlyHeap(ReadOnlySpace* ro_space) : ReadOnlyHeap(ro_space) {}
Address read_only_roots_[kEntriesCount];
V8_EXPORT_PRIVATE static SoleReadOnlyHeap* shared_ro_heap_;
};
// This class enables iterating over all read-only heap objects.
class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
public:
......
......@@ -20,6 +20,7 @@
namespace v8 {
namespace internal {
class ReadOnlyDeserializer;
class MemoryAllocator;
class ReadOnlyHeap;
......@@ -46,6 +47,13 @@ class ReadOnlyPage : public BasicMemoryChunk {
return address_in_page;
}
// Returns the start area of the page without using area_start() which cannot
// return the correct result when the page is remapped multiple times.
Address GetAreaStart() const {
return address() +
MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(RO_SPACE);
}
private:
friend class ReadOnlySpace;
};
......@@ -54,9 +62,32 @@ class ReadOnlyPage : public BasicMemoryChunk {
// Artifacts used to construct a new SharedReadOnlySpace
class ReadOnlyArtifacts {
public:
~ReadOnlyArtifacts();
virtual ~ReadOnlyArtifacts() = default;
// Initialize the ReadOnlyArtifacts from an Isolate that has just been created
// either by serialization or by creating the objects directly.
virtual void Initialize(Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) = 0;
// This replaces the ReadOnlySpace in the given Heap with a newly constructed
// SharedReadOnlySpace that has pages created from the ReadOnlyArtifacts. This
// is only called for the first Isolate, where the ReadOnlySpace is created
// during the bootstrap process.
virtual void ReinstallReadOnlySpace(Isolate* isolate) = 0;
// Creates a ReadOnlyHeap for a specific Isolate. This will be populated with
// a SharedReadOnlySpace object that points to the Isolate's heap. Should only
// be used when the read-only heap memory is shared with or without pointer
// compression. This is called for all subsequent Isolates created after the
// first one.
virtual ReadOnlyHeap* GetReadOnlyHeapForIsolate(Isolate* isolate) = 0;
virtual void VerifyHeapAndSpaceRelationships(Isolate* isolate) = 0;
std::vector<ReadOnlyPage*>& pages() { return pages_; }
void set_accounting_stats(const AllocationStats& stats) { stats_ = stats; }
const AllocationStats& accounting_stats() const { return stats_; }
void set_shared_read_only_space(
std::unique_ptr<SharedReadOnlySpace> shared_space) {
......@@ -66,21 +97,68 @@ class ReadOnlyArtifacts {
return shared_read_only_space_.get();
}
std::vector<ReadOnlyPage*>& pages() { return pages_; }
void TransferPages(std::vector<ReadOnlyPage*>&& pages) {
pages_ = std::move(pages);
}
void set_read_only_heap(std::unique_ptr<ReadOnlyHeap> read_only_heap);
ReadOnlyHeap* read_only_heap() const { return read_only_heap_.get(); }
const AllocationStats& accounting_stats() const { return stats_; }
void InitializeChecksum(ReadOnlyDeserializer* des);
void VerifyChecksum(ReadOnlyDeserializer* des, bool read_only_heap_created);
void set_read_only_heap(std::unique_ptr<ReadOnlyHeap> read_only_heap);
ReadOnlyHeap* read_only_heap() { return read_only_heap_.get(); }
protected:
ReadOnlyArtifacts() = default;
private:
std::vector<ReadOnlyPage*> pages_;
AllocationStats stats_;
std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_;
std::unique_ptr<ReadOnlyHeap> read_only_heap_;
#ifdef DEBUG
// The checksum of the blob the read-only heap was deserialized from, if
// any.
base::Optional<uint32_t> read_only_blob_checksum_;
#endif // DEBUG
};
// -----------------------------------------------------------------------------
// Artifacts used to construct a new SharedReadOnlySpace when pointer
// compression is disabled and so there is a single ReadOnlySpace with one set
// of pages shared between all Isolates.
class SingleCopyReadOnlyArtifacts : public ReadOnlyArtifacts {
public:
~SingleCopyReadOnlyArtifacts() override;
ReadOnlyHeap* GetReadOnlyHeapForIsolate(Isolate* isolate) override;
void Initialize(Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) override;
void ReinstallReadOnlySpace(Isolate* isolate) override;
void VerifyHeapAndSpaceRelationships(Isolate* isolate) override;
};
// -----------------------------------------------------------------------------
// Artifacts used to construct a new SharedReadOnlySpace when pointer
// compression is enabled and so there is a ReadOnlySpace for each Isolate
// with its own set of pages mapped from the canonical set stored here.
class PointerCompressedReadOnlyArtifacts : public ReadOnlyArtifacts {
public:
ReadOnlyHeap* GetReadOnlyHeapForIsolate(Isolate* isolate) override;
void Initialize(Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) override;
void ReinstallReadOnlySpace(Isolate* isolate) override;
void VerifyHeapAndSpaceRelationships(Isolate* isolate) override;
private:
SharedReadOnlySpace* CreateReadOnlySpace(Isolate* isolate);
Tagged_t OffsetForPage(size_t index) const { return page_offsets_[index]; }
void InitializeRootsIn(Isolate* isolate);
void InitializeRootsFrom(Isolate* isolate);
std::unique_ptr<v8::PageAllocator::SharedMemoryMapping> RemapPageTo(
size_t i, Address new_address, ReadOnlyPage*& new_page);
static constexpr size_t kReadOnlyRootsCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
Address read_only_roots_[kReadOnlyRootsCount];
std::vector<Tagged_t> page_offsets_;
std::vector<std::unique_ptr<PageAllocator::SharedMemory>> shared_memory_;
};
// -----------------------------------------------------------------------------
......@@ -89,12 +167,14 @@ class ReadOnlySpace : public BaseSpace {
public:
V8_EXPORT_PRIVATE explicit ReadOnlySpace(Heap* heap);
// Detach the pages and them to artifacts for using in creating a
// SharedReadOnlySpace.
// Detach the pages and add them to artifacts for using in creating a
// SharedReadOnlySpace. Since the current space no longer has any pages, it
// should be replaced straight after this in its Heap.
void DetachPagesAndAddToArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts);
V8_EXPORT_PRIVATE ~ReadOnlySpace() override;
V8_EXPORT_PRIVATE virtual void TearDown(MemoryAllocator* memory_allocator);
bool IsDetached() const { return heap_ == nullptr; }
......@@ -109,7 +189,11 @@ class ReadOnlySpace : public BaseSpace {
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
enum class SealMode {
kDetachFromHeap,
kDetachFromHeapAndUnregisterMemory,
kDoNotDetachFromHeap
};
// Seal the space by marking it read-only, optionally detaching it
// from the heap and forgetting it for memory bookkeeping purposes (e.g.
......@@ -145,6 +229,8 @@ class ReadOnlySpace : public BaseSpace {
Address FirstPageAddress() const { return pages_.front()->address(); }
protected:
friend class SingleCopyReadOnlyArtifacts;
void SetPermissionsForPages(MemoryAllocator* memory_allocator,
PageAllocator::Permission access);
......@@ -184,8 +270,26 @@ class ReadOnlySpace : public BaseSpace {
class SharedReadOnlySpace : public ReadOnlySpace {
public:
SharedReadOnlySpace(Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts);
~SharedReadOnlySpace() override;
explicit SharedReadOnlySpace(Heap* heap) : ReadOnlySpace(heap) {
is_marked_read_only_ = true;
}
SharedReadOnlySpace(Heap* heap,
PointerCompressedReadOnlyArtifacts* artifacts);
SharedReadOnlySpace(
Heap* heap, std::vector<ReadOnlyPage*>&& new_pages,
std::vector<std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>>&&
mappings,
AllocationStats&& new_stats);
SharedReadOnlySpace(Heap* heap, SingleCopyReadOnlyArtifacts* artifacts);
SharedReadOnlySpace(const SharedReadOnlySpace&) = delete;
void TearDown(MemoryAllocator* memory_allocator) override;
// Holds any shared memory mapping that must be freed when the space is
// deallocated.
std::vector<std::unique_ptr<v8::PageAllocator::SharedMemoryMapping>>
shared_memory_mappings_;
};
} // namespace internal
......
......@@ -527,9 +527,11 @@ class RootsTable {
friend class Isolate;
friend class Heap;
friend class Factory;
friend class PointerCompressedReadOnlyArtifacts;
friend class ReadOnlyHeap;
friend class ReadOnlyRoots;
friend class RootsSerializer;
friend class SoleReadOnlyHeap;
};
class ReadOnlyRoots {
......
......@@ -276,5 +276,18 @@ void VirtualMemory::Free() {
RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
void VirtualMemory::FreeReadOnly() {
DCHECK(IsReserved());
// The only difference to Free is that it doesn't call Reset which would write
// to the VirtualMemory object.
v8::PageAllocator* page_allocator = page_allocator_;
base::AddressRegion region = region_;
// FreePages expects size to be aligned to allocation granularity however
// ReleasePages may leave size at only commit granularity. Align it here.
CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
} // namespace internal
} // namespace v8
......@@ -233,6 +233,10 @@ class VirtualMemory final {
// Frees all memory.
V8_EXPORT_PRIVATE void Free();
// As with Free but does not write to the VirtualMemory object itself so it
// can be called on a VirtualMemory that is itself not writable.
V8_EXPORT_PRIVATE void FreeReadOnly();
bool InVM(Address address, size_t size) {
return region_.contains(address, size);
}
......
......@@ -793,6 +793,25 @@ TEST(NoMemoryForNewPage) {
CHECK_NULL(page);
}
namespace {
// ReadOnlySpace cannot be torn down by a destructor because the destructor
// cannot take an argument. Since these tests create ReadOnlySpaces not attached
// to the Heap directly, they need to be destroyed to ensure the
// MemoryAllocator's stats are all 0 at exit.
class ReadOnlySpaceScope {
public:
explicit ReadOnlySpaceScope(Heap* heap) : ro_space_(heap) {}
~ReadOnlySpaceScope() {
ro_space_.TearDown(CcTest::heap()->memory_allocator());
}
ReadOnlySpace* space() { return &ro_space_; }
private:
ReadOnlySpace ro_space_;
};
} // namespace
TEST(ReadOnlySpaceMetrics_OnePage) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
......@@ -800,34 +819,35 @@ TEST(ReadOnlySpaceMetrics_OnePage) {
// Create a read-only space and allocate some memory, shrink the pages and
// check the allocated object size is as expected.
ReadOnlySpace faked_space(heap);
ReadOnlySpaceScope scope(heap);
ReadOnlySpace* faked_space = scope.space();
// Initially no memory.
CHECK_EQ(faked_space.Size(), 0);
CHECK_EQ(faked_space.Capacity(), 0);
CHECK_EQ(faked_space.CommittedMemory(), 0);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);
CHECK_EQ(faked_space->Size(), 0);
CHECK_EQ(faked_space->Capacity(), 0);
CHECK_EQ(faked_space->CommittedMemory(), 0);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
faked_space.AllocateRaw(16, kWordAligned);
faked_space->AllocateRaw(16, kWordAligned);
faked_space.ShrinkPages();
faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
faked_space->ShrinkPages();
faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
MemoryAllocator* allocator = heap->memory_allocator();
// Allocated objects size.
CHECK_EQ(faked_space.Size(), 16);
CHECK_EQ(faked_space->Size(), 16);
size_t committed_memory = RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space.Size(),
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space->Size(),
allocator->GetCommitPageSize());
// Amount of OS allocated memory.
CHECK_EQ(faked_space.CommittedMemory(), committed_memory);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), committed_memory);
// Capacity will be one OS page minus the page header.
CHECK_EQ(faked_space.Capacity(),
CHECK_EQ(faked_space->Capacity(),
committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
}
......@@ -838,13 +858,14 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
// Create a read-only space and allocate some memory, shrink the pages and
// check the allocated object size is as expected.
ReadOnlySpace faked_space(heap);
ReadOnlySpaceScope scope(heap);
ReadOnlySpace* faked_space = scope.space();
// Initially no memory.
CHECK_EQ(faked_space.Size(), 0);
CHECK_EQ(faked_space.Capacity(), 0);
CHECK_EQ(faked_space.CommittedMemory(), 0);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);
CHECK_EQ(faked_space->Size(), 0);
CHECK_EQ(faked_space->Capacity(), 0);
CHECK_EQ(faked_space->CommittedMemory(), 0);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
MemoryAllocator* allocator = heap->memory_allocator();
// Allocate an object just under an OS page in size.
......@@ -860,28 +881,28 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
#endif
HeapObject object =
faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
faked_space->AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
CHECK_EQ(object.address() % alignment, 0);
object =
faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
faked_space->AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
CHECK_EQ(object.address() % alignment, 0);
faked_space.ShrinkPages();
faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
faked_space->ShrinkPages();
faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
// Allocated objects size may contain 4 bytes of padding on 32-bit or
// with pointer compression.
CHECK_EQ(faked_space.Size(), object_size + RoundUp(object_size, alignment));
CHECK_EQ(faked_space->Size(), object_size + RoundUp(object_size, alignment));
size_t committed_memory = RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space.Size(),
MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space->Size(),
allocator->GetCommitPageSize());
CHECK_EQ(faked_space.CommittedMemory(), committed_memory);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), committed_memory);
// Capacity will be 3 OS pages minus the page header.
CHECK_EQ(faked_space.Capacity(),
CHECK_EQ(faked_space->Capacity(),
committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
}
......@@ -892,13 +913,14 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
// Create a read-only space and allocate some memory, shrink the pages and
// check the allocated object size is as expected.
ReadOnlySpace faked_space(heap);
ReadOnlySpaceScope scope(heap);
ReadOnlySpace* faked_space = scope.space();
// Initially no memory.
CHECK_EQ(faked_space.Size(), 0);
CHECK_EQ(faked_space.Capacity(), 0);
CHECK_EQ(faked_space.CommittedMemory(), 0);
CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);
CHECK_EQ(faked_space->Size(), 0);
CHECK_EQ(faked_space->Capacity(), 0);
CHECK_EQ(faked_space->CommittedMemory(), 0);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
MemoryAllocator* allocator = heap->memory_allocator();
......@@ -910,23 +932,23 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
kTaggedSize);
CHECK_GT(object_size * 2,
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE));
faked_space.AllocateRaw(object_size, kWordAligned);
faked_space->AllocateRaw(object_size, kWordAligned);
// Then allocate another so it expands the space to two pages.
faked_space.AllocateRaw(object_size, kWordAligned);
faked_space->AllocateRaw(object_size, kWordAligned);
faked_space.ShrinkPages();
faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
faked_space->ShrinkPages();
faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
// Allocated objects size.
CHECK_EQ(faked_space.Size(), object_size * 2);
CHECK_EQ(faked_space->Size(), object_size * 2);
// Amount of OS allocated memory.
size_t committed_memory_per_page =
RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
allocator->GetCommitPageSize());
CHECK_EQ(faked_space.CommittedMemory(), 2 * committed_memory_per_page);
CHECK_EQ(faked_space.CommittedPhysicalMemory(),
CHECK_EQ(faked_space->CommittedMemory(), 2 * committed_memory_per_page);
CHECK_EQ(faked_space->CommittedPhysicalMemory(),
2 * committed_memory_per_page);
// Capacity will be the space up to the amount of committed memory minus the
......@@ -935,7 +957,7 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
allocator->GetCommitPageSize()) -
MemoryChunkLayout::ObjectStartOffsetInDataPage();
CHECK_EQ(faked_space.Capacity(), 2 * capacity_per_page);
CHECK_EQ(faked_space->Capacity(), 2 * capacity_per_page);
}
} // namespace heap
......