Commit 93cfa458 authored by Dan Elphick, committed by Commit Bot

[heap] Add SharedReadOnlySpace for shared RO_SPACE

When RO_SPACE is to be shared, this constructs a SharedReadOnlySpace
object (via ReadOnlySpace::DetachPagesAndAddToArtifacts) that contains
the shared artifacts, and the original ReadOnlySpace is destroyed. This
is mostly a conceptual change: SharedReadOnlySpace subclasses
ReadOnlySpace and behaves identically to it.
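
In outline, the handover works as follows (a simplified standalone
model with illustrative stand-in types, not the real V8 classes):

// Simplified model: the original space donates its pages to the shared
// artifacts, and a subclass constructed from those artifacts replaces it.
#include <memory>
#include <utility>
#include <vector>

struct PageModel {};

struct ArtifactsModel {
  std::vector<std::unique_ptr<PageModel>> pages;  // owns the shared pages
};

class ReadOnlySpaceModel {
 public:
  virtual ~ReadOnlySpaceModel() = default;
  void DetachPagesAndAddToArtifacts(
      const std::shared_ptr<ArtifactsModel>& artifacts) {
    artifacts->pages = std::move(pages_);  // artifacts now own the pages
  }

 protected:
  std::vector<std::unique_ptr<PageModel>> pages_;
};

// Behaves identically to the base class; it only references shared state.
class SharedReadOnlySpaceModel final : public ReadOnlySpaceModel {
 public:
  explicit SharedReadOnlySpaceModel(std::shared_ptr<ArtifactsModel> artifacts)
      : artifacts_(std::move(artifacts)) {}

 private:
  std::shared_ptr<ArtifactsModel> artifacts_;  // keeps shared pages alive
};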

Also adds a ReadOnlyArtifacts class that contains the shared artifacts.
It is stored as a std::weak_ptr in a global, so the artifacts are
destroyed once all std::shared_ptrs to them are released. Since this
allows the ReadOnlyHeap to be re-created after all Isolates have been
destroyed, ReadOnlyHeap::ClearSharedHeapForTest is removed along with
all of its uses, as that clean-up now happens automatically.
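
A minimal standalone sketch of that lifetime scheme (the mutex, names
and helper below are illustrative, not the real V8 declarations):

#include <memory>
#include <mutex>

struct Artifacts {};  // stand-in for ReadOnlyArtifacts

std::mutex g_artifacts_mutex;
std::weak_ptr<Artifacts> g_artifacts;  // global; does not keep it alive

std::shared_ptr<Artifacts> GetOrCreateArtifacts() {
  std::lock_guard<std::mutex> guard(g_artifacts_mutex);
  // Reuse the artifacts if some live Isolate still holds them ...
  std::shared_ptr<Artifacts> strong = g_artifacts.lock();
  if (!strong) {
    // ... otherwise (first Isolate, or all previous ones are gone) recreate.
    strong = std::make_shared<Artifacts>();
    g_artifacts = strong;
  }
  return strong;  // each Isolate keeps this shared_ptr until it is destroyed
}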

The ReadOnlyArtifacts class now owns all the shared artifacts and is
responsible for deleting them on exit (mostly via unique_ptr).

Bug: v8:10454
Change-Id: I2fe7110a4ab9cf8719dd198bafc1d083bee641b1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2154204
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67440}
parent 32726006
......@@ -16,6 +16,18 @@ template <class T>
class List {
public:
List() : front_(nullptr), back_(nullptr) {}
List(List&& other) V8_NOEXCEPT : front_(std::exchange(other.front_, nullptr)),
back_(std::exchange(other.back_, nullptr)) {}
List& operator=(List&& other) V8_NOEXCEPT {
front_ = std::exchange(other.front_, nullptr);
back_ = std::exchange(other.back_, nullptr);
return *this;
}
void ShallowCopyTo(List* other) const {
other->front_ = front_;
other->back_ = back_;
}
void PushBack(T* element) {
DCHECK(!element->list_node().next());
......
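
The distinction between the new move operations and ShallowCopyTo
matters later in this change: ReadOnlyArtifacts::TransferPages takes
ownership of the page list by move, while SharedReadOnlySpace receives
only a shallow alias of the same chunks, which is why its destructor
must clear its list rather than free the chunks. A standalone sketch of
the two behaviours (a simplified stand-in, not the real v8::base::List):

#include <utility>

// Simplified stand-in for v8::base::List<T>; nodes are owned elsewhere.
template <class T>
class ListSketch {
 public:
  ListSketch() = default;
  // Move: ownership of the chain transfers; the source is left empty.
  ListSketch(ListSketch&& other) noexcept
      : front_(std::exchange(other.front_, nullptr)),
        back_(std::exchange(other.back_, nullptr)) {}
  // Shallow copy: both lists now point at the very same node chain, so at
  // most one of them may be allowed to tear the nodes down.
  void ShallowCopyTo(ListSketch* other) const {
    other->front_ = front_;
    other->back_ = back_;
  }
  // How a non-owning alias can drop its references without freeing nodes
  // (analogous to assigning a freshly constructed list).
  void DropReferences() { front_ = back_ = nullptr; }

 private:
  T* front_ = nullptr;
  T* back_ = nullptr;
};
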
......@@ -2838,7 +2838,11 @@ void Isolate::Delete(Isolate* isolate) {
SetIsolateThreadLocals(saved_isolate, saved_data);
}
void Isolate::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
void Isolate::SetUpFromReadOnlyArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts) {
artifacts_ = artifacts;
DCHECK_NOT_NULL(artifacts);
ReadOnlyHeap* ro_heap = artifacts->read_only_heap();
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_heap_ != nullptr, read_only_heap_ == ro_heap);
read_only_heap_ = ro_heap;
......
......@@ -87,6 +87,7 @@ class MicrotaskQueue;
class OptimizingCompileDispatcher;
class PersistentHandles;
class PersistentHandlesList;
class ReadOnlyArtifacts;
class ReadOnlyDeserializer;
class RegExpStack;
class RootVisitor;
......@@ -524,7 +525,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// for legacy API reasons.
static void Delete(Isolate* isolate);
void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
void SetUpFromReadOnlyArtifacts(std::shared_ptr<ReadOnlyArtifacts> artifacts);
// Returns allocation mode of this isolate.
V8_INLINE IsolateAllocationMode isolate_allocation_mode();
......@@ -1630,6 +1631,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
std::unique_ptr<IsolateAllocator> isolate_allocator_;
Heap heap_;
ReadOnlyHeap* read_only_heap_ = nullptr;
std::shared_ptr<ReadOnlyArtifacts> artifacts_;
const int id_;
EntryStackItem* entry_stack_ = nullptr;
......
......@@ -5216,6 +5216,12 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
}
void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
CHECK(V8_SHARED_RO_HEAP_BOOL);
delete read_only_space_;
space_[RO_SPACE] = read_only_space_ = space;
}
void Heap::SetUpSpaces() {
// Ensure SetUpFromReadOnlyHeap has been run.
DCHECK_NOT_NULL(read_only_space_);
......
......@@ -87,6 +87,7 @@ class RootVisitor;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
class SharedReadOnlySpace;
class Space;
class StressScavengeObserver;
class TimedHistogram;
......@@ -712,6 +713,8 @@ class Heap {
// Sets read-only heap and space.
void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
void ReplaceReadOnlySpace(SharedReadOnlySpace* shared_ro_space);
// Sets up the heap memory without creating any objects.
void SetUpSpaces();
......
......@@ -6,8 +6,9 @@
#include <cstring>
#include "src/base/lazy-instance.h"
#include "src/base/lsan.h"
#include "src/base/once.h"
#include "src/base/platform/mutex.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/spaces.h"
......@@ -20,7 +21,28 @@ namespace v8 {
namespace internal {
#ifdef V8_SHARED_RO_HEAP
V8_DECLARE_ONCE(setup_ro_heap_once);
namespace {
// Mutex used to ensure that ReadOnlyArtifacts creation is only done once.
base::LazyMutex read_only_heap_creation_mutex_ = LAZY_MUTEX_INITIALIZER;
// Weak pointer holding ReadOnlyArtifacts. ReadOnlyHeap::SetUp creates a
// std::shared_ptr from this when it attempts to reuse it. Since all Isolates
// hold a std::shared_ptr to this, the object is destroyed when no Isolates
// remain.
base::LazyInstance<std::weak_ptr<ReadOnlyArtifacts>>::type
read_only_artifacts_ = LAZY_INSTANCE_INITIALIZER;
std::shared_ptr<ReadOnlyArtifacts> InitializeSharedReadOnlyArtifacts() {
auto artifacts = std::make_shared<ReadOnlyArtifacts>();
*read_only_artifacts_.Pointer() = artifacts;
return artifacts;
}
} // namespace
// This ReadOnlyHeap instance will only be accessed by Isolates that are already
// set up. As such it doesn't need to be guarded by a mutex or shared_ptrs,
// since an already set up Isolate will hold a shared_ptr to
// read_only_artifacts_.
ReadOnlyHeap* ReadOnlyHeap::shared_ro_heap_ = nullptr;
#endif
......@@ -28,45 +50,54 @@ ReadOnlyHeap* ReadOnlyHeap::shared_ro_heap_ = nullptr;
void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(isolate);
#ifdef V8_SHARED_RO_HEAP
bool call_once_ran = false;
base::Optional<uint32_t> des_checksum;
#ifdef DEBUG
if (des != nullptr) des_checksum = des->GetChecksum();
#endif // DEBUG
base::CallOnce(&setup_ro_heap_once,
[isolate, des, des_checksum, &call_once_ran]() {
USE(des_checksum);
shared_ro_heap_ = CreateAndAttachToIsolate(isolate);
if (des != nullptr) {
bool read_only_heap_created = false;
if (des != nullptr) {
base::MutexGuard guard(read_only_heap_creation_mutex_.Pointer());
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
if (!artifacts) {
artifacts = InitializeSharedReadOnlyArtifacts();
shared_ro_heap_ = CreateAndAttachToIsolate(isolate, artifacts);
#ifdef DEBUG
shared_ro_heap_->read_only_blob_checksum_ = des_checksum;
shared_ro_heap_->read_only_blob_checksum_ = des->GetChecksum();
#endif // DEBUG
shared_ro_heap_->DeseralizeIntoIsolate(isolate, des);
}
call_once_ran = true;
});
shared_ro_heap_->DeseralizeIntoIsolate(isolate, des);
read_only_heap_created = true;
} else {
isolate->SetUpFromReadOnlyArtifacts(artifacts);
}
} else {
// This path should only be taken in mksnapshot. It should only be run
// once, before tearing down the Isolate that holds this ReadOnlyArtifacts,
// and it is not thread-safe.
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
CHECK(!artifacts);
artifacts = InitializeSharedReadOnlyArtifacts();
shared_ro_heap_ = CreateAndAttachToIsolate(isolate, artifacts);
read_only_heap_created = true;
}
USE(call_once_ran);
USE(des_checksum);
#ifdef DEBUG
const base::Optional<uint32_t> last_checksum =
shared_ro_heap_->read_only_blob_checksum_;
if (last_checksum) {
// The read-only heap was set up from a snapshot. Make sure it's always
// the same snapshot.
CHECK_WITH_MSG(des_checksum,
"Attempt to create the read-only heap after "
"already creating from a snapshot.");
CHECK_EQ(last_checksum, des_checksum);
CHECK_WITH_MSG(des->GetChecksum(),
"Attempt to create the read-only heap after already "
"creating from a snapshot.");
CHECK_EQ(last_checksum, des->GetChecksum());
} else {
// The read-only heap objects were created. Make sure this happens only
// once, during this call.
CHECK(call_once_ran);
CHECK(read_only_heap_created);
}
#endif // DEBUG
USE(read_only_heap_created);
isolate->SetUpFromReadOnlyHeap(shared_ro_heap_);
if (des != nullptr) {
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
......@@ -74,7 +105,8 @@ void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
kEntriesCount * sizeof(Address));
}
#else
auto* ro_heap = CreateAndAttachToIsolate(isolate);
auto artifacts = std::make_shared<ReadOnlyArtifacts>();
auto* ro_heap = CreateAndAttachToIsolate(isolate, artifacts);
if (des != nullptr) ro_heap->DeseralizeIntoIsolate(isolate, des);
#endif // V8_SHARED_RO_HEAP
}
......@@ -92,24 +124,30 @@ void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
}
// static
ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(Isolate* isolate) {
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
isolate->SetUpFromReadOnlyHeap(ro_heap);
return ro_heap;
ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(
Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts) {
std::unique_ptr<ReadOnlyHeap> ro_heap(
new ReadOnlyHeap(new ReadOnlySpace(isolate->heap())));
artifacts->set_read_only_heap(std::move(ro_heap));
isolate->SetUpFromReadOnlyArtifacts(artifacts);
return artifacts->read_only_heap();
}
void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
DCHECK(!init_complete_);
read_only_space_->ShrinkImmortalImmovablePages();
#ifdef V8_SHARED_RO_HEAP
std::shared_ptr<ReadOnlyArtifacts> artifacts(*read_only_artifacts_.Pointer());
read_only_space()->DetachPagesAndAddToArtifacts(artifacts);
read_only_space_ = artifacts->shared_read_only_space();
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
// N.B. Since pages are manually allocated with mmap, Lsan doesn't track
// their pointers. Seal explicitly ignores the necessary objects.
// N.B. Since pages are manually allocated with mmap, Lsan doesn't track their
// pointers. Seal explicitly ignores the necessary objects.
LSAN_IGNORE_OBJECT(this);
read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget);
#else
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
#endif
......@@ -119,7 +157,6 @@ void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
void ReadOnlyHeap::OnHeapTearDown() {
#ifndef V8_SHARED_RO_HEAP
delete read_only_space_;
delete this;
#endif
}
......@@ -128,17 +165,6 @@ void ReadOnlyHeap::OnHeapTearDown() {
const ReadOnlyHeap* ReadOnlyHeap::Instance() { return shared_ro_heap_; }
#endif
// static
void ReadOnlyHeap::ClearSharedHeapForTest() {
#ifdef V8_SHARED_RO_HEAP
DCHECK_NOT_NULL(shared_ro_heap_);
// TODO(v8:7464): Just leak read-only space for now. The paged-space heap
// is null so there isn't a nice way to do this.
shared_ro_heap_ = nullptr;
setup_ro_heap_once = 0;
#endif
}
// static
bool ReadOnlyHeap::Contains(Address address) {
return MemoryChunk::FromAddress(address)->InReadOnlySpace();
......
......@@ -5,6 +5,7 @@
#ifndef V8_HEAP_READ_ONLY_HEAP_H_
#define V8_HEAP_READ_ONLY_HEAP_H_
#include <memory>
#include <utility>
#include "src/base/macros.h"
......@@ -18,6 +19,7 @@ namespace internal {
class Isolate;
class Page;
class ReadOnlyArtifacts;
class ReadOnlyDeserializer;
class ReadOnlySpace;
......@@ -28,15 +30,19 @@ class ReadOnlyHeap final {
static constexpr size_t kEntriesCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
// If necessary creates read-only heap and initializes its artifacts (if
// the deserializer is provided). Then attaches the read-only heap to the
// isolate.
// If necessary creates read-only heap and initializes its artifacts (if the
// deserializer is provided). Then attaches the read-only heap to the isolate.
// If the deserializer is not provided, then the read-only heap will only
// finish initializing when initial heap object creation in the Isolate is
// completed, which is signalled by calling OnCreateHeapObjectsComplete. When
// V8_SHARED_RO_HEAP is enabled, a lock will be held until that method is
// called.
// TODO(v8:7464): Ideally we'd create this without needing a heap.
static void SetUp(Isolate* isolate, ReadOnlyDeserializer* des);
// Indicates that the isolate has been set up and all read-only space objects
// have been created and will not be written to. This is not thread safe, and
// should really only be used during snapshot creation or when read-only heap
// sharing is disabled.
// have been created and will not be written to. This should only be called if
// a deserializer was not previously provided to SetUp. When V8_SHARED_RO_HEAP
// is enabled, this releases the ReadOnlyHeap creation lock.
void OnCreateHeapObjectsComplete(Isolate* isolate);
// Indicates that the current isolate no longer requires the read-only heap
// and it may be safely disposed of.
......@@ -56,10 +62,6 @@ class ReadOnlyHeap final {
V8_EXPORT_PRIVATE inline static ReadOnlyRoots GetReadOnlyRoots(
HeapObject object);
// Clears any shared read-only heap artifacts for testing, forcing read-only
// heap to be re-created on next set up.
V8_EXPORT_PRIVATE static void ClearSharedHeapForTest();
// Extends the read-only object cache with a new zero smi and returns a
// reference to it.
Object* ExtendReadOnlyObjectCache();
......@@ -71,14 +73,15 @@ class ReadOnlyHeap final {
private:
// Creates a new read-only heap and attaches it to the provided isolate.
static ReadOnlyHeap* CreateAndAttachToIsolate(Isolate* isolate);
// Runs the read-only deserailizer and calls InitFromIsolate to complete
static ReadOnlyHeap* CreateAndAttachToIsolate(
Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts);
// Runs the read-only deserializer and calls InitFromIsolate to complete
// read-only heap initialization.
void DeseralizeIntoIsolate(Isolate* isolate, ReadOnlyDeserializer* des);
// Initializes read-only heap from an already set-up isolate, copying
// read-only roots from the isolate. This then seals the space off from
// further writes, marks it as read-only and detaches it from the heap (unless
// sharing is disabled).
// further writes, marks it as read-only and detaches it from the heap
// (unless sharing is disabled).
void InitFromIsolate(Isolate* isolate);
bool init_complete_ = false;
......
......@@ -4025,6 +4025,51 @@ ReadOnlySpace::ReadOnlySpace(Heap* heap)
is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
}
ReadOnlyArtifacts::~ReadOnlyArtifacts() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
MemoryChunk* next_chunk;
for (MemoryChunk* chunk = pages_.front(); chunk != nullptr;
chunk = next_chunk) {
void* chunk_address = reinterpret_cast<void*>(chunk->address());
page_allocator->SetPermissions(chunk_address, chunk->size(),
PageAllocator::kReadWrite);
next_chunk = chunk->list_node().next();
size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
CHECK(page_allocator->FreePages(chunk_address, size));
}
}
void ReadOnlyArtifacts::set_read_only_heap(
std::unique_ptr<ReadOnlyHeap> read_only_heap) {
read_only_heap_ = std::move(read_only_heap);
}
SharedReadOnlySpace::~SharedReadOnlySpace() {
// Clear the memory chunk list before the space is deleted, so that the
// inherited destructors don't try to destroy the MemoryChunks themselves.
memory_chunk_list_ = base::List<MemoryChunk>();
}
SharedReadOnlySpace::SharedReadOnlySpace(
Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts)
: ReadOnlySpace(heap) {
artifacts->pages().ShallowCopyTo(&memory_chunk_list_);
is_marked_read_only_ = true;
accounting_stats_ = artifacts->accounting_stats();
}
void ReadOnlySpace::DetachPagesAndAddToArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts) {
Heap* heap = ReadOnlySpace::heap();
Seal(SealMode::kDetachFromHeapAndForget);
artifacts->set_accounting_stats(accounting_stats_);
artifacts->TransferPages(std::move(memory_chunk_list_));
artifacts->set_shared_read_only_space(
std::make_unique<SharedReadOnlySpace>(heap, artifacts));
heap->ReplaceReadOnlySpace(artifacts->shared_read_only_space());
}
void ReadOnlyPage::MakeHeaderRelocatable() {
ReleaseAllocatedMemoryNeededForWritableChunk();
// Detached read-only space needs to have a valid marking bitmap and free list
......@@ -4115,7 +4160,10 @@ void ReadOnlySpace::Seal(SealMode ro_mode) {
void ReadOnlySpace::Unseal() {
DCHECK(is_marked_read_only_);
SetPermissionsForPages(heap()->memory_allocator(), PageAllocator::kReadWrite);
if (HasPages()) {
SetPermissionsForPages(heap()->memory_allocator(),
PageAllocator::kReadWrite);
}
is_marked_read_only_ = false;
}
......
......@@ -1705,6 +1705,16 @@ class AllocationStats {
public:
AllocationStats() { Clear(); }
AllocationStats& operator=(const AllocationStats& stats) V8_NOEXCEPT {
capacity_ = stats.capacity_.load();
max_capacity_ = stats.max_capacity_;
size_ = stats.size_;
#ifdef DEBUG
allocated_on_page_ = stats.allocated_on_page_;
#endif
return *this;
}
// Zero out all the allocation statistics (i.e., no capacity).
void Clear() {
capacity_ = 0;
......@@ -3225,6 +3235,39 @@ class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
void RefillFreeList() override;
};
// -----------------------------------------------------------------------------
// Artifacts used to construct a new SharedReadOnlySpace
class ReadOnlyArtifacts {
public:
~ReadOnlyArtifacts();
void set_accounting_stats(const AllocationStats& stats) { stats_ = stats; }
void set_shared_read_only_space(
std::unique_ptr<SharedReadOnlySpace> shared_space) {
shared_read_only_space_ = std::move(shared_space);
}
SharedReadOnlySpace* shared_read_only_space() {
return shared_read_only_space_.get();
}
base::List<MemoryChunk>& pages() { return pages_; }
void TransferPages(base::List<MemoryChunk>&& pages) {
pages_ = std::move(pages);
}
const AllocationStats& accounting_stats() const { return stats_; }
void set_read_only_heap(std::unique_ptr<ReadOnlyHeap> read_only_heap);
ReadOnlyHeap* read_only_heap() { return read_only_heap_.get(); }
private:
base::List<MemoryChunk> pages_;
AllocationStats stats_;
std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_;
std::unique_ptr<ReadOnlyHeap> read_only_heap_;
};
// -----------------------------------------------------------------------------
// Read Only space for all Immortal Immovable and Immutable objects
......@@ -3232,8 +3275,11 @@ class ReadOnlySpace : public PagedSpace {
public:
explicit ReadOnlySpace(Heap* heap);
// TODO(v8:7464): Remove this once PagedSpace::Unseal no longer writes to
// memory_chunk_list_.
// Detach the pages and add them to the artifacts, for use in creating a
// SharedReadOnlySpace.
void DetachPagesAndAddToArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts);
~ReadOnlySpace() override { Unseal(); }
bool writable() const { return !is_marked_read_only_; }
......@@ -3256,15 +3302,17 @@ class ReadOnlySpace : public PagedSpace {
size_t Available() override { return 0; }
private:
// Unseal the space after it has been sealed, by making it writable.
// TODO(v8:7464): Only possible if the space hasn't been detached.
void Unseal();
protected:
void SetPermissionsForPages(MemoryAllocator* memory_allocator,
PageAllocator::Permission access);
bool is_marked_read_only_ = false;
private:
// Unseal the space after it has been sealed, by making it writable.
// TODO(v8:7464): Only possible if the space hasn't been detached.
void Unseal();
//
// String padding must be cleared just before serialization and therefore the
// string padding in the space will already have been cleared if the space was
......@@ -3272,6 +3320,12 @@ class ReadOnlySpace : public PagedSpace {
bool is_string_padding_cleared_;
};
class SharedReadOnlySpace : public ReadOnlySpace {
public:
SharedReadOnlySpace(Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts);
~SharedReadOnlySpace() override;
};
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space.
......
......@@ -6479,7 +6479,6 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
v8::Context::Scope context_scope(context);
}
isolate->Dispose();
ReadOnlyHeap::ClearSharedHeapForTest();
}
}
......
......@@ -1070,7 +1070,6 @@ int main(int argc, char* argv[]) {
printf("Embedding script '%s'\n", argv[i]);
startup_data = i::CreateSnapshotDataBlobInternal(
v8::SnapshotCreator::FunctionCodeHandling::kClear, argv[i], nullptr);
v8::internal::ReadOnlyHeap::ClearSharedHeapForTest();
argv[i] = nullptr;
}
}
......