Commit a537be46 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Track unprotected chunks in LocalHeap

CodePageCollectionMemoryModificationScope now increments a per-thread
counter and inserts unprotected code chunks into a thread-local set.
This state moves from Heap into LocalHeap.
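
A sketch of the new per-thread state, mirroring the LocalHeap hunk
further down in this diff (comments added here for illustration only):

  // Per-thread members on LocalHeap:
  // Code chunks this thread has made writable and still has to
  // re-protect once its last open scope closes.
  std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
  // Number of CodePageCollectionMemoryModificationScopes currently
  // open on this thread.
  uintptr_t code_page_collection_memory_modification_scope_depth_{0};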

We can no longer bound the unprotect counter on the MemoryChunk with
kMaxWriteUnprotectCounter: with concurrent Sparkplug, N threads may now
allocate a code object on the same page, and since
CodePageCollectionMemoryModificationScope no longer knows about the
other threads, each thread has to increment that counter by 1. We now
DCHECK the nesting depth in the scope's constructor instead.
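
For illustration, the chunk-level counter is now unbounded while the
per-thread nesting depth stays bounded; this mirrors the new
Heap::IncrementCodePageCollectionMemoryModificationScopeDepth() below,
with comments added here:

  // N concurrent Sparkplug threads allocating on the same code page
  // each bump the chunk's write_unprotect_counter_ by one, so it may
  // reach any N; the DCHECKs against kMaxWriteUnprotectCounter are
  // therefore gone. Only the per-thread nesting depth stays bounded:
  local_heap->code_page_collection_memory_modification_scope_depth_++;
  #if DEBUG
  // Maximum number of nested scopes per thread.
  static constexpr int kMaxCodePageCollectionMemoryModificationScopeDepth = 2;
  DCHECK_LE(local_heap->code_page_collection_memory_modification_scope_depth_,
            kMaxCodePageCollectionMemoryModificationScopeDepth);
  #endif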

Chunks still need to be removed from `unprotected_memory_chunks_` when
an executable MemoryChunk is freed during GC. Fortunately this remains
possible: all threads are in a safepoint during GC, so the chunk can be
removed from each thread-local set without any synchronization.
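
The removal path then simply walks every thread's set at the safepoint,
as in the Heap::UnregisterUnprotectedMemoryChunk hunk below (comments
added here for illustration):

  void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
    // Only reached while all threads are stopped in the GC safepoint,
    // so erasing from each thread-local set needs no extra locking.
    safepoint()->IterateLocalHeaps([chunk](LocalHeap* local_heap) {
      local_heap->unprotected_memory_chunks_.erase(chunk);
    });
  }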

Bug: chromium:1330887
Change-Id: Icefc61b8d8de113d8dcfb1cf64122d12dd9798c4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3688516
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81047}
parent 03dcbc88
@@ -3438,6 +3438,10 @@ void Isolate::Deinit() {
DetachFromSharedIsolate();
}
// Since there are no other threads left, we can lock this mutex without any
// ceremony. This signals to the tear down code that we are in a safepoint.
base::RecursiveMutexGuard safepoint(&heap_.safepoint()->local_heaps_mutex_);
ReleaseSharedPtrs();
builtins_.TearDown();
@@ -5583,7 +5587,9 @@ LocalHeap* Isolate::main_thread_local_heap() {
LocalHeap* Isolate::CurrentLocalHeap() {
LocalHeap* local_heap = LocalHeap::Current();
return local_heap ? local_heap : main_thread_local_heap();
if (local_heap) return local_heap;
DCHECK_EQ(ThreadId::Current(), thread_id());
return main_thread_local_heap();
}
// |chunk| is either a Page or an executable LargePage.
......
@@ -622,6 +622,29 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
}
}
void Heap::IncrementCodePageCollectionMemoryModificationScopeDepth() {
LocalHeap* local_heap = isolate()->CurrentLocalHeap();
local_heap->code_page_collection_memory_modification_scope_depth_++;
#if DEBUG
// Maximum number of nested scopes.
static constexpr int kMaxCodePageCollectionMemoryModificationScopeDepth = 2;
DCHECK_LE(local_heap->code_page_collection_memory_modification_scope_depth_,
kMaxCodePageCollectionMemoryModificationScopeDepth);
#endif
}
bool Heap::DecrementCodePageCollectionMemoryModificationScopeDepth() {
LocalHeap* local_heap = isolate()->CurrentLocalHeap();
local_heap->code_page_collection_memory_modification_scope_depth_--;
return local_heap->code_page_collection_memory_modification_scope_depth_ == 0;
}
uintptr_t Heap::code_page_collection_memory_modification_scope_depth() {
LocalHeap* local_heap = isolate()->CurrentLocalHeap();
return local_heap->code_page_collection_memory_modification_scope_depth_;
}
CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
if (heap_->write_protect_code_memory()) {
heap_->decrement_code_space_memory_modification_scope_depth();
@@ -651,8 +674,10 @@ CodePageCollectionMemoryModificationScope::
CodePageCollectionMemoryModificationScope::
~CodePageCollectionMemoryModificationScope() {
if (heap_->write_protect_code_memory()) {
if (heap_->DecrementCodePageCollectionMemoryModificationScopeDepth()) {
heap_->ProtectUnprotectedMemoryChunks();
}
}
}
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
......
@@ -2769,42 +2769,36 @@ void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
UnprotectMemoryOrigin origin) {
if (!write_protect_code_memory()) return;
base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
// CodePageCollectionMemoryModificationScope can be used in multiple threads,
// so we have to check its depth behind the lock.
if (code_page_collection_memory_modification_scope_depth_ > 0) {
if (unprotected_memory_chunks_.insert(chunk).second) {
// No need to register any unprotected chunks during a GC. This also avoids
// the use of CurrentLocalHeap() on GC workers, which don't have a LocalHeap.
if (code_space_memory_modification_scope_depth_ > 0) return;
LocalHeap* local_heap = isolate()->CurrentLocalHeap();
DCHECK_GT(local_heap->code_page_collection_memory_modification_scope_depth_,
0);
if (local_heap->unprotected_memory_chunks_.insert(chunk).second) {
chunk->SetCodeModificationPermissions();
}
} else {
DCHECK_GT(code_space_memory_modification_scope_depth_, 0);
}
}
void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
safepoint()->IterateLocalHeaps([chunk](LocalHeap* local_heap) {
local_heap->unprotected_memory_chunks_.erase(chunk);
});
}
void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object,
UnprotectMemoryOrigin origin) {
if (!write_protect_code_memory()) return;
UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object), origin);
}
void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
unprotected_memory_chunks_.erase(chunk);
}
void Heap::ProtectUnprotectedMemoryChunks() {
base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
// CodePageCollectionMemoryModificationScope can be used in multiple threads,
// so we have to check its depth behind the lock.
if (--code_page_collection_memory_modification_scope_depth_ > 0) return;
for (auto chunk = unprotected_memory_chunks_.begin();
chunk != unprotected_memory_chunks_.end(); chunk++) {
DCHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
(*chunk)->SetDefaultCodePermissions();
LocalHeap* local_heap = isolate()->CurrentLocalHeap();
for (MemoryChunk* chunk : local_heap->unprotected_memory_chunks_) {
DCHECK(memory_allocator()->IsMemoryChunkExecutable(chunk));
chunk->SetDefaultCodePermissions();
}
unprotected_memory_chunks_.clear();
local_heap->unprotected_memory_chunks_.clear();
}
bool Heap::ExternalStringTable::Contains(String string) {
@@ -6103,6 +6097,11 @@ void Heap::StartTearDown() {
void Heap::TearDown() {
DCHECK_EQ(gc_state(), TEAR_DOWN);
// Assert that there are no background threads left and no executable memory
// chunks are unprotected.
safepoint()->AssertMainThreadIsOnlyThread();
DCHECK(main_thread_local_heap()->unprotected_memory_chunks_.empty());
if (FLAG_concurrent_marking || FLAG_parallel_marking)
concurrent_marking_->Pause();
......
@@ -671,9 +671,9 @@ class Heap {
void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
void IncrementCodePageCollectionMemoryModificationScopeDepth() {
code_page_collection_memory_modification_scope_depth_++;
}
inline void IncrementCodePageCollectionMemoryModificationScopeDepth();
inline bool DecrementCodePageCollectionMemoryModificationScopeDepth();
inline uintptr_t code_page_collection_memory_modification_scope_depth();
inline HeapState gc_state() const {
return gc_state_.load(std::memory_order_relaxed);
@@ -2240,10 +2240,6 @@ class Heap {
// Holds the number of open CodeSpaceMemoryModificationScopes.
uintptr_t code_space_memory_modification_scope_depth_ = 0;
// Holds the number of open CodePageCollectionMemoryModificationScopes.
std::atomic<uintptr_t> code_page_collection_memory_modification_scope_depth_{
0};
std::atomic<HeapState> gc_state_{NOT_IN_GC};
int gc_post_processing_depth_ = 0;
@@ -2451,9 +2447,6 @@ class Heap {
HeapObject pending_layout_change_object_;
base::Mutex unprotected_memory_chunks_mutex_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
std::unordered_map<HeapObject, HeapObject, Object::Hasher> retainer_;
std::unordered_map<HeapObject, Root, Object::Hasher> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
......
@@ -21,8 +21,9 @@ namespace v8 {
namespace internal {
class Heap;
class Safepoint;
class LocalHandles;
class MemoryChunk;
class Safepoint;
// LocalHeap is used by the GC to track all threads with heap access in order to
// stop them before performing a collection. LocalHeaps can be either Parked or
@@ -306,6 +307,9 @@ class V8_EXPORT_PRIVATE LocalHeap {
LocalHeap* prev_;
LocalHeap* next_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
uintptr_t code_page_collection_memory_modification_scope_depth_{0};
std::unique_ptr<LocalHandles> handles_;
std::unique_ptr<PersistentHandles> persistent_handles_;
std::unique_ptr<MarkingBarrier> marking_barrier_;
......
@@ -60,7 +60,6 @@ void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
return;
}
write_unprotect_counter_--;
DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 0) {
Address protect_start =
address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
@@ -89,7 +88,6 @@ void MemoryChunk::SetCodeModificationPermissions() {
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
write_unprotect_counter_++;
DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 1) {
Address unprotect_start =
address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
......
@@ -50,9 +50,6 @@ class MemoryChunk : public BasicMemoryChunk {
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
// Maximum number of nested code memory modification scopes.
static const int kMaxWriteUnprotectCounter = 3;
MemoryChunk(Heap* heap, BaseSpace* space, size_t size, Address area_start,
Address area_end, VirtualMemory reservation,
Executability executable, PageSize page_size);
@@ -269,8 +266,6 @@ class MemoryChunk : public BasicMemoryChunk {
// counter is decremented when a component resets to read+executable.
// If Value() == 0 => The memory is read and executable.
// If Value() >= 1 => The Memory is read and writable (and maybe executable).
// The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
// excessive nesting of scopes.
// All executable MemoryChunks are allocated rw based on the assumption that
// they will be used immediately for an allocation. They are initialized
// with the number of open CodeSpaceMemoryModificationScopes. The caller
......
@@ -147,6 +147,7 @@ class IsolateSafepoint final {
friend class GlobalSafepoint;
friend class GlobalSafepointScope;
friend class Isolate;
friend class LocalHeap;
friend class SafepointScope;
};
......
@@ -36,6 +36,7 @@
#include "src/base/platform/platform.h"
#include "src/common/globals.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
@@ -159,6 +160,7 @@ static unsigned int PseudorandomAreaSize() {
TEST(MemoryChunk) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
SafepointScope safepoint(heap);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t area_size;
......