Commit 8a6b7d8b authored by Victor Gomes, committed by V8 LUCI CQ

[heap] Support mprotect off thread

While compiling concurrently, we change the permissions of the page
containing the new code object to RWX, so the main thread can continue
executing any code that may already live on the same page.

Once no thread is compiling new code anymore, we change the permissions
of all affected pages back to RX.

We also initialise code object pages as RWX immediately by default.
Otherwise, a new code object could be allocated in the same page; it
would call UnprotectAndRegister, and since write_unprotect_counter_ is
by then at least 2, the permission change would be skipped. We would
then segfault when trying to run the new code.

Change-Id: Id18bcb9a44843b4ff747b1e4ac91913e80b74d80
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3257606
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77827}
parent cb4e08c3
@@ -162,6 +162,9 @@ class ConcurrentBaselineCompiler {
   void Run(JobDelegate* delegate) override {
     while (!incoming_queue_->IsEmpty() && !delegate->ShouldYield()) {
+      // Since we're going to compile an entire batch, this guarantees that
+      // we only switch back the memory chunks to RX at the end.
+      CodePageCollectionMemoryModificationScope batch_alloc(isolate_->heap());
       std::unique_ptr<BaselineBatchCompilerJob> job;
       incoming_queue_->Dequeue(&job);
       job->Compile();
......
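Read with the destructor made explicit, the hunk above behaves roughly
like this (an annotated paraphrase of the diff, not the literal call
sites):

while (!incoming_queue_->IsEmpty() && !delegate->ShouldYield()) {
  // One scope per dequeued batch: every code page touched while compiling
  // stays RWX, and all of them are flipped back to RX together once no
  // scope remains open.
  CodePageCollectionMemoryModificationScope batch_alloc(isolate_->heap());
  std::unique_ptr<BaselineBatchCompilerJob> job;
  incoming_queue_->Dequeue(&job);
  job->Compile();  // may emit many code objects; pages stay RWX throughout
}  // ~batch_alloc: when the last open scope closes, chunks return to RX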
@@ -715,11 +715,6 @@ DEFINE_BOOL_READONLY(concurrent_sparkplug, false,
 DEFINE_BOOL(concurrent_sparkplug, false,
             "compile Sparkplug code in a background thread")
 #endif
-#if !MUST_WRITE_PROTECT_CODE_MEMORY
-// TODO(victorgomes): Currently concurrent compilation only works if we assume
-// no write protect in code space.
-DEFINE_NEG_IMPLICATION(concurrent_sparkplug, write_protect_code_memory)
-#endif
 #else
 DEFINE_BOOL(baseline_batch_compilation, false, "batch compile Sparkplug code")
 DEFINE_BOOL_READONLY(concurrent_sparkplug, false,
......
@@ -2664,10 +2664,7 @@ void Heap::ComputeFastPromotionMode() {
 void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
                                            UnprotectMemoryOrigin origin) {
   if (code_page_collection_memory_modification_scope_depth_ > 0) {
-    base::Optional<base::MutexGuard> guard;
-    if (origin != UnprotectMemoryOrigin::kMainThread) {
-      guard.emplace(&unprotected_memory_chunks_mutex_);
-    }
+    base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
     if (unprotected_memory_chunks_.insert(chunk).second) {
       chunk->SetCodeModificationPermissions();
     }
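The guard above is now taken unconditionally: with concurrent Sparkplug,
background threads reach this path at the same time as the main thread,
and the set insert is not thread-safe on its own. A minimal standalone
sketch of the pattern, with illustrative stand-in types:

#include <mutex>
#include <thread>
#include <unordered_set>

std::unordered_set<int> chunks;  // stand-in for unprotected_memory_chunks_
std::mutex chunks_mutex;         // stand-in for its mutex

// Registration can come from any thread now, so the lock is taken on
// every path rather than only when off the main thread.
void Register(int chunk) {
  std::lock_guard<std::mutex> guard(chunks_mutex);
  chunks.insert(chunk);
}

int main() {
  std::thread t1(Register, 1);
  std::thread t2(Register, 2);
  t1.join();
  t2.join();
}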
@@ -2684,7 +2681,7 @@ void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
 }

 void Heap::ProtectUnprotectedMemoryChunks() {
-  DCHECK_EQ(code_page_collection_memory_modification_scope_depth_, 0);
+  base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
   for (auto chunk = unprotected_memory_chunks_.begin();
        chunk != unprotected_memory_chunks_.end(); chunk++) {
     DCHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
......
@@ -2250,7 +2250,8 @@ class Heap {
   uintptr_t code_space_memory_modification_scope_depth_ = 0;

   // Holds the number of open CodePageCollectionMemoryModificationScopes.
-  uintptr_t code_page_collection_memory_modification_scope_depth_ = 0;
+  std::atomic<uintptr_t> code_page_collection_memory_modification_scope_depth_{
+      0};

   std::atomic<HeapState> gc_state_{NOT_IN_GC};
......
@@ -47,6 +47,8 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
   }
   HeapObject object;
   if (alloc.To(&object) && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+    heap()->UnprotectAndRegisterMemoryChunk(
+        object, UnprotectMemoryOrigin::kMaybeOffMainThread);
     heap()->ZapCodeObject(object.address(), size_in_bytes);
   }
   return alloc;
......
@@ -685,7 +685,7 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
                           PageAllocator::kNoAccess)) {
     // Commit the executable code body.
     if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
-                           PageAllocator::kReadWrite)) {
+                           MemoryChunk::GetCodeModificationPermission())) {
       // Create the post-code guard page.
       if (vm->SetPermissions(post_guard_page, page_size,
                              PageAllocator::kNoAccess)) {
......
@@ -93,10 +93,9 @@ void MemoryChunk::SetCodeModificationPermissions() {
     // We may use RWX pages to write code. Some CPUs have optimisations to push
     // updates to code to the icache through a fast path, and they may filter
     // updates based on the written memory being executable.
-    CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
-                                      FLAG_write_code_using_rwx
-                                          ? PageAllocator::kReadWriteExecute
-                                          : PageAllocator::kReadWrite));
+    CHECK(reservation_.SetPermissions(
+        unprotect_start, unprotect_size,
+        MemoryChunk::GetCodeModificationPermission()));
   }
 }
......
@@ -189,6 +189,11 @@ class MemoryChunk : public BasicMemoryChunk {
   // MemoryChunk::synchronized_heap() to simulate the barrier.
   void InitializationMemoryFence();

+  static PageAllocator::Permission GetCodeModificationPermission() {
+    return FLAG_write_code_using_rwx ? PageAllocator::kReadWriteExecute
+                                     : PageAllocator::kReadWrite;
+  }
+
   V8_EXPORT_PRIVATE void SetReadable();
   V8_EXPORT_PRIVATE void SetReadAndExecutable();
......
@@ -683,6 +683,10 @@ PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
   DCHECK_LE(limit, end);
   DCHECK_LE(min_size_in_bytes, limit - start);
   if (limit != end) {
+    if (identity() == CODE_SPACE) {
+      heap()->UnprotectAndRegisterMemoryChunk(
+          page, UnprotectMemoryOrigin::kMaybeOffMainThread);
+    }
     Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
   }
......
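Why this hunk is needed (a reading of the surrounding allocator code, not
stated in the diff itself): Free() returns the unused tail [limit, end) to
the free list, which writes a filler header into that memory. In
CODE_SPACE that is a store into an executable page, so the page must be
made writable and registered for re-protection first:

if (limit != end) {
  if (identity() == CODE_SPACE) {
    // Flip the page to its writable permission (RWX or RW) and remember
    // it, so the filler write below does not fault and the page is
    // re-protected when the open modification scopes close.
    heap()->UnprotectAndRegisterMemoryChunk(
        page, UnprotectMemoryOrigin::kMaybeOffMainThread);
  }
  // Returning [limit, end) to the free list writes into the code page.
  Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}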