Commit 2982b4fe authored by Michael Lippautz, committed by V8 LUCI CQ

[heap] Guard unprotected memory chunk with a mutex

Use a mutex guard when the unprotection is triggered from a compaction
space, since in that case it can actually run in parallel.

Main-thread-only unprotection does not require acquiring the mutex.

The list of unprotected chunks is only processed on the main thread, so
processing it does not require the mutex.

The issue was introduced in https://crrev.com/c/2966382

Bug: v8:11982
Change-Id: I593c0659eb5a96c8206d0b4014f07ab13827be85
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3026705
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75734}
parent 64c72615
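
For context, a minimal, self-contained sketch of the locking pattern the diff below introduces: the mutex is acquired only when the unprotection may originate off the main thread (i.e. from a compaction space), while the main-thread path stays lock-free. std::mutex, std::scoped_lock, std::optional, and the ChunkRegistry/UnprotectAndRegister names are stand-ins for V8's base::Mutex, base::MutexGuard, base::Optional, and the Heap methods; this is an illustration of the idea, not the actual implementation.

#include <mutex>
#include <optional>
#include <unordered_set>

// Mirrors the enum added to heap.h in the diff below.
enum class UnprotectMemoryOrigin { kMainThread, kMaybeOffMainThread };

// Hypothetical stand-in for Heap's unprotected-chunk bookkeeping.
class ChunkRegistry {
 public:
  void UnprotectAndRegister(void* chunk, UnprotectMemoryOrigin origin) {
    // Compaction spaces may call this from background threads, so guard the
    // set; per the commit message, the main-thread path skips the lock.
    std::optional<std::scoped_lock<std::mutex>> guard;
    if (origin != UnprotectMemoryOrigin::kMainThread) {
      guard.emplace(mutex_);
    }
    if (chunks_.insert(chunk).second) {
      // First registration of this chunk: this is where the real code makes
      // the chunk read-and-writable (chunk->SetReadAndWritable()).
    }
  }

 private:
  std::mutex mutex_;
  std::unordered_set<void*> chunks_;
};

The base::Optional<base::MutexGuard> in the patch implements the same conditional-locking idea: the guard is only engaged for kMaybeOffMainThread origins, so the hot main-thread allocation path avoids the lock entirely.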
@@ -58,7 +58,8 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
   if (!prev_non_evac_node.is_null()) {
     MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
     if (chunk->owner_identity() == CODE_SPACE) {
-      chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
+      chunk->heap()->UnprotectAndRegisterMemoryChunk(
+          chunk, UnprotectMemoryOrigin::kMaybeOffMainThread);
     }
     prev_non_evac_node.set_next(cur_node.next());
   }

@@ -276,7 +276,8 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
   if (AllocationType::kCode == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
     // Unprotect the memory chunk of the object if it was not unprotected
     // already.
-    UnprotectAndRegisterMemoryChunk(object);
+    UnprotectAndRegisterMemoryChunk(object,
+                                    UnprotectMemoryOrigin::kMainThread);
     ZapCodeObject(object.address(), size_in_bytes);
     if (!large_object) {
       MemoryChunk::FromHeapObject(object)

@@ -2656,16 +2656,22 @@ void Heap::ComputeFastPromotionMode() {
   }
 }

-void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
+void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
+                                           UnprotectMemoryOrigin origin) {
   if (unprotected_memory_chunks_registry_enabled_) {
+    base::Optional<base::MutexGuard> guard;
+    if (origin != UnprotectMemoryOrigin::kMainThread) {
+      guard.emplace(&unprotected_memory_chunks_mutex_);
+    }
     if (unprotected_memory_chunks_.insert(chunk).second) {
       chunk->SetReadAndWritable();
     }
   }
 }

-void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object) {
-  UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object));
+void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object,
+                                           UnprotectMemoryOrigin origin) {
+  UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object), origin);
 }

 void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {

@@ -187,6 +187,11 @@ enum class SkipRoot {
   kWeak
 };

+enum UnprotectMemoryOrigin {
+  kMainThread,
+  kMaybeOffMainThread,
+};
+
 class StrongRootsEntry final {
   explicit StrongRootsEntry(const char* label) : label(label) {}

@@ -659,8 +664,10 @@ class Heap {
     code_space_memory_modification_scope_depth_--;
   }

-  void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
-  V8_EXPORT_PRIVATE void UnprotectAndRegisterMemoryChunk(HeapObject object);
+  void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
+                                       UnprotectMemoryOrigin origin);
+  V8_EXPORT_PRIVATE void UnprotectAndRegisterMemoryChunk(
+      HeapObject object, UnprotectMemoryOrigin origin);
   void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
   V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();

@@ -2451,6 +2458,7 @@ class Heap {
   HeapObject pending_layout_change_object_;

+  base::Mutex unprotected_memory_chunks_mutex_;
   std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
   bool unprotected_memory_chunks_registry_enabled_ = false;

@@ -416,6 +416,15 @@ size_t PagedSpace::Available() {
   return free_list_->Available();
 }

+namespace {
+
+UnprotectMemoryOrigin GetUnprotectMemoryOrigin(bool is_compaction_space) {
+  return is_compaction_space ? UnprotectMemoryOrigin::kMaybeOffMainThread
+                             : UnprotectMemoryOrigin::kMainThread;
+}
+
+}  // namespace
+
 void PagedSpace::FreeLinearAllocationArea() {
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.

@@ -441,7 +450,8 @@ void PagedSpace::FreeLinearAllocationArea() {
   // because we are going to write a filler into that memory area below.
   if (identity() == CODE_SPACE) {
     heap()->UnprotectAndRegisterMemoryChunk(
-        MemoryChunk::FromAddress(current_top));
+        MemoryChunk::FromAddress(current_top),
+        GetUnprotectMemoryOrigin(is_compaction_space()));
   }

   DCHECK_IMPLIES(current_limit - current_top >= 2 * kTaggedSize,

@@ -543,7 +553,8 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
   DCHECK_LE(size_in_bytes, limit - start);
   if (limit != end) {
     if (identity() == CODE_SPACE) {
-      heap()->UnprotectAndRegisterMemoryChunk(page);
+      heap()->UnprotectAndRegisterMemoryChunk(
+          page, GetUnprotectMemoryOrigin(is_compaction_space()));
     }
     Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
   }

@@ -425,7 +425,8 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
     // Ensure that there is a valid object
     if (identity() == CODE_SPACE) {
       MemoryChunk* chunk = MemoryChunk::FromAddress(soon_object);
-      heap()->UnprotectAndRegisterMemoryChunk(chunk);
+      heap()->UnprotectAndRegisterMemoryChunk(
+          chunk, UnprotectMemoryOrigin::kMainThread);
     }
     heap_->CreateFillerObjectAt(soon_object, static_cast<int>(size_in_bytes),
                                 ClearRecordedSlots::kNo);