Commit bedf467c authored by Dominik Inführ, committed by Commit Bot

[heap] Add flag to synchronize runtime allocations in old space

Add flag FLAG_concurrent_allocation that when enabled protects all runtime allocations in the old space with a mutex. Prerequisite for concurrent allocation. Still disabled by default.

Bug: v8:10315
Change-Id: I37e59b3d51fe3aad112f0b556ea867cc0a2496e2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2140937
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67075}
parent 656c68a7
@@ -917,6 +917,7 @@ DEFINE_BOOL_READONLY(array_buffer_extension, V8_ARRAY_BUFFER_EXTENSION_BOOL,
DEFINE_IMPLICATION(array_buffer_extension, always_promote_young_mc)
DEFINE_BOOL(concurrent_array_buffer_sweeping, true,
            "concurrently sweep array buffers")
DEFINE_BOOL(concurrent_allocation, false, "concurrently allocate in old space")
DEFINE_BOOL(local_heaps, false, "allow heap access from background tasks")
DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
DEFINE_INT(ephemeron_fixpoint_iterations, 10, DEFINE_INT(ephemeron_fixpoint_iterations, 10,
......
@@ -3787,6 +3787,13 @@ bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
  VMState<GC> state(heap()->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
base::Optional<base::MutexGuard> optional_mutex;
if (FLAG_concurrent_allocation && origin != AllocationOrigin::kGC &&
identity() == OLD_SPACE) {
optional_mutex.emplace(&allocation_mutex_);
}
  return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
}
......
...@@ -2588,6 +2588,9 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -2588,6 +2588,9 @@ class V8_EXPORT_PRIVATE PagedSpace
  // Mutex guarding any concurrent access to the space.
  base::Mutex space_mutex_;
// Mutex guarding concurrent allocation.
base::Mutex allocation_mutex_;
  friend class IncrementalMarking;
  friend class MarkCompactCollector;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment