Commit 90a9d6cb authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Support multiple clients in shared GC

Add support for safepointing multiple isolates as described in the
design doc (link is below). A safepoint across multiple isolates is
considered a global safepoint to distinguish it from regular safepoints.

The basic idea behind the implementation is that we reach a
safepoint for each client. What's new is that now also main threads
need to participate in the safepointing protocol and need to give up
control in time. The slow paths of Park(), Unpark() and Safepoint() on
the main thread need to be adjusted for this reason as well.

This CL introduces GlobalSafepoint and GlobalSafepointScope to mirror
IsolateSafepoint and IsolateSafepointScope.

This CL introduces the type IgnoreLocalGCRequests, which is used to prevent
Park() and Unpark() from honoring the request from background threads
to perform a local GC. This is used heap-internally to not have GCs
(or even nested GCs) in certain locations. E.g. when initiating a
safepoint to perform a GC we don't want a "recursive" GC to occur.

Design doc: https://docs.google.com/document/d/1y6C9zAACEr0sBYMIYk3YpXosnkF3Ak4CEuWJu1-3zXs/edit?usp=sharing

Bug: v8:11708
Change-Id: I5aca8f5f24873279271a53be3bb093fc92a1a1eb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3009224
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77812}
parent b8f002ce
...@@ -55,6 +55,9 @@ ...@@ -55,6 +55,9 @@
#include "src/handles/global-handles-inl.h" #include "src/handles/global-handles-inl.h"
#include "src/handles/persistent-handles.h" #include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h" #include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
#include "src/heap/read-only-heap.h" #include "src/heap/read-only-heap.h"
#include "src/heap/safepoint.h" #include "src/heap/safepoint.h"
#include "src/ic/stub-cache.h" #include "src/ic/stub-cache.h"
...@@ -3190,7 +3193,18 @@ void Isolate::Deinit() { ...@@ -3190,7 +3193,18 @@ void Isolate::Deinit() {
// This stops cancelable tasks (i.e. concurrent marking tasks). // This stops cancelable tasks (i.e. concurrent marking tasks).
// Stop concurrent tasks before destroying resources since they might still // Stop concurrent tasks before destroying resources since they might still
// use those. // use those.
cancelable_task_manager()->CancelAndWait(); {
IgnoreLocalGCRequests ignore_gc_requests(heap());
ParkedScope parked_scope(main_thread_local_heap());
cancelable_task_manager()->CancelAndWait();
}
{
// This isolate might have to park for a shared GC initiated by another
// client isolate before it can actually detach from the shared isolate.
AllowGarbageCollection allow_shared_gc;
DetachFromSharedIsolate();
}
ReleaseSharedPtrs(); ReleaseSharedPtrs();
...@@ -3218,10 +3232,6 @@ void Isolate::Deinit() { ...@@ -3218,10 +3232,6 @@ void Isolate::Deinit() {
// updated anymore. // updated anymore.
DumpAndResetStats(); DumpAndResetStats();
main_thread_local_isolate_->heap()->FreeLinearAllocationArea();
DetachFromSharedIsolate();
heap_.TearDown(); heap_.TearDown();
main_thread_local_isolate_.reset(); main_thread_local_isolate_.reset();
...@@ -3697,7 +3707,11 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data, ...@@ -3697,7 +3707,11 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
// Create LocalIsolate/LocalHeap for the main thread and set state to Running. // Create LocalIsolate/LocalHeap for the main thread and set state to Running.
main_thread_local_isolate_.reset(new LocalIsolate(this, ThreadKind::kMain)); main_thread_local_isolate_.reset(new LocalIsolate(this, ThreadKind::kMain));
main_thread_local_heap()->Unpark();
{
IgnoreLocalGCRequests ignore_gc_requests(heap());
main_thread_local_heap()->Unpark();
}
// The main thread LocalHeap needs to be set up when attaching to the shared // The main thread LocalHeap needs to be set up when attaching to the shared
// isolate. Otherwise a global safepoint would find an isolate without // isolate. Otherwise a global safepoint would find an isolate without
......
...@@ -22,14 +22,17 @@ bool CollectionBarrier::WasGCRequested() { ...@@ -22,14 +22,17 @@ bool CollectionBarrier::WasGCRequested() {
return collection_requested_.load(); return collection_requested_.load();
} }
void CollectionBarrier::RequestGC() { bool CollectionBarrier::TryRequestGC() {
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
if (shutdown_requested_) return false;
bool was_already_requested = collection_requested_.exchange(true); bool was_already_requested = collection_requested_.exchange(true);
if (!was_already_requested) { if (!was_already_requested) {
CHECK(!timer_.IsStarted()); CHECK(!timer_.IsStarted());
timer_.Start(); timer_.Start();
} }
return true;
} }
class BackgroundCollectionInterruptTask : public CancelableTask { class BackgroundCollectionInterruptTask : public CancelableTask {
...@@ -59,8 +62,19 @@ void CollectionBarrier::NotifyShutdownRequested() { ...@@ -59,8 +62,19 @@ void CollectionBarrier::NotifyShutdownRequested() {
void CollectionBarrier::ResumeThreadsAwaitingCollection() { void CollectionBarrier::ResumeThreadsAwaitingCollection() {
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
DCHECK(!timer_.IsStarted());
collection_requested_.store(false);
block_for_collection_ = false;
collection_performed_ = true;
cv_wakeup_.NotifyAll();
}
void CollectionBarrier::CancelCollectionAndResumeThreads() {
base::MutexGuard guard(&mutex_);
if (timer_.IsStarted()) timer_.Stop();
collection_requested_.store(false); collection_requested_.store(false);
block_for_collection_ = false; block_for_collection_ = false;
collection_performed_ = false;
cv_wakeup_.NotifyAll(); cv_wakeup_.NotifyAll();
} }
...@@ -72,6 +86,10 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) { ...@@ -72,6 +86,10 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
// set before the next GC. // set before the next GC.
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
if (shutdown_requested_) return false; if (shutdown_requested_) return false;
// Collection was cancelled by the main thread.
if (!collection_requested_.load()) return false;
first_thread = !block_for_collection_; first_thread = !block_for_collection_;
block_for_collection_ = true; block_for_collection_ = true;
CHECK(timer_.IsStarted()); CHECK(timer_.IsStarted());
...@@ -88,7 +106,8 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) { ...@@ -88,7 +106,8 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
cv_wakeup_.Wait(&mutex_); cv_wakeup_.Wait(&mutex_);
} }
return true; // Collection may have been cancelled while blocking for it.
return collection_performed_;
} }
void CollectionBarrier::ActivateStackGuardAndPostTask() { void CollectionBarrier::ActivateStackGuardAndPostTask() {
......
...@@ -27,8 +27,10 @@ class CollectionBarrier { ...@@ -27,8 +27,10 @@ class CollectionBarrier {
// Returns true when collection was requested. // Returns true when collection was requested.
bool WasGCRequested(); bool WasGCRequested();
// Requests a GC from the main thread. // Requests a GC from the main thread. Returns whether GC was successfully
void RequestGC(); // requested. Requesting a GC can fail when isolate shutdown was already
// initiated.
bool TryRequestGC();
// Resumes all threads waiting for GC when tear down starts. // Resumes all threads waiting for GC when tear down starts.
void NotifyShutdownRequested(); void NotifyShutdownRequested();
...@@ -39,7 +41,11 @@ class CollectionBarrier { ...@@ -39,7 +41,11 @@ class CollectionBarrier {
// Resumes threads waiting for collection. // Resumes threads waiting for collection.
void ResumeThreadsAwaitingCollection(); void ResumeThreadsAwaitingCollection();
// Cancels collection if one was requested and resumes threads waiting for GC.
void CancelCollectionAndResumeThreads();
// This is the method use by background threads to request and wait for GC. // This is the method use by background threads to request and wait for GC.
// Returns whether a GC was performed.
bool AwaitCollectionBackground(LocalHeap* local_heap); bool AwaitCollectionBackground(LocalHeap* local_heap);
private: private:
...@@ -50,8 +56,21 @@ class CollectionBarrier { ...@@ -50,8 +56,21 @@ class CollectionBarrier {
base::Mutex mutex_; base::Mutex mutex_;
base::ConditionVariable cv_wakeup_; base::ConditionVariable cv_wakeup_;
base::ElapsedTimer timer_; base::ElapsedTimer timer_;
// Flag that main thread checks whether a GC was requested from the background
// thread.
std::atomic<bool> collection_requested_{false}; std::atomic<bool> collection_requested_{false};
// This flag is used to detect whether to block for the GC. Only set if the
// main thread was actually running and is unset when GC resumes background
// threads.
bool block_for_collection_ = false; bool block_for_collection_ = false;
// Set to true when a GC was performed, false in case it was canceled because
// the main thread parked itself without running the GC.
bool collection_performed_ = false;
// Will be set as soon as Isolate starts tear down.
bool shutdown_requested_ = false; bool shutdown_requested_ = false;
}; };
......
...@@ -124,7 +124,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) { ...@@ -124,7 +124,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin); local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
if (!result) return false; if (!result) return false;
if (local_heap_->heap()->incremental_marking()->black_allocation()) { if (IsBlackAllocationEnabled()) {
Address top = result->first; Address top = result->first;
Address limit = top + result->second; Address limit = top + result->second;
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit); Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
...@@ -149,13 +149,19 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab( ...@@ -149,13 +149,19 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
HeapObject object = HeapObject::FromAddress(result->first); HeapObject object = HeapObject::FromAddress(result->first);
if (local_heap_->heap()->incremental_marking()->black_allocation()) { if (IsBlackAllocationEnabled()) {
local_heap_->heap()->incremental_marking()->MarkBlackBackground( owning_heap()->incremental_marking()->MarkBlackBackground(object,
object, object_size); object_size);
} }
return AllocationResult(object); return AllocationResult(object);
} }
bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
return owning_heap()->incremental_marking()->black_allocation();
}
Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -63,6 +63,12 @@ class ConcurrentAllocator { ...@@ -63,6 +63,12 @@ class ConcurrentAllocator {
V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab( V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin); int object_size, AllocationAlignment alignment, AllocationOrigin origin);
bool IsBlackAllocationEnabled() const;
// Returns the Heap of space_. This might differ from the LocalHeap's Heap for
// shared spaces.
Heap* owning_heap() const;
LocalHeap* const local_heap_; LocalHeap* const local_heap_;
PagedSpace* const space_; PagedSpace* const space_;
LocalAllocationBuffer lab_; LocalAllocationBuffer lab_;
......
...@@ -563,11 +563,12 @@ void GCTracer::Print() const { ...@@ -563,11 +563,12 @@ void GCTracer::Print() const {
Output( Output(
"[%d:%p] " "[%d:%p] "
"%8.0f ms: " "%8.0f ms: "
"%s%s %.1f (%.1f) -> %.1f (%.1f) MB, " "%s%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
"%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n", "%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
base::OS::GetCurrentProcessId(), base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()), reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(), current_.TypeName(false), heap_->isolate()->time_millis_since_init(),
heap_->IsShared() ? "Shared " : "", current_.TypeName(false),
current_.reduce_memory ? " (reduce)" : "", current_.reduce_memory ? " (reduce)" : "",
static_cast<double>(current_.start_object_size) / MB, static_cast<double>(current_.start_object_size) / MB,
static_cast<double>(current_.start_memory_size) / MB, static_cast<double>(current_.start_memory_size) / MB,
......
...@@ -867,6 +867,16 @@ CodePageMemoryModificationScope::~CodePageMemoryModificationScope() { ...@@ -867,6 +867,16 @@ CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
} }
} }
IgnoreLocalGCRequests::IgnoreLocalGCRequests(Heap* heap) : heap_(heap) {
DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
heap_->ignore_local_gc_requests_depth_++;
}
IgnoreLocalGCRequests::~IgnoreLocalGCRequests() {
DCHECK_GT(heap_->ignore_local_gc_requests_depth_, 0);
heap_->ignore_local_gc_requests_depth_--;
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -67,6 +67,7 @@ ...@@ -67,6 +67,7 @@
#include "src/heap/objects-visiting-inl.h" #include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h" #include "src/heap/objects-visiting.h"
#include "src/heap/paged-spaces-inl.h" #include "src/heap/paged-spaces-inl.h"
#include "src/heap/parked-scope.h"
#include "src/heap/read-only-heap.h" #include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h" #include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h" #include "src/heap/safepoint.h"
...@@ -1923,6 +1924,7 @@ void Heap::StartIncrementalMarking(int gc_flags, ...@@ -1923,6 +1924,7 @@ void Heap::StartIncrementalMarking(int gc_flags,
CppHeap::From(cpp_heap())->FinishSweepingIfRunning(); CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
} }
IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint(this); SafepointScope safepoint(this);
#ifdef DEBUG #ifdef DEBUG
...@@ -2156,7 +2158,13 @@ size_t Heap::PerformGarbageCollection( ...@@ -2156,7 +2158,13 @@ size_t Heap::PerformGarbageCollection(
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain); TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
SafepointScope safepoint_scope(this); base::Optional<SafepointScope> safepoint_scope;
{
AllowGarbageCollection allow_shared_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
safepoint_scope.emplace(this);
}
collection_barrier_->StopTimeToCollectionTimer(); collection_barrier_->StopTimeToCollectionTimer();
...@@ -5477,14 +5485,16 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath( ...@@ -5477,14 +5485,16 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
isolate()->counters()->gc_last_resort_from_handles()->Increment(); isolate()->counters()->gc_last_resort_from_handles()->Increment();
if (IsSharedAllocationType(allocation)) { if (IsSharedAllocationType(allocation)) {
CollectSharedGarbage(GarbageCollectionReason::kLastResort); CollectSharedGarbage(GarbageCollectionReason::kLastResort);
AlwaysAllocateScope scope(isolate()->shared_isolate()->heap());
alloc = AllocateRaw(size, allocation, origin, alignment);
} else { } else {
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort); CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
}
{
AlwaysAllocateScope scope(this); AlwaysAllocateScope scope(this);
alloc = AllocateRaw(size, allocation, origin, alignment); alloc = AllocateRaw(size, allocation, origin, alignment);
} }
if (alloc.To(&result)) { if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception()); DCHECK(result != ReadOnlyRoots(this).exception());
return result; return result;
...@@ -5878,11 +5888,21 @@ void Heap::StartTearDown() { ...@@ -5878,11 +5888,21 @@ void Heap::StartTearDown() {
// threads finish. // threads finish.
collection_barrier_->NotifyShutdownRequested(); collection_barrier_->NotifyShutdownRequested();
// Main thread isn't going to allocate anymore.
main_thread_local_heap()->FreeLinearAllocationArea();
if (isolate()->shared_isolate()) {
// Free LABs before detaching from the shared isolate.
shared_old_allocator_->FreeLinearAllocationArea();
shared_map_allocator_->FreeLinearAllocationArea();
}
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's // {StartTearDown} is called fairly early during Isolate teardown, so it's
// a good time to run heap verification (if requested), before starting to // a good time to run heap verification (if requested), before starting to
// tear down parts of the Isolate. // tear down parts of the Isolate.
if (FLAG_verify_heap) { if (FLAG_verify_heap) {
IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope scope(this); SafepointScope scope(this);
Verify(); Verify();
} }
......
...@@ -687,6 +687,10 @@ class Heap { ...@@ -687,6 +687,10 @@ class Heap {
bool IsTearingDown() const { return gc_state() == TEAR_DOWN; } bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
bool force_oom() const { return force_oom_; } bool force_oom() const { return force_oom_; }
bool ignore_local_gc_requests() const {
return ignore_local_gc_requests_depth_ > 0;
}
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
bool IsGCWithoutStack() const; bool IsGCWithoutStack() const;
...@@ -2449,6 +2453,8 @@ class Heap { ...@@ -2449,6 +2453,8 @@ class Heap {
std::unique_ptr<CollectionBarrier> collection_barrier_; std::unique_ptr<CollectionBarrier> collection_barrier_;
int ignore_local_gc_requests_depth_ = 0;
int gc_callbacks_depth_ = 0; int gc_callbacks_depth_ = 0;
bool deserialization_complete_ = false; bool deserialization_complete_ = false;
...@@ -2505,6 +2511,7 @@ class Heap { ...@@ -2505,6 +2511,7 @@ class Heap {
friend class GCTracer; friend class GCTracer;
friend class HeapObjectIterator; friend class HeapObjectIterator;
friend class ScavengeTaskObserver; friend class ScavengeTaskObserver;
friend class IgnoreLocalGCRequests;
friend class IncrementalMarking; friend class IncrementalMarking;
friend class IncrementalMarkingJob; friend class IncrementalMarkingJob;
friend class LargeObjectSpace; friend class LargeObjectSpace;
...@@ -2663,6 +2670,15 @@ class V8_NODISCARD CodePageMemoryModificationScope { ...@@ -2663,6 +2670,15 @@ class V8_NODISCARD CodePageMemoryModificationScope {
DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_) DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
}; };
class V8_NODISCARD IgnoreLocalGCRequests {
public:
explicit inline IgnoreLocalGCRequests(Heap* heap);
inline ~IgnoreLocalGCRequests();
private:
Heap* heap_;
};
// Visitor class to verify interior pointers in spaces that do not contain // Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to // or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word. // point into the heap to a location that has a map pointer at its first word.
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <memory> #include <memory>
#include "src/base/logging.h" #include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h" #include "src/base/platform/mutex.h"
#include "src/common/globals.h" #include "src/common/globals.h"
#include "src/execution/isolate.h" #include "src/execution/isolate.h"
...@@ -17,6 +18,7 @@ ...@@ -17,6 +18,7 @@
#include "src/heap/gc-tracer.h" #include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h" #include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h" #include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h" #include "src/heap/local-heap-inl.h"
#include "src/heap/marking-barrier.h" #include "src/heap/marking-barrier.h"
#include "src/heap/parked-scope.h" #include "src/heap/parked-scope.h"
...@@ -173,13 +175,42 @@ void LocalHeap::ParkSlowPath() { ...@@ -173,13 +175,42 @@ void LocalHeap::ParkSlowPath() {
DCHECK(current_state.IsRunning()); DCHECK(current_state.IsRunning());
if (is_main_thread()) { if (is_main_thread()) {
DCHECK(current_state.IsCollectionRequested()); DCHECK(current_state.IsSafepointRequested() ||
heap_->CollectGarbageForBackground(this); current_state.IsCollectionRequested());
if (current_state.IsSafepointRequested()) {
ThreadState old_state = state_.SetParked();
heap_->safepoint()->NotifyPark();
if (old_state.IsCollectionRequested())
heap_->collection_barrier_->CancelCollectionAndResumeThreads();
return;
}
if (current_state.IsCollectionRequested()) {
if (!heap()->ignore_local_gc_requests()) {
heap_->CollectGarbageForBackground(this);
continue;
}
DCHECK(!current_state.IsSafepointRequested());
if (state_.CompareExchangeStrong(current_state,
current_state.SetParked())) {
heap_->collection_barrier_->CancelCollectionAndResumeThreads();
return;
} else {
continue;
}
}
} else { } else {
DCHECK(current_state.IsSafepointRequested()); DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested()); DCHECK(!current_state.IsCollectionRequested());
CHECK(state_.CompareExchangeStrong(current_state,
current_state.SetParked())); ThreadState old_state = state_.SetParked();
CHECK(old_state.IsRunning());
CHECK(old_state.IsSafepointRequested());
CHECK(!old_state.IsCollectionRequested());
heap_->safepoint()->NotifyPark(); heap_->safepoint()->NotifyPark();
return; return;
} }
...@@ -196,52 +227,105 @@ void LocalHeap::UnparkSlowPath() { ...@@ -196,52 +227,105 @@ void LocalHeap::UnparkSlowPath() {
DCHECK(current_state.IsParked()); DCHECK(current_state.IsParked());
if (is_main_thread()) { if (is_main_thread()) {
DCHECK(current_state.IsCollectionRequested()); DCHECK(current_state.IsSafepointRequested() ||
CHECK(state_.CompareExchangeStrong(current_state, current_state.IsCollectionRequested());
current_state.SetRunning()));
heap_->CollectGarbageForBackground(this); if (current_state.IsSafepointRequested()) {
return; SleepInUnpark();
continue;
}
if (current_state.IsCollectionRequested()) {
DCHECK(!current_state.IsSafepointRequested());
if (!state_.CompareExchangeStrong(current_state,
current_state.SetRunning()))
continue;
if (!heap()->ignore_local_gc_requests()) {
heap_->CollectGarbageForBackground(this);
}
return;
}
} else { } else {
DCHECK(current_state.IsSafepointRequested()); DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested()); DCHECK(!current_state.IsCollectionRequested());
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
ThreadKind::kBackground); SleepInUnpark();
heap_->safepoint()->WaitInUnpark();
} }
} }
} }
void LocalHeap::SleepInUnpark() {
GCTracer::Scope::ScopeId scope_id;
ThreadKind thread_kind;
if (is_main_thread()) {
scope_id = GCTracer::Scope::UNPARK;
thread_kind = ThreadKind::kMain;
} else {
scope_id = GCTracer::Scope::BACKGROUND_UNPARK;
thread_kind = ThreadKind::kBackground;
}
TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
heap_->safepoint()->WaitInUnpark();
}
void LocalHeap::EnsureParkedBeforeDestruction() { void LocalHeap::EnsureParkedBeforeDestruction() {
DCHECK_IMPLIES(!is_main_thread(), IsParked()); DCHECK_IMPLIES(!is_main_thread(), IsParked());
} }
void LocalHeap::SafepointSlowPath() { void LocalHeap::SafepointSlowPath() {
#ifdef DEBUG
ThreadState current_state = state_.load_relaxed(); ThreadState current_state = state_.load_relaxed();
DCHECK(current_state.IsRunning()); DCHECK(current_state.IsRunning());
#endif
if (is_main_thread()) { if (is_main_thread()) {
DCHECK(current_state.IsCollectionRequested()); DCHECK(current_state.IsSafepointRequested() ||
heap_->CollectGarbageForBackground(this); current_state.IsCollectionRequested());
if (current_state.IsSafepointRequested()) {
SleepInSafepoint();
}
if (current_state.IsCollectionRequested()) {
heap_->CollectGarbageForBackground(this);
}
} else { } else {
DCHECK(current_state.IsSafepointRequested()); DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested()); DCHECK(!current_state.IsCollectionRequested());
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT, SleepInSafepoint();
ThreadKind::kBackground); }
}
// Parking the running thread here is an optimization. We do not need to
// wake this thread up to reach the next safepoint.
ThreadState old_state = state_.SetParked();
CHECK(old_state.IsRunning());
CHECK(old_state.IsSafepointRequested());
CHECK(!old_state.IsCollectionRequested());
heap_->safepoint()->WaitInSafepoint(); void LocalHeap::SleepInSafepoint() {
GCTracer::Scope::ScopeId scope_id;
ThreadKind thread_kind;
Unpark(); if (is_main_thread()) {
scope_id = GCTracer::Scope::SAFEPOINT;
thread_kind = ThreadKind::kMain;
} else {
scope_id = GCTracer::Scope::BACKGROUND_SAFEPOINT;
thread_kind = ThreadKind::kBackground;
} }
TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
// Parking the running thread here is an optimization. We do not need to
// wake this thread up to reach the next safepoint.
ThreadState old_state = state_.SetParked();
CHECK(old_state.IsRunning());
CHECK(old_state.IsSafepointRequested());
CHECK_IMPLIES(old_state.IsCollectionRequested(), is_main_thread());
heap_->safepoint()->WaitInSafepoint();
base::Optional<IgnoreLocalGCRequests> ignore_gc_requests;
if (is_main_thread()) ignore_gc_requests.emplace(heap());
Unpark();
} }
void LocalHeap::FreeLinearAllocationArea() { void LocalHeap::FreeLinearAllocationArea() {
...@@ -270,7 +354,7 @@ bool LocalHeap::TryPerformCollection() { ...@@ -270,7 +354,7 @@ bool LocalHeap::TryPerformCollection() {
return true; return true;
} else { } else {
DCHECK(IsRunning()); DCHECK(IsRunning());
heap_->collection_barrier_->RequestGC(); if (!heap_->collection_barrier_->TryRequestGC()) return false;
LocalHeap* main_thread = heap_->main_thread_local_heap(); LocalHeap* main_thread = heap_->main_thread_local_heap();
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <atomic> #include <atomic>
#include <memory> #include <memory>
#include "src/base/logging.h"
#include "src/base/macros.h" #include "src/base/macros.h"
#include "src/base/platform/condition-variable.h" #include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h" #include "src/base/platform/mutex.h"
...@@ -278,6 +279,8 @@ class V8_EXPORT_PRIVATE LocalHeap { ...@@ -278,6 +279,8 @@ class V8_EXPORT_PRIVATE LocalHeap {
void UnparkSlowPath(); void UnparkSlowPath();
void EnsureParkedBeforeDestruction(); void EnsureParkedBeforeDestruction();
void SafepointSlowPath(); void SafepointSlowPath();
void SleepInSafepoint();
void SleepInUnpark();
void EnsurePersistentHandles(); void EnsurePersistentHandles();
...@@ -308,10 +311,12 @@ class V8_EXPORT_PRIVATE LocalHeap { ...@@ -308,10 +311,12 @@ class V8_EXPORT_PRIVATE LocalHeap {
friend class CollectionBarrier; friend class CollectionBarrier;
friend class ConcurrentAllocator; friend class ConcurrentAllocator;
friend class GlobalSafepoint;
friend class IsolateSafepoint; friend class IsolateSafepoint;
friend class Heap; friend class Heap;
friend class Isolate; friend class Isolate;
friend class ParkedScope; friend class ParkedScope;
friend class SafepointScope;
friend class UnparkedScope; friend class UnparkedScope;
}; };
......
...@@ -44,6 +44,8 @@ class V8_NODISCARD UnparkedScope { ...@@ -44,6 +44,8 @@ class V8_NODISCARD UnparkedScope {
LocalHeap* const local_heap_; LocalHeap* const local_heap_;
}; };
// Scope that automatically parks the thread while blocking on the given
// base::Mutex.
class V8_NODISCARD ParkedMutexGuard { class V8_NODISCARD ParkedMutexGuard {
public: public:
explicit ParkedMutexGuard(LocalIsolate* local_isolate, base::Mutex* mutex) explicit ParkedMutexGuard(LocalIsolate* local_isolate, base::Mutex* mutex)
......
...@@ -7,6 +7,9 @@ ...@@ -7,6 +7,9 @@
#include <atomic> #include <atomic>
#include "src/base/logging.h" #include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/handles/handles.h" #include "src/handles/handles.h"
#include "src/handles/local-handles.h" #include "src/handles/local-handles.h"
#include "src/handles/persistent-handles.h" #include "src/handles/persistent-handles.h"
...@@ -16,6 +19,7 @@ ...@@ -16,6 +19,7 @@
#include "src/heap/local-heap.h" #include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h" #include "src/heap/parked-scope.h"
#include "src/logging/counters-scopes.h" #include "src/logging/counters-scopes.h"
#include "src/objects/objects.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
...@@ -23,22 +27,46 @@ namespace internal { ...@@ -23,22 +27,46 @@ namespace internal {
IsolateSafepoint::IsolateSafepoint(Heap* heap) IsolateSafepoint::IsolateSafepoint(Heap* heap)
: heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {} : heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) { void IsolateSafepoint::EnterLocalSafepointScope() {
// Safepoints need to be initiated on the main thread. // Safepoints need to be initiated on the main thread.
DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id()); DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
DCHECK_NULL(LocalHeap::Current()); DCHECK_NULL(LocalHeap::Current());
LockMutex(heap_->isolate()->main_thread_local_heap());
if (++active_safepoint_scopes_ > 1) return; if (++active_safepoint_scopes_ > 1) return;
TimedHistogramScope timer( TimedHistogramScope timer(
heap_->isolate()->counters()->gc_time_to_safepoint()); heap_->isolate()->counters()->gc_time_to_safepoint());
TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT); TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT);
local_heaps_mutex_.Lock(); barrier_.Arm();
size_t running = SetSafepointRequestedFlags(IncludeMainThread::kNo);
barrier_.WaitUntilRunningThreadsInSafepoint(running);
}
void IsolateSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
{
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
LockMutex(initiator->main_thread_local_heap());
}
CHECK_EQ(active_safepoint_scopes_.exchange(1), 0);
barrier_.Arm(); barrier_.Arm();
int running = 0; size_t running =
SetSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
barrier_.WaitUntilRunningThreadsInSafepoint(running);
}
IsolateSafepoint::IncludeMainThread
IsolateSafepoint::IncludeMainThreadUnlessInitiator(Isolate* initiator) {
const bool is_initiator = heap_->isolate() == initiator;
return is_initiator ? IncludeMainThread::kNo : IncludeMainThread::kYes;
}
size_t IsolateSafepoint::SetSafepointRequestedFlags(
IncludeMainThread include_main_thread) {
size_t running = 0;
// There needs to be at least one LocalHeap for the main thread. // There needs to be at least one LocalHeap for the main thread.
DCHECK_NOT_NULL(local_heaps_head_); DCHECK_NOT_NULL(local_heaps_head_);
...@@ -46,7 +74,7 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) { ...@@ -46,7 +74,7 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
for (LocalHeap* local_heap = local_heaps_head_; local_heap; for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) { local_heap = local_heap->next_) {
if (local_heap->is_main_thread() && if (local_heap->is_main_thread() &&
stop_main_thread == StopMainThread::kNo) { include_main_thread == IncludeMainThread::kNo) {
continue; continue;
} }
...@@ -59,21 +87,42 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) { ...@@ -59,21 +87,42 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
CHECK(!old_state.IsSafepointRequested()); CHECK(!old_state.IsSafepointRequested());
} }
barrier_.WaitUntilRunningThreadsInSafepoint(running); return running;
} }
void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) { void IsolateSafepoint::LockMutex(LocalHeap* local_heap) {
// Safepoints need to be initiated on the main thread. if (!local_heaps_mutex_.TryLock()) {
DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id()); ParkedScope parked_scope(local_heap);
DCHECK_NULL(LocalHeap::Current()); local_heaps_mutex_.Lock();
}
}
void IsolateSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
local_heaps_mutex_.AssertHeld();
CHECK_EQ(active_safepoint_scopes_.exchange(0), 1);
ClearSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
barrier_.Disarm();
local_heaps_mutex_.Unlock();
}
void IsolateSafepoint::LeaveLocalSafepointScope() {
local_heaps_mutex_.AssertHeld();
DCHECK_GT(active_safepoint_scopes_, 0); DCHECK_GT(active_safepoint_scopes_, 0);
if (--active_safepoint_scopes_ > 0) return;
if (--active_safepoint_scopes_ == 0) {
ClearSafepointRequestedFlags(IncludeMainThread::kNo);
barrier_.Disarm();
}
local_heaps_mutex_.Unlock();
}
void IsolateSafepoint::ClearSafepointRequestedFlags(
IncludeMainThread include_main_thread) {
for (LocalHeap* local_heap = local_heaps_head_; local_heap; for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) { local_heap = local_heap->next_) {
if (local_heap->is_main_thread() && if (local_heap->is_main_thread() &&
stop_main_thread == StopMainThread::kNo) { include_main_thread == IncludeMainThread::kNo) {
continue; continue;
} }
...@@ -85,10 +134,6 @@ void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) { ...@@ -85,10 +134,6 @@ void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
CHECK_IMPLIES(old_state.IsCollectionRequested(), CHECK_IMPLIES(old_state.IsCollectionRequested(),
local_heap->is_main_thread()); local_heap->is_main_thread());
} }
barrier_.Disarm();
local_heaps_mutex_.Unlock();
} }
void IsolateSafepoint::WaitInSafepoint() { barrier_.WaitInSafepoint(); } void IsolateSafepoint::WaitInSafepoint() { barrier_.WaitInSafepoint(); }
...@@ -113,7 +158,7 @@ void IsolateSafepoint::Barrier::Disarm() { ...@@ -113,7 +158,7 @@ void IsolateSafepoint::Barrier::Disarm() {
} }
void IsolateSafepoint::Barrier::WaitUntilRunningThreadsInSafepoint( void IsolateSafepoint::Barrier::WaitUntilRunningThreadsInSafepoint(
int running) { size_t running) {
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
DCHECK(IsArmed()); DCHECK(IsArmed());
while (stopped_ < running) { while (stopped_ < running) {
...@@ -148,16 +193,8 @@ void IsolateSafepoint::Barrier::WaitInUnpark() { ...@@ -148,16 +193,8 @@ void IsolateSafepoint::Barrier::WaitInUnpark() {
} }
} }
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
safepoint_->EnterSafepointScope(IsolateSafepoint::StopMainThread::kNo);
}
SafepointScope::~SafepointScope() {
safepoint_->LeaveSafepointScope(IsolateSafepoint::StopMainThread::kNo);
}
bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) { bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_); base::RecursiveMutexGuard guard(&local_heaps_mutex_);
LocalHeap* current = local_heaps_head_; LocalHeap* current = local_heaps_head_;
while (current) { while (current) {
...@@ -169,7 +206,7 @@ bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) { ...@@ -169,7 +206,7 @@ bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
} }
bool IsolateSafepoint::ContainsAnyLocalHeap() { bool IsolateSafepoint::ContainsAnyLocalHeap() {
base::MutexGuard guard(&local_heaps_mutex_); base::RecursiveMutexGuard guard(&local_heaps_mutex_);
return local_heaps_head_ != nullptr; return local_heaps_head_ != nullptr;
} }
...@@ -181,6 +218,12 @@ void IsolateSafepoint::Iterate(RootVisitor* visitor) { ...@@ -181,6 +218,12 @@ void IsolateSafepoint::Iterate(RootVisitor* visitor) {
} }
} }
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
safepoint_->EnterLocalSafepointScope();
}
SafepointScope::~SafepointScope() { safepoint_->LeaveLocalSafepointScope(); }
GlobalSafepoint::GlobalSafepoint(Isolate* isolate) GlobalSafepoint::GlobalSafepoint(Isolate* isolate)
: shared_isolate_(isolate), shared_heap_(isolate->heap()) {} : shared_isolate_(isolate), shared_heap_(isolate->heap()) {}
...@@ -204,7 +247,11 @@ void GlobalSafepoint::AppendClient(Isolate* client) { ...@@ -204,7 +247,11 @@ void GlobalSafepoint::AppendClient(Isolate* client) {
void GlobalSafepoint::RemoveClient(Isolate* client) { void GlobalSafepoint::RemoveClient(Isolate* client) {
DCHECK_EQ(client->heap()->gc_state(), Heap::TEAR_DOWN); DCHECK_EQ(client->heap()->gc_state(), Heap::TEAR_DOWN);
base::MutexGuard guard(&clients_mutex_);
// A shared heap may have already acquired the client mutex to perform a
// shared GC. We need to park the Isolate here to allow for a shared GC.
IgnoreLocalGCRequests ignore_gc_requests(client->heap());
ParkedMutexGuard guard(client->main_thread_local_heap(), &clients_mutex_);
if (client->global_safepoint_next_client_isolate_) { if (client->global_safepoint_next_client_isolate_) {
client->global_safepoint_next_client_isolate_ client->global_safepoint_next_client_isolate_
...@@ -228,6 +275,7 @@ void GlobalSafepoint::AssertNoClients() { DCHECK_NULL(clients_head_); } ...@@ -228,6 +275,7 @@ void GlobalSafepoint::AssertNoClients() { DCHECK_NULL(clients_head_); }
void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) { void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
if (!clients_mutex_.TryLock()) { if (!clients_mutex_.TryLock()) {
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
ParkedScope parked_scope(initiator->main_thread_local_heap()); ParkedScope parked_scope(initiator->main_thread_local_heap());
clients_mutex_.Lock(); clients_mutex_.Lock();
} }
...@@ -239,9 +287,7 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) { ...@@ -239,9 +287,7 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
IterateClientIsolates([this, initiator](Isolate* client) { IterateClientIsolates([this, initiator](Isolate* client) {
Heap* client_heap = client->heap(); Heap* client_heap = client->heap();
CHECK_EQ(initiator, client); client_heap->safepoint()->EnterGlobalSafepointScope(initiator);
client_heap->safepoint()->EnterSafepointScope(
IsolateSafepoint::StopMainThread::kNo);
USE(this); USE(this);
DCHECK_EQ(client->shared_isolate(), shared_isolate_); DCHECK_EQ(client->shared_isolate(), shared_isolate_);
...@@ -250,10 +296,9 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) { ...@@ -250,10 +296,9 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
} }
void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) { void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
IterateClientIsolates([](Isolate* client) { IterateClientIsolates([initiator](Isolate* client) {
Heap* client_heap = client->heap(); Heap* client_heap = client->heap();
client_heap->safepoint()->LeaveSafepointScope( client_heap->safepoint()->LeaveGlobalSafepointScope(initiator);
IsolateSafepoint::StopMainThread::kNo);
}); });
clients_mutex_.Unlock(); clients_mutex_.Unlock();
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include "src/base/platform/condition-variable.h" #include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h" #include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/handles/persistent-handles.h" #include "src/handles/persistent-handles.h"
#include "src/heap/local-heap.h" #include "src/heap/local-heap.h"
#include "src/objects/visitors.h" #include "src/objects/visitors.h"
...@@ -18,21 +19,12 @@ class Heap; ...@@ -18,21 +19,12 @@ class Heap;
class LocalHeap; class LocalHeap;
class RootVisitor; class RootVisitor;
// Used to bring all threads with heap access to a safepoint such that e.g. a // Used to bring all threads with heap access in an isolate to a safepoint such
// garbage collection can be performed. // that e.g. a garbage collection can be performed.
class IsolateSafepoint final { class IsolateSafepoint final {
public: public:
explicit IsolateSafepoint(Heap* heap); explicit IsolateSafepoint(Heap* heap);
// Wait until unpark operation is safe again
void WaitInUnpark();
// Enter the safepoint from a running thread
void WaitInSafepoint();
// Running thread reached a safepoint by parking itself.
void NotifyPark();
V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap); V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap(); V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
...@@ -58,7 +50,7 @@ class IsolateSafepoint final { ...@@ -58,7 +50,7 @@ class IsolateSafepoint final {
base::ConditionVariable cv_stopped_; base::ConditionVariable cv_stopped_;
bool armed_; bool armed_;
int stopped_ = 0; size_t stopped_ = 0;
bool IsArmed() { return armed_; } bool IsArmed() { return armed_; }
...@@ -67,23 +59,42 @@ class IsolateSafepoint final { ...@@ -67,23 +59,42 @@ class IsolateSafepoint final {
void Arm(); void Arm();
void Disarm(); void Disarm();
void WaitUntilRunningThreadsInSafepoint(int running); void WaitUntilRunningThreadsInSafepoint(size_t running);
void WaitInSafepoint(); void WaitInSafepoint();
void WaitInUnpark(); void WaitInUnpark();
void NotifyPark(); void NotifyPark();
}; };
enum class StopMainThread { kYes, kNo }; enum class IncludeMainThread { kYes, kNo };
// Wait until unpark operation is safe again.
void WaitInUnpark();
// Enter the safepoint from a running thread.
void WaitInSafepoint();
// Running thread reached a safepoint by parking itself.
void NotifyPark();
void EnterLocalSafepointScope();
void EnterGlobalSafepointScope(Isolate* initiator);
void LeaveLocalSafepointScope();
void LeaveGlobalSafepointScope(Isolate* initiator);
IncludeMainThread IncludeMainThreadUnlessInitiator(Isolate* initiator);
void LockMutex(LocalHeap* local_heap);
void EnterSafepointScope(StopMainThread stop_main_thread); size_t SetSafepointRequestedFlags(IncludeMainThread include_main_thread);
void LeaveSafepointScope(StopMainThread stop_main_thread); void ClearSafepointRequestedFlags(IncludeMainThread include_main_thread);
template <typename Callback> template <typename Callback>
void AddLocalHeap(LocalHeap* local_heap, Callback callback) { void AddLocalHeap(LocalHeap* local_heap, Callback callback) {
// Safepoint holds this lock in order to stop threads from starting or // Safepoint holds this lock in order to stop threads from starting or
// stopping. // stopping.
base::MutexGuard guard(&local_heaps_mutex_); base::RecursiveMutexGuard guard(&local_heaps_mutex_);
// Additional code protected from safepoint // Additional code protected from safepoint
callback(); callback();
...@@ -97,7 +108,7 @@ class IsolateSafepoint final { ...@@ -97,7 +108,7 @@ class IsolateSafepoint final {
template <typename Callback> template <typename Callback>
void RemoveLocalHeap(LocalHeap* local_heap, Callback callback) { void RemoveLocalHeap(LocalHeap* local_heap, Callback callback) {
base::MutexGuard guard(&local_heaps_mutex_); base::RecursiveMutexGuard guard(&local_heaps_mutex_);
// Additional code protected from safepoint // Additional code protected from safepoint
callback(); callback();
...@@ -113,10 +124,12 @@ class IsolateSafepoint final { ...@@ -113,10 +124,12 @@ class IsolateSafepoint final {
Barrier barrier_; Barrier barrier_;
Heap* heap_; Heap* heap_;
base::Mutex local_heaps_mutex_; // Mutex is used both for safepointing and adding/removing threads. A
// RecursiveMutex is needed since we need to support nested SafepointScopes.
base::RecursiveMutex local_heaps_mutex_;
LocalHeap* local_heaps_head_; LocalHeap* local_heaps_head_;
int active_safepoint_scopes_; std::atomic<int> active_safepoint_scopes_;
friend class Heap; friend class Heap;
friend class GlobalSafepoint; friend class GlobalSafepoint;
......
...@@ -575,6 +575,7 @@ ...@@ -575,6 +575,7 @@
F(MINOR_MC_MARKING_DEQUE) \ F(MINOR_MC_MARKING_DEQUE) \
F(MINOR_MC_RESET_LIVENESS) \ F(MINOR_MC_RESET_LIVENESS) \
F(MINOR_MC_SWEEPING) \ F(MINOR_MC_SWEEPING) \
F(SAFEPOINT) \
F(SCAVENGER) \ F(SCAVENGER) \
F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS) \ F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS) \
F(SCAVENGER_FAST_PROMOTE) \ F(SCAVENGER_FAST_PROMOTE) \
...@@ -591,7 +592,8 @@ ...@@ -591,7 +592,8 @@
F(SCAVENGER_SWEEP_ARRAY_BUFFERS) \ F(SCAVENGER_SWEEP_ARRAY_BUFFERS) \
F(TIME_TO_GLOBAL_SAFEPOINT) \ F(TIME_TO_GLOBAL_SAFEPOINT) \
F(TIME_TO_SAFEPOINT) \ F(TIME_TO_SAFEPOINT) \
F(UNMAPPER) F(UNMAPPER) \
F(UNPARK)
#define TRACER_BACKGROUND_SCOPES(F) \ #define TRACER_BACKGROUND_SCOPES(F) \
F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \ F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
......
...@@ -164,22 +164,24 @@ UNINITIALIZED_TEST(SharedCollectionWithoutClients) { ...@@ -164,22 +164,24 @@ UNINITIALIZED_TEST(SharedCollectionWithoutClients) {
Isolate::Delete(shared_isolate); Isolate::Delete(shared_isolate);
} }
void AllocateInSharedHeap(Isolate* shared_isolate) { void AllocateInSharedHeap(Isolate* shared_isolate, int iterations = 100) {
SetupClientIsolateAndRunCallback( SetupClientIsolateAndRunCallback(
shared_isolate, shared_isolate,
[](v8::Isolate* client_isolate, Isolate* i_client_isolate) { [iterations](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
HandleScope scope(i_client_isolate); HandleScope scope(i_client_isolate);
std::vector<Handle<FixedArray>> arrays; std::vector<Handle<FixedArray>> arrays;
const int kKeptAliveArrays = 1000; const int kKeptAliveArrays = 1000;
for (int i = 0; i < kNumIterations * 100; i++) { for (int i = 0; i < kNumIterations * iterations; i++) {
HandleScope new_scope(i_client_isolate); HandleScope scope(i_client_isolate);
Handle<FixedArray> array = i_client_isolate->factory()->NewFixedArray( Handle<FixedArray> array = i_client_isolate->factory()->NewFixedArray(
100, AllocationType::kSharedOld); 100, AllocationType::kSharedOld);
if (i < kKeptAliveArrays) { if (i < kKeptAliveArrays) {
// Keep some of those arrays alive across GCs. // Keep some of those arrays alive across GCs.
arrays.push_back(new_scope.CloseAndEscape(array)); arrays.push_back(scope.CloseAndEscape(array));
} }
i_client_isolate->factory()->NewFixedArray(100,
AllocationType::kYoung);
} }
for (Handle<FixedArray> array : arrays) { for (Handle<FixedArray> array : arrays) {
...@@ -203,5 +205,46 @@ UNINITIALIZED_TEST(SharedCollectionWithOneClient) { ...@@ -203,5 +205,46 @@ UNINITIALIZED_TEST(SharedCollectionWithOneClient) {
Isolate::Delete(shared_isolate); Isolate::Delete(shared_isolate);
} }
namespace {
class SharedFixedArrayAllocationThread final : public v8::base::Thread {
public:
explicit SharedFixedArrayAllocationThread(Isolate* shared)
: v8::base::Thread(
base::Thread::Options("SharedFixedArrayAllocationThread")),
shared_(shared) {}
void Run() override { AllocateInSharedHeap(shared_, 5); }
Isolate* shared_;
};
} // namespace
UNINITIALIZED_TEST(SharedCollectionWithMultipleClients) {
FLAG_max_old_space_size = 8;
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
Isolate* shared_isolate = Isolate::NewShared(create_params);
std::vector<std::unique_ptr<SharedFixedArrayAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<SharedFixedArrayAllocationThread>(shared_isolate);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->Join();
}
Isolate::Delete(shared_isolate);
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment