Commit 2c88cec4 authored by Dominik Inführ, committed by V8 LUCI CQ

Reland "[heap] Support multiple clients in shared GC"

This is a reland of 90a9d6cb

The original CL got reverted because of two different issues:

* The DCHECK failure on AllowGarbageCollection::IsAllowed() got fixed
  in https://crrev.com/c/3289625.
* The crash with the incremental marking job was caused by a nested
  GC started from a SafepointScope. This CL adds IgnoreLocalGCRequests
  scopes to SafepointScopes in src/heap.
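As a rough sketch of that pattern (the real hunks are in the diff below,
e.g. in Heap::Verify and Heap::FinalizeIncrementalMarkingIncrementally):
the IgnoreLocalGCRequests scope is opened right before entering the
safepoint, so that parking the main thread inside the SafepointScope
cannot honor a background thread's local GC request and start a nested
collection.

  // Sketch only; mirrors the Heap::Verify hunk further down.
  void Heap::Verify() {
    CHECK(HasBeenSetUp());
    // While this scope is active, Park()/Unpark() on this thread ignore
    // background threads' requests for a local GC.
    IgnoreLocalGCRequests ignore_gc_requests(this);
    SafepointScope safepoint_scope(this);
    // ... iterate the heap while all threads are stopped ...
  }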

In addition, this CL prevents shared GCs during isolate deserialization
by locking the clients_mutex_ until the isolate is fully deserialized.
The original CL used a DisallowSafepoints scope to prevent shared GCs
from interrupting isolate deserialization.
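Sketched, this is an optional mutex guard that is only engaged for client
isolates; it matches the Isolate::Init hunk below:

  // Lock clients_mutex_ in order to prevent shared GCs from other clients
  // during deserialization; released when the guard goes out of scope at
  // the end of Isolate::Init, i.e. once deserialization is complete.
  base::Optional<base::MutexGuard> clients_guard;
  if (shared_isolate_) {
    clients_guard.emplace(
        &shared_isolate_->global_safepoint()->clients_mutex_);
  }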

Original change's description:
> [heap] Support multiple clients in shared GC
>
> Add support for safepointing multiple isolates as described in the
> design doc (link is below). A safepoint across multiple isolates is
> considered a global safepoint to distinguish it from regular safepoints.
>
> The basic idea behind the implementation is that we reach a
> safepoint for each client. What's new is that main threads now also
> need to participate in the safepointing protocol and need to give up
> control in time. The slow paths of Park(), Unpark() and Safepoint() on
> the main thread need to be adjusted for this reason as well.
>
> This CL introduces GlobalSafepoint and GlobalSafepointScope to mirror
> IsolateSafepoint and IsolateSafepointScope.
>
> This CL adds the type IgnoreLocalGCRequests, which is used to prevent
> Park() and Unpark() from honoring requests from background threads
> to perform a local GC. It is used heap-internally to avoid GCs
> (or even nested GCs) in certain locations, e.g. when initiating a
> safepoint to perform a GC we don't want a "recursive" GC to occur.
>
> Design doc: https://docs.google.com/document/d/1y6C9zAACEr0sBYMIYk3YpXosnkF3Ak4CEuWJu1-3zXs/edit?usp=sharing
>
> Bug: v8:11708
> Change-Id: I5aca8f5f24873279271a53be3bb093fc92a1a1eb
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3009224
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#77812}
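For reference, a condensed sketch of the new main-thread Park() slow path
(simplified from the LocalHeap::ParkSlowPath hunk below; the retry loop and
compare-and-swap details are omitted):

  // Main-thread case only; background threads simply park and notify.
  if (current_state.IsSafepointRequested()) {
    // Give up control for the (possibly shared) safepoint; a pending
    // local collection request is cancelled rather than serviced here.
    ThreadState old_state = state_.SetParked();
    heap_->safepoint()->NotifyPark();
    if (old_state.IsCollectionRequested())
      heap_->collection_barrier_->CancelCollectionAndResumeThreads();
    return;
  }
  if (current_state.IsCollectionRequested()) {
    if (!heap()->ignore_local_gc_requests()) {
      // Honor the background threads' request and run the local GC.
      heap_->CollectGarbageForBackground(this);
    } else {
      // Inside an IgnoreLocalGCRequests scope: park without collecting.
      heap_->collection_barrier_->CancelCollectionAndResumeThreads();
    }
  }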

Bug: v8:11708, v8:12375, v8:12377
Change-Id: I9d1af6fbc06a3a8b6f216ec5e9027665ad071809
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3283067
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78013}
parent fd86d20a
@@ -56,6 +56,9 @@
#include "src/handles/global-handles-inl.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/safepoint.h"
#include "src/ic/stub-cache.h"
@@ -3221,7 +3224,25 @@ void Isolate::Deinit() {
// This stops cancelable tasks (i.e. concurrent marking tasks).
// Stop concurrent tasks before destroying resources since they might still
// use those.
cancelable_task_manager()->CancelAndWait();
{
IgnoreLocalGCRequests ignore_gc_requests(heap());
ParkedScope parked_scope(main_thread_local_heap());
cancelable_task_manager()->CancelAndWait();
}
// Cancel all baseline compiler tasks.
delete baseline_batch_compiler_;
baseline_batch_compiler_ = nullptr;
// At this point there are no more background threads left in this isolate.
heap_.safepoint()->AssertMainThreadIsOnlyThread();
{
// This isolate might have to park for a shared GC initiated by another
// client isolate before it can actually detach from the shared isolate.
AllowGarbageCollection allow_shared_gc;
DetachFromSharedIsolate();
}
ReleaseSharedPtrs();
@@ -3243,17 +3264,10 @@ void Isolate::Deinit() {
string_table_.reset();
delete baseline_batch_compiler_;
baseline_batch_compiler_ = nullptr;
// After all concurrent tasks are stopped, we know for sure that stats aren't
// updated anymore.
DumpAndResetStats();
main_thread_local_isolate_->heap()->FreeLinearAllocationArea();
DetachFromSharedIsolate();
heap_.TearDown();
main_thread_local_isolate_.reset();
@@ -3734,7 +3748,19 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
// Create LocalIsolate/LocalHeap for the main thread and set state to Running.
main_thread_local_isolate_.reset(new LocalIsolate(this, ThreadKind::kMain));
main_thread_local_heap()->Unpark();
{
IgnoreLocalGCRequests ignore_gc_requests(heap());
main_thread_local_heap()->Unpark();
}
// Lock clients_mutex_ in order to prevent shared GCs from other clients
// during deserialization.
base::Optional<base::MutexGuard> clients_guard;
if (shared_isolate_) {
clients_guard.emplace(&shared_isolate_->global_safepoint()->clients_mutex_);
}
// The main thread LocalHeap needs to be set up when attaching to the shared
// isolate. Otherwise a global safepoint would find an isolate without
@@ -22,14 +22,17 @@ bool CollectionBarrier::WasGCRequested() {
return collection_requested_.load();
}
void CollectionBarrier::RequestGC() {
bool CollectionBarrier::TryRequestGC() {
base::MutexGuard guard(&mutex_);
if (shutdown_requested_) return false;
bool was_already_requested = collection_requested_.exchange(true);
if (!was_already_requested) {
CHECK(!timer_.IsStarted());
timer_.Start();
}
return true;
}
class BackgroundCollectionInterruptTask : public CancelableTask {
@@ -59,8 +62,19 @@ void CollectionBarrier::NotifyShutdownRequested() {
void CollectionBarrier::ResumeThreadsAwaitingCollection() {
base::MutexGuard guard(&mutex_);
DCHECK(!timer_.IsStarted());
collection_requested_.store(false);
block_for_collection_ = false;
collection_performed_ = true;
cv_wakeup_.NotifyAll();
}
void CollectionBarrier::CancelCollectionAndResumeThreads() {
base::MutexGuard guard(&mutex_);
if (timer_.IsStarted()) timer_.Stop();
collection_requested_.store(false);
block_for_collection_ = false;
collection_performed_ = false;
cv_wakeup_.NotifyAll();
}
@@ -72,6 +86,10 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
// set before the next GC.
base::MutexGuard guard(&mutex_);
if (shutdown_requested_) return false;
// Collection was cancelled by the main thread.
if (!collection_requested_.load()) return false;
first_thread = !block_for_collection_;
block_for_collection_ = true;
CHECK(timer_.IsStarted());
@@ -88,7 +106,8 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
cv_wakeup_.Wait(&mutex_);
}
return true;
// Collection may have been cancelled while blocking for it.
return collection_performed_;
}
void CollectionBarrier::ActivateStackGuardAndPostTask() {
@@ -27,8 +27,10 @@ class CollectionBarrier {
// Returns true when collection was requested.
bool WasGCRequested();
// Requests a GC from the main thread.
void RequestGC();
// Requests a GC from the main thread. Returns whether GC was successfully
// requested. Requesting a GC can fail when isolate shutdown was already
// initiated.
bool TryRequestGC();
// Resumes all threads waiting for GC when tear down starts.
void NotifyShutdownRequested();
@@ -39,7 +41,11 @@ class CollectionBarrier {
// Resumes threads waiting for collection.
void ResumeThreadsAwaitingCollection();
// Cancels collection if one was requested and resumes threads waiting for GC.
void CancelCollectionAndResumeThreads();
// This is the method used by background threads to request and wait for GC.
// Returns whether a GC was performed.
bool AwaitCollectionBackground(LocalHeap* local_heap);
private:
@@ -50,8 +56,21 @@ class CollectionBarrier {
base::Mutex mutex_;
base::ConditionVariable cv_wakeup_;
base::ElapsedTimer timer_;
// Flag the main thread checks to determine whether a GC was requested by
// a background thread.
std::atomic<bool> collection_requested_{false};
// This flag is used to detect whether to block for the GC. Only set if the
// main thread was actually running and is unset when GC resumes background
// threads.
bool block_for_collection_ = false;
// Set to true when a GC was performed, false in case it was canceled because
// the main thread parked itself without running the GC.
bool collection_performed_ = false;
// Set as soon as the Isolate starts tearing down.
bool shutdown_requested_ = false;
};
@@ -136,7 +136,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
local_heap_, kLabSize, kMaxLabSize, kTaggedAligned, origin);
if (!result) return false;
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
if (IsBlackAllocationEnabled()) {
Address top = result->first;
Address limit = top + result->second;
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
@@ -161,13 +161,19 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
HeapObject object = HeapObject::FromAddress(result->first);
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
local_heap_->heap()->incremental_marking()->MarkBlackBackground(
object, object_size);
if (IsBlackAllocationEnabled()) {
owning_heap()->incremental_marking()->MarkBlackBackground(object,
object_size);
}
return AllocationResult(object);
}
bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
return owning_heap()->incremental_marking()->black_allocation();
}
Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }
} // namespace internal
} // namespace v8
@@ -63,6 +63,12 @@ class ConcurrentAllocator {
V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);
bool IsBlackAllocationEnabled() const;
// Returns the Heap of space_. This might differ from the LocalHeap's Heap for
// shared spaces.
Heap* owning_heap() const;
LocalHeap* const local_heap_;
PagedSpace* const space_;
LocalAllocationBuffer lab_;
@@ -563,11 +563,12 @@ void GCTracer::Print() const {
Output(
"[%d:%p] "
"%8.0f ms: "
"%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
"%s%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
"%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(), current_.TypeName(false),
heap_->isolate()->time_millis_since_init(),
heap_->IsShared() ? "Shared " : "", current_.TypeName(false),
current_.reduce_memory ? " (reduce)" : "",
static_cast<double>(current_.start_object_size) / MB,
static_cast<double>(current_.start_memory_size) / MB,
@@ -865,6 +865,16 @@ CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
}
}
IgnoreLocalGCRequests::IgnoreLocalGCRequests(Heap* heap) : heap_(heap) {
DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
heap_->ignore_local_gc_requests_depth_++;
}
IgnoreLocalGCRequests::~IgnoreLocalGCRequests() {
DCHECK_GT(heap_->ignore_local_gc_requests_depth_, 0);
heap_->ignore_local_gc_requests_depth_--;
}
} // namespace internal
} // namespace v8
@@ -67,6 +67,7 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/parked-scope.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
@@ -1914,6 +1915,7 @@ void Heap::StartIncrementalMarking(int gc_flags,
{
AllowGarbageCollection allow_shared_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
safepoint_scope.emplace(this);
}
@@ -2152,6 +2154,7 @@ size_t Heap::PerformGarbageCollection(
{
AllowGarbageCollection allow_shared_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
safepoint_scope.emplace(this);
}
@@ -3414,6 +3417,7 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
{
AllowGarbageCollection allow_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
safepoint_scope.emplace(this);
}
@@ -3559,8 +3563,14 @@ void Heap::FreeSharedLinearAllocationAreas() {
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->FreeSharedLinearAllocationArea();
});
FreeMainThreadSharedLinearAllocationAreas();
}
void Heap::FreeMainThreadSharedLinearAllocationAreas() {
if (!isolate()->shared_isolate()) return;
shared_old_allocator_->FreeLinearAllocationArea();
shared_map_allocator_->FreeLinearAllocationArea();
main_thread_local_heap()->FreeSharedLinearAllocationArea();
}
namespace {
@@ -3778,6 +3788,7 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE,
ThreadKind::kMain);
IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint(this);
InvokeIncrementalMarkingPrologueCallbacks();
incremental_marking()->FinalizeIncrementally();
@@ -4202,6 +4213,7 @@ std::unique_ptr<v8::MeasureMemoryDelegate> Heap::MeasureMemoryDelegate(
void Heap::CollectCodeStatistics() {
TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint_scope(this);
MakeHeapIterable();
CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
@@ -4415,6 +4427,7 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
#ifdef VERIFY_HEAP
void Heap::Verify() {
CHECK(HasBeenSetUp());
IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint_scope(this);
HandleScope scope(isolate());
@@ -5507,14 +5520,16 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
isolate()->counters()->gc_last_resort_from_handles()->Increment();
if (IsSharedAllocationType(allocation)) {
CollectSharedGarbage(GarbageCollectionReason::kLastResort);
AlwaysAllocateScope scope(isolate()->shared_isolate()->heap());
alloc = AllocateRaw(size, allocation, origin, alignment);
} else {
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
}
{
AlwaysAllocateScope scope(this);
alloc = AllocateRaw(size, allocation, origin, alignment);
}
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -5908,12 +5923,19 @@ void Heap::StartTearDown() {
// threads finish.
collection_barrier_->NotifyShutdownRequested();
// Main thread isn't going to allocate anymore.
main_thread_local_heap()->FreeLinearAllocationArea();
FreeMainThreadSharedLinearAllocationAreas();
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
// a good time to run heap verification (if requested), before starting to
// tear down parts of the Isolate.
if (FLAG_verify_heap) {
AllowGarbageCollection allow_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope scope(this);
Verify();
}
#endif
@@ -687,6 +687,10 @@ class Heap {
bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
bool force_oom() const { return force_oom_; }
bool ignore_local_gc_requests() const {
return ignore_local_gc_requests_depth_ > 0;
}
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
bool IsGCWithoutStack() const;
@@ -1786,6 +1790,9 @@ class Heap {
// Free all shared LABs.
void FreeSharedLinearAllocationAreas();
// Free all shared LABs of main thread.
void FreeMainThreadSharedLinearAllocationAreas();
// Performs garbage collection in a safepoint.
// Returns the number of freed global handles.
size_t PerformGarbageCollection(
@@ -2444,6 +2451,8 @@ class Heap {
std::unique_ptr<CollectionBarrier> collection_barrier_;
int ignore_local_gc_requests_depth_ = 0;
int gc_callbacks_depth_ = 0;
bool deserialization_complete_ = false;
@@ -2500,6 +2509,7 @@ class Heap {
friend class GCTracer;
friend class HeapObjectIterator;
friend class ScavengeTaskObserver;
friend class IgnoreLocalGCRequests;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class LargeObjectSpace;
@@ -2658,6 +2668,15 @@ class V8_NODISCARD CodePageMemoryModificationScope {
DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
};
class V8_NODISCARD IgnoreLocalGCRequests {
public:
explicit inline IgnoreLocalGCRequests(Heap* heap);
inline ~IgnoreLocalGCRequests();
private:
Heap* heap_;
};
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -8,6 +8,7 @@
#include <memory>
#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
@@ -17,6 +18,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/parked-scope.h"
@@ -53,7 +55,9 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
next_(nullptr),
handles_(new LocalHandles),
persistent_handles_(std::move(persistent_handles)) {
DCHECK_IMPLIES(!is_main_thread(), heap_->deserialization_complete());
if (!is_main_thread()) SetUp();
heap_->safepoint()->AddLocalHeap(this, [this] {
if (!is_main_thread()) {
WriteBarrier::SetForThread(marking_barrier_.get());
@@ -179,13 +183,42 @@ void LocalHeap::ParkSlowPath() {
DCHECK(current_state.IsRunning());
if (is_main_thread()) {
DCHECK(current_state.IsCollectionRequested());
heap_->CollectGarbageForBackground(this);
DCHECK(current_state.IsSafepointRequested() ||
current_state.IsCollectionRequested());
if (current_state.IsSafepointRequested()) {
ThreadState old_state = state_.SetParked();
heap_->safepoint()->NotifyPark();
if (old_state.IsCollectionRequested())
heap_->collection_barrier_->CancelCollectionAndResumeThreads();
return;
}
if (current_state.IsCollectionRequested()) {
if (!heap()->ignore_local_gc_requests()) {
heap_->CollectGarbageForBackground(this);
continue;
}
DCHECK(!current_state.IsSafepointRequested());
if (state_.CompareExchangeStrong(current_state,
current_state.SetParked())) {
heap_->collection_barrier_->CancelCollectionAndResumeThreads();
return;
} else {
continue;
}
}
} else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
CHECK(state_.CompareExchangeStrong(current_state,
current_state.SetParked()));
ThreadState old_state = state_.SetParked();
CHECK(old_state.IsRunning());
CHECK(old_state.IsSafepointRequested());
CHECK(!old_state.IsCollectionRequested());
heap_->safepoint()->NotifyPark();
return;
}
@@ -202,52 +235,105 @@ void LocalHeap::UnparkSlowPath() {
DCHECK(current_state.IsParked());
if (is_main_thread()) {
DCHECK(current_state.IsCollectionRequested());
CHECK(state_.CompareExchangeStrong(current_state,
current_state.SetRunning()));
heap_->CollectGarbageForBackground(this);
return;
DCHECK(current_state.IsSafepointRequested() ||
current_state.IsCollectionRequested());
if (current_state.IsSafepointRequested()) {
SleepInUnpark();
continue;
}
if (current_state.IsCollectionRequested()) {
DCHECK(!current_state.IsSafepointRequested());
if (!state_.CompareExchangeStrong(current_state,
current_state.SetRunning()))
continue;
if (!heap()->ignore_local_gc_requests()) {
heap_->CollectGarbageForBackground(this);
}
return;
}
} else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
ThreadKind::kBackground);
heap_->safepoint()->WaitInUnpark();
SleepInUnpark();
}
}
}
void LocalHeap::SleepInUnpark() {
GCTracer::Scope::ScopeId scope_id;
ThreadKind thread_kind;
if (is_main_thread()) {
scope_id = GCTracer::Scope::UNPARK;
thread_kind = ThreadKind::kMain;
} else {
scope_id = GCTracer::Scope::BACKGROUND_UNPARK;
thread_kind = ThreadKind::kBackground;
}
TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
heap_->safepoint()->WaitInUnpark();
}
void LocalHeap::EnsureParkedBeforeDestruction() {
DCHECK_IMPLIES(!is_main_thread(), IsParked());
}
void LocalHeap::SafepointSlowPath() {
#ifdef DEBUG
ThreadState current_state = state_.load_relaxed();
DCHECK(current_state.IsRunning());
#endif
if (is_main_thread()) {
DCHECK(current_state.IsCollectionRequested());
heap_->CollectGarbageForBackground(this);
DCHECK(current_state.IsSafepointRequested() ||
current_state.IsCollectionRequested());
if (current_state.IsSafepointRequested()) {
SleepInSafepoint();
}
if (current_state.IsCollectionRequested()) {
heap_->CollectGarbageForBackground(this);
}
} else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
ThreadKind::kBackground);
// Parking the running thread here is an optimization. We do not need to
// wake this thread up to reach the next safepoint.
ThreadState old_state = state_.SetParked();
CHECK(old_state.IsRunning());
CHECK(old_state.IsSafepointRequested());
CHECK(!old_state.IsCollectionRequested());
SleepInSafepoint();
}
}
heap_->safepoint()->WaitInSafepoint();
void LocalHeap::SleepInSafepoint() {
GCTracer::Scope::ScopeId scope_id;
ThreadKind thread_kind;
Unpark();
if (is_main_thread()) {
scope_id = GCTracer::Scope::SAFEPOINT;
thread_kind = ThreadKind::kMain;
} else {
scope_id = GCTracer::Scope::BACKGROUND_SAFEPOINT;
thread_kind = ThreadKind::kBackground;
}
TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
// Parking the running thread here is an optimization. We do not need to
// wake this thread up to reach the next safepoint.
ThreadState old_state = state_.SetParked();
CHECK(old_state.IsRunning());
CHECK(old_state.IsSafepointRequested());
CHECK_IMPLIES(old_state.IsCollectionRequested(), is_main_thread());
heap_->safepoint()->WaitInSafepoint();
base::Optional<IgnoreLocalGCRequests> ignore_gc_requests;
if (is_main_thread()) ignore_gc_requests.emplace(heap());
Unpark();
}
void LocalHeap::FreeLinearAllocationArea() {
@@ -280,7 +366,7 @@ bool LocalHeap::TryPerformCollection() {
return true;
} else {
DCHECK(IsRunning());
heap_->collection_barrier_->RequestGC();
if (!heap_->collection_barrier_->TryRequestGC()) return false;
LocalHeap* main_thread = heap_->main_thread_local_heap();
@@ -8,6 +8,7 @@
#include <atomic>
#include <memory>
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
@@ -284,6 +285,8 @@ class V8_EXPORT_PRIVATE LocalHeap {
void UnparkSlowPath();
void EnsureParkedBeforeDestruction();
void SafepointSlowPath();
void SleepInSafepoint();
void SleepInUnpark();
void EnsurePersistentHandles();
@@ -315,10 +318,12 @@ class V8_EXPORT_PRIVATE LocalHeap {
friend class CollectionBarrier;
friend class ConcurrentAllocator;
friend class GlobalSafepoint;
friend class IsolateSafepoint;
friend class Heap;
friend class Isolate;
friend class ParkedScope;
friend class SafepointScope;
friend class UnparkedScope;
};
@@ -44,6 +44,8 @@ class V8_NODISCARD UnparkedScope {
LocalHeap* const local_heap_;
};
// Scope that automatically parks the thread while blocking on the given
// base::Mutex.
class V8_NODISCARD ParkedMutexGuard {
public:
explicit ParkedMutexGuard(LocalIsolate* local_isolate, base::Mutex* mutex)
@@ -7,7 +7,10 @@
#include <atomic>
#include "src/base/logging.h"
#include "src/common/assert-scope.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/handles/local-handles.h"
#include "src/handles/persistent-handles.h"
@@ -17,6 +20,7 @@
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
#include "src/logging/counters-scopes.h"
#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -24,23 +28,47 @@ namespace internal {
IsolateSafepoint::IsolateSafepoint(Heap* heap)
: heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
void IsolateSafepoint::EnterLocalSafepointScope() {
// Safepoints need to be initiated on the main thread.
DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
DCHECK_NULL(LocalHeap::Current());
DCHECK(AllowGarbageCollection::IsAllowed());
LockMutex(heap_->isolate()->main_thread_local_heap());
if (++active_safepoint_scopes_ > 1) return;
TimedHistogramScope timer(
heap_->isolate()->counters()->gc_time_to_safepoint());
TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT);
local_heaps_mutex_.Lock();
barrier_.Arm();
size_t running = SetSafepointRequestedFlags(IncludeMainThread::kNo);
barrier_.WaitUntilRunningThreadsInSafepoint(running);
}
void IsolateSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
{
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
LockMutex(initiator->main_thread_local_heap());
}
CHECK_EQ(active_safepoint_scopes_.exchange(1), 0);
barrier_.Arm();
int running = 0;
size_t running =
SetSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
barrier_.WaitUntilRunningThreadsInSafepoint(running);
}
IsolateSafepoint::IncludeMainThread
IsolateSafepoint::IncludeMainThreadUnlessInitiator(Isolate* initiator) {
const bool is_initiator = heap_->isolate() == initiator;
return is_initiator ? IncludeMainThread::kNo : IncludeMainThread::kYes;
}
size_t IsolateSafepoint::SetSafepointRequestedFlags(
IncludeMainThread include_main_thread) {
size_t running = 0;
// There needs to be at least one LocalHeap for the main thread.
DCHECK_NOT_NULL(local_heaps_head_);
@@ -48,7 +76,7 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
if (local_heap->is_main_thread() &&
stop_main_thread == StopMainThread::kNo) {
include_main_thread == IncludeMainThread::kNo) {
continue;
}
@@ -61,21 +89,42 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
CHECK(!old_state.IsSafepointRequested());
}
barrier_.WaitUntilRunningThreadsInSafepoint(running);
return running;
}
void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
// Safepoints need to be initiated on the main thread.
DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
DCHECK_NULL(LocalHeap::Current());
void IsolateSafepoint::LockMutex(LocalHeap* local_heap) {
if (!local_heaps_mutex_.TryLock()) {
ParkedScope parked_scope(local_heap);
local_heaps_mutex_.Lock();
}
}
void IsolateSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
local_heaps_mutex_.AssertHeld();
CHECK_EQ(active_safepoint_scopes_.exchange(0), 1);
ClearSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
barrier_.Disarm();
local_heaps_mutex_.Unlock();
}
void IsolateSafepoint::LeaveLocalSafepointScope() {
local_heaps_mutex_.AssertHeld();
DCHECK_GT(active_safepoint_scopes_, 0);
if (--active_safepoint_scopes_ > 0) return;
if (--active_safepoint_scopes_ == 0) {
ClearSafepointRequestedFlags(IncludeMainThread::kNo);
barrier_.Disarm();
}
local_heaps_mutex_.Unlock();
}
void IsolateSafepoint::ClearSafepointRequestedFlags(
IncludeMainThread include_main_thread) {
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
if (local_heap->is_main_thread() &&
stop_main_thread == StopMainThread::kNo) {
include_main_thread == IncludeMainThread::kNo) {
continue;
}
@@ -87,10 +136,6 @@ void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
CHECK_IMPLIES(old_state.IsCollectionRequested(),
local_heap->is_main_thread());
}
barrier_.Disarm();
local_heaps_mutex_.Unlock();
}
void IsolateSafepoint::WaitInSafepoint() { barrier_.WaitInSafepoint(); }
@@ -115,7 +160,7 @@ void IsolateSafepoint::Barrier::Disarm() {
}
void IsolateSafepoint::Barrier::WaitUntilRunningThreadsInSafepoint(
int running) {
size_t running) {
base::MutexGuard guard(&mutex_);
DCHECK(IsArmed());
while (stopped_ < running) {
@@ -150,16 +195,8 @@ void IsolateSafepoint::Barrier::WaitInUnpark() {
}
}
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
safepoint_->EnterSafepointScope(IsolateSafepoint::StopMainThread::kNo);
}
SafepointScope::~SafepointScope() {
safepoint_->LeaveSafepointScope(IsolateSafepoint::StopMainThread::kNo);
}
bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
base::RecursiveMutexGuard guard(&local_heaps_mutex_);
LocalHeap* current = local_heaps_head_;
while (current) {
@@ -171,7 +208,7 @@ bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
}
bool IsolateSafepoint::ContainsAnyLocalHeap() {
base::MutexGuard guard(&local_heaps_mutex_);
base::RecursiveMutexGuard guard(&local_heaps_mutex_);
return local_heaps_head_ != nullptr;
}
@@ -183,11 +220,22 @@ void IsolateSafepoint::Iterate(RootVisitor* visitor) {
}
}
void IsolateSafepoint::AssertMainThreadIsOnlyThread() {
DCHECK_EQ(local_heaps_head_, heap_->main_thread_local_heap());
DCHECK_NULL(heap_->main_thread_local_heap()->next_);
}
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
safepoint_->EnterLocalSafepointScope();
}
SafepointScope::~SafepointScope() { safepoint_->LeaveLocalSafepointScope(); }
GlobalSafepoint::GlobalSafepoint(Isolate* isolate)
: shared_isolate_(isolate), shared_heap_(isolate->heap()) {}
void GlobalSafepoint::AppendClient(Isolate* client) {
base::MutexGuard guard(&clients_mutex_);
clients_mutex_.AssertHeld();
DCHECK_NULL(client->global_safepoint_prev_client_isolate_);
DCHECK_NULL(client->global_safepoint_next_client_isolate_);
@@ -206,7 +254,11 @@ void GlobalSafepoint::AppendClient(Isolate* client) {
void GlobalSafepoint::RemoveClient(Isolate* client) {
DCHECK_EQ(client->heap()->gc_state(), Heap::TEAR_DOWN);
base::MutexGuard guard(&clients_mutex_);
// A shared heap may have already acquired the client mutex to perform a
// shared GC. We need to park the Isolate here to allow for a shared GC.
IgnoreLocalGCRequests ignore_gc_requests(client->heap());
ParkedMutexGuard guard(client->main_thread_local_heap(), &clients_mutex_);
if (client->global_safepoint_next_client_isolate_) {
client->global_safepoint_next_client_isolate_
@@ -230,6 +282,7 @@ void GlobalSafepoint::AssertNoClients() { DCHECK_NULL(clients_head_); }
void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
if (!clients_mutex_.TryLock()) {
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
ParkedScope parked_scope(initiator->main_thread_local_heap());
clients_mutex_.Lock();
}
@@ -241,9 +294,7 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
IterateClientIsolates([this, initiator](Isolate* client) {
Heap* client_heap = client->heap();
CHECK_EQ(initiator, client);
client_heap->safepoint()->EnterSafepointScope(
IsolateSafepoint::StopMainThread::kNo);
client_heap->safepoint()->EnterGlobalSafepointScope(initiator);
USE(this);
DCHECK_EQ(client->shared_isolate(), shared_isolate_);
@@ -252,10 +303,9 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
}
void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
IterateClientIsolates([](Isolate* client) {
IterateClientIsolates([initiator](Isolate* client) {
Heap* client_heap = client->heap();
client_heap->safepoint()->LeaveSafepointScope(
IsolateSafepoint::StopMainThread::kNo);
client_heap->safepoint()->LeaveGlobalSafepointScope(initiator);
});
clients_mutex_.Unlock();
@@ -266,8 +316,7 @@ GlobalSafepointScope::GlobalSafepointScope(Isolate* initiator)
if (shared_isolate_) {
shared_isolate_->global_safepoint()->EnterGlobalSafepointScope(initiator_);
} else {
initiator_->heap()->safepoint()->EnterSafepointScope(
IsolateSafepoint::StopMainThread::kNo);
initiator_->heap()->safepoint()->EnterLocalSafepointScope();
}
}
@@ -275,8 +324,7 @@ GlobalSafepointScope::~GlobalSafepointScope() {
if (shared_isolate_) {
shared_isolate_->global_safepoint()->LeaveGlobalSafepointScope(initiator_);
} else {
initiator_->heap()->safepoint()->LeaveSafepointScope(
IsolateSafepoint::StopMainThread::kNo);
initiator_->heap()->safepoint()->LeaveLocalSafepointScope();
}
}
@@ -7,6 +7,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/local-heap.h"
#include "src/objects/visitors.h"
@@ -18,21 +19,12 @@ class Heap;
class LocalHeap;
class RootVisitor;
// Used to bring all threads with heap access to a safepoint such that e.g. a
// garbage collection can be performed.
// Used to bring all threads with heap access in an isolate to a safepoint such
// that e.g. a garbage collection can be performed.
class IsolateSafepoint final {
public:
explicit IsolateSafepoint(Heap* heap);
// Wait until unpark operation is safe again
void WaitInUnpark();
// Enter the safepoint from a running thread
void WaitInSafepoint();
// Running thread reached a safepoint by parking itself.
void NotifyPark();
V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
@@ -51,6 +43,8 @@ class IsolateSafepoint final {
void AssertActive() { local_heaps_mutex_.AssertHeld(); }
void AssertMainThreadIsOnlyThread();
private:
class Barrier {
base::Mutex mutex_;
@@ -58,7 +52,7 @@ class IsolateSafepoint final {
base::ConditionVariable cv_stopped_;
bool armed_;
int stopped_ = 0;
size_t stopped_ = 0;
bool IsArmed() { return armed_; }
@@ -67,23 +61,42 @@ class IsolateSafepoint final {
void Arm();
void Disarm();
void WaitUntilRunningThreadsInSafepoint(int running);
void WaitUntilRunningThreadsInSafepoint(size_t running);
void WaitInSafepoint();
void WaitInUnpark();
void NotifyPark();
};
enum class StopMainThread { kYes, kNo };
enum class IncludeMainThread { kYes, kNo };
// Wait until unpark operation is safe again.
void WaitInUnpark();
// Enter the safepoint from a running thread.
void WaitInSafepoint();
// Running thread reached a safepoint by parking itself.
void NotifyPark();
void EnterLocalSafepointScope();
void EnterGlobalSafepointScope(Isolate* initiator);
void LeaveLocalSafepointScope();
void LeaveGlobalSafepointScope(Isolate* initiator);
IncludeMainThread IncludeMainThreadUnlessInitiator(Isolate* initiator);
void LockMutex(LocalHeap* local_heap);
void EnterSafepointScope(StopMainThread stop_main_thread);
void LeaveSafepointScope(StopMainThread stop_main_thread);
size_t SetSafepointRequestedFlags(IncludeMainThread include_main_thread);
void ClearSafepointRequestedFlags(IncludeMainThread include_main_thread);
template <typename Callback>
void AddLocalHeap(LocalHeap* local_heap, Callback callback) {
// Safepoint holds this lock in order to stop threads from starting or
// stopping.
base::MutexGuard guard(&local_heaps_mutex_);
base::RecursiveMutexGuard guard(&local_heaps_mutex_);
// Additional code protected from safepoint
callback();
@@ -97,7 +110,7 @@ class IsolateSafepoint final {
template <typename Callback>
void RemoveLocalHeap(LocalHeap* local_heap, Callback callback) {
base::MutexGuard guard(&local_heaps_mutex_);
base::RecursiveMutexGuard guard(&local_heaps_mutex_);
// Additional code protected from safepoint
callback();
@@ -113,10 +126,12 @@ class IsolateSafepoint final {
Barrier barrier_;
Heap* heap_;
base::Mutex local_heaps_mutex_;
// Mutex is used both for safepointing and adding/removing threads. A
// RecursiveMutex is needed since we need to support nested SafepointScopes.
base::RecursiveMutex local_heaps_mutex_;
LocalHeap* local_heaps_head_;
int active_safepoint_scopes_;
std::atomic<int> active_safepoint_scopes_;
friend class Heap;
friend class GlobalSafepoint;
@@ -164,6 +179,7 @@ class GlobalSafepoint final {
Isolate* clients_head_ = nullptr;
friend class GlobalSafepointScope;
friend class Isolate;
};
class V8_NODISCARD GlobalSafepointScope {
@@ -575,6 +575,7 @@
F(MINOR_MC_MARKING_DEQUE) \
F(MINOR_MC_RESET_LIVENESS) \
F(MINOR_MC_SWEEPING) \
F(SAFEPOINT) \
F(SCAVENGER) \
F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS) \
F(SCAVENGER_FAST_PROMOTE) \
@@ -591,7 +592,8 @@
F(SCAVENGER_SWEEP_ARRAY_BUFFERS) \
F(TIME_TO_GLOBAL_SAFEPOINT) \
F(TIME_TO_SAFEPOINT) \
F(UNMAPPER)
F(UNMAPPER) \
F(UNPARK)
#define TRACER_BACKGROUND_SCOPES(F) \
F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
@@ -164,22 +164,24 @@ UNINITIALIZED_TEST(SharedCollectionWithoutClients) {
Isolate::Delete(shared_isolate);
}
void AllocateInSharedHeap(Isolate* shared_isolate) {
void AllocateInSharedHeap(Isolate* shared_isolate, int iterations = 100) {
SetupClientIsolateAndRunCallback(
shared_isolate,
[](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
[iterations](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
HandleScope scope(i_client_isolate);
std::vector<Handle<FixedArray>> arrays;
const int kKeptAliveArrays = 1000;
for (int i = 0; i < kNumIterations * 100; i++) {
HandleScope new_scope(i_client_isolate);
for (int i = 0; i < kNumIterations * iterations; i++) {
HandleScope scope(i_client_isolate);
Handle<FixedArray> array = i_client_isolate->factory()->NewFixedArray(
100, AllocationType::kSharedOld);
if (i < kKeptAliveArrays) {
// Keep some of those arrays alive across GCs.
arrays.push_back(new_scope.CloseAndEscape(array));
arrays.push_back(scope.CloseAndEscape(array));
}
i_client_isolate->factory()->NewFixedArray(100,
AllocationType::kYoung);
}
for (Handle<FixedArray> array : arrays) {
@@ -203,5 +205,46 @@ UNINITIALIZED_TEST(SharedCollectionWithOneClient) {
Isolate::Delete(shared_isolate);
}
namespace {
class SharedFixedArrayAllocationThread final : public v8::base::Thread {
public:
explicit SharedFixedArrayAllocationThread(Isolate* shared)
: v8::base::Thread(
base::Thread::Options("SharedFixedArrayAllocationThread")),
shared_(shared) {}
void Run() override { AllocateInSharedHeap(shared_, 5); }
Isolate* shared_;
};
} // namespace
UNINITIALIZED_TEST(SharedCollectionWithMultipleClients) {
FLAG_max_old_space_size = 8;
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
Isolate* shared_isolate = Isolate::NewShared(create_params);
std::vector<std::unique_ptr<SharedFixedArrayAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<SharedFixedArrayAllocationThread>(shared_isolate);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->Join();
}
Isolate::Delete(shared_isolate);
}
} // namespace internal
} // namespace v8