Commit c7cd212f authored by Francis McCabe, committed by V8 LUCI CQ

Revert "[heap] Replace usages of CollectionRequested with SafepointRequested"

This reverts commit 5ef4e14f.

Reason for revert: Seems to be causing a lot of flakes
e.g., https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64/42913

Original change's description:
> [heap] Replace usages of CollectionRequested with SafepointRequested
>
> CollectionRequested was used exclusively on the main thread when a
> background thread requested a GC. The main thread never used
> SafepointRequested at any time. Now with the shared GC we might need to
> stop multiple isolates in a safepoint in the future. In such a situation
> we would need to use SafepointRequested also on the main thread.
>
> This CL prepares V8 for this situation by using SafepointRequested
> instead of CollectionRequested and friends on the main thread. The slow
> path of Safepoint(), Park() and Unpark() will check in the future
> whether the main thread needs to halt for a shared GC or needs to
> perform a local GC. At the moment, simply performing the local GC is
> still enough.
>
> Bug: v8:11708
> Change-Id: I819b6f7db8251074a4adf8b554e0a1393c76f7da
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2891834
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#74673}

Bug: v8:11708
Change-Id: I51c51e68110e83f729bd43ef62eef1396aa0cb96
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2906913
Auto-Submit: Francis McCabe <fgm@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/master@{#74684}
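For context before the diff: the mechanism the original change touched, and that this revert restores, hinges on the main thread and the background threads sharing a single slow-path trigger. The sketch below is a minimal, simplified illustration of that idea and is not V8's actual implementation; only the state names (kRunning, kSafepointRequested, kCollectionRequested) mirror the patch, while the class and helper names are invented for illustration.

// Minimal sketch (assumed names, simplified logic) of the shared state value
// and the main-thread vs. background-thread slow-path dispatch.
#include <atomic>

enum ThreadState {
  kRunning,
  kSafepointRequested,                         // background thread must stop
  kCollectionRequested = kSafepointRequested,  // main thread must collect
};

class LocalThread {
 public:
  explicit LocalThread(bool is_main_thread) : is_main_thread_(is_main_thread) {}

  // Fast path: one relaxed load and one comparison cover both request kinds,
  // because kCollectionRequested and kSafepointRequested share a value.
  void Safepoint() {
    if (state_.load(std::memory_order_relaxed) == kSafepointRequested) {
      SlowPath();
    }
  }

  // Called when a background thread wants a GC: flip the main thread's state
  // so that its next Safepoint()/Park()/Unpark() takes the slow path.
  void RequestCollection() {
    ThreadState expected = kRunning;
    state_.compare_exchange_strong(expected, kCollectionRequested);
  }

 private:
  void SlowPath() {
    if (is_main_thread_) {
      PerformLocalGC();  // main thread: perform the requested local GC
    } else {
      EnterSafepoint();  // background thread: park until the GC is done
    }
  }

  void PerformLocalGC() {}
  void EnterSafepoint() {}

  std::atomic<ThreadState> state_{kRunning};
  const bool is_main_thread_;
};

In the restored code this dispatch lives in LocalHeap::ParkSlowPath, UnparkSlowPath and SafepointSlowPath, with the request side in LocalHeap::TryPerformCollection together with CollectionBarrier::ActivateStackGuardAndPostTask, as shown in the diff below.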
parent 3ef42c03
@@ -17,19 +17,25 @@
namespace v8 {
namespace internal {
bool CollectionBarrier::WasGCRequested() {
return collection_requested_.load();
bool CollectionBarrier::CollectionRequested() {
return main_thread_state_relaxed() == LocalHeap::kCollectionRequested;
}
void CollectionBarrier::RequestGC() {
ActivateStackGuardAndPostTask();
LocalHeap::ThreadState CollectionBarrier::main_thread_state_relaxed() {
LocalHeap* main_thread_local_heap = heap_->main_thread_local_heap();
return main_thread_local_heap->state_relaxed();
}
void CollectionBarrier::NotifyShutdownRequested() {
base::MutexGuard guard(&mutex_);
bool already_requested = collection_requested_.exchange(true);
CHECK(!already_requested);
if (timer_.IsStarted()) timer_.Stop();
shutdown_requested_ = true;
cv_wakeup_.NotifyAll();
}
CHECK(!timer_.IsStarted());
timer_.Start();
void CollectionBarrier::ResumeThreadsAwaitingCollection() {
base::MutexGuard guard(&mutex_);
cv_wakeup_.NotifyAll();
}
class BackgroundCollectionInterruptTask : public CancelableTask {
@@ -50,34 +56,11 @@ class BackgroundCollectionInterruptTask : public CancelableTask {
Heap* heap_;
};
void CollectionBarrier::ActivateStackGuardAndPostTask() {
Isolate* isolate = heap_->isolate();
ExecutionAccess access(isolate);
isolate->stack_guard()->RequestGC();
auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(isolate));
taskrunner->PostTask(
std::make_unique<BackgroundCollectionInterruptTask>(heap_));
}
void CollectionBarrier::NotifyShutdownRequested() {
base::MutexGuard guard(&mutex_);
if (timer_.IsStarted()) timer_.Stop();
shutdown_requested_ = true;
cv_wakeup_.NotifyAll();
}
void CollectionBarrier::ResumeThreadsAwaitingCollection() {
base::MutexGuard guard(&mutex_);
cv_wakeup_.NotifyAll();
}
bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
ParkedScope scope(local_heap);
base::MutexGuard guard(&mutex_);
while (WasGCRequested()) {
while (CollectionRequested()) {
if (shutdown_requested_) return false;
cv_wakeup_.Wait(&mutex_);
}
@@ -86,7 +69,11 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
}
void CollectionBarrier::StopTimeToCollectionTimer() {
if (collection_requested_.load()) {
LocalHeap::ThreadState main_thread_state = main_thread_state_relaxed();
CHECK(main_thread_state == LocalHeap::kRunning ||
main_thread_state == LocalHeap::kCollectionRequested);
if (main_thread_state == LocalHeap::kCollectionRequested) {
base::MutexGuard guard(&mutex_);
// The first background thread that requests the GC, starts the timer first
// and only then parks itself. Since we are in a safepoint here, the timer
@@ -102,9 +89,23 @@ void CollectionBarrier::StopTimeToCollectionTimer() {
->gc_time_to_collection_on_background()
->AddTimedSample(delta);
timer_.Stop();
collection_requested_.store(false);
}
}
void CollectionBarrier::ActivateStackGuardAndPostTask() {
Isolate* isolate = heap_->isolate();
ExecutionAccess access(isolate);
isolate->stack_guard()->RequestGC();
auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(isolate));
taskrunner->PostTask(
std::make_unique<BackgroundCollectionInterruptTask>(heap_));
base::MutexGuard guard(&mutex_);
CHECK(!timer_.IsStarted());
timer_.Start();
}
} // namespace internal
} // namespace v8
@@ -21,16 +21,20 @@ class Heap;
// This class stops and resumes all background threads waiting for GC.
class CollectionBarrier {
Heap* heap_;
base::Mutex mutex_;
base::ConditionVariable cv_wakeup_;
base::ElapsedTimer timer_;
bool shutdown_requested_;
LocalHeap::ThreadState main_thread_state_relaxed();
public:
explicit CollectionBarrier(Heap* heap)
: heap_(heap), collection_requested_(false), shutdown_requested_(false) {}
: heap_(heap), shutdown_requested_(false) {}
// Returns true when collection was requested.
bool WasGCRequested();
// Requests a GC from the main thread. Must not be invoked more than once per
// GC cycle.
void RequestGC();
bool CollectionRequested();
// Resumes all threads waiting for GC when tear down starts.
void NotifyShutdownRequested();
@@ -44,17 +48,9 @@ class CollectionBarrier {
// This is the method used by background threads to request and wait for GC.
bool AwaitCollectionBackground(LocalHeap* local_heap);
private:
// Request GC by activating stack guards and posting a task to perform the
// GC.
void ActivateStackGuardAndPostTask();
Heap* heap_;
base::Mutex mutex_;
base::ConditionVariable cv_wakeup_;
base::ElapsedTimer timer_;
std::atomic<bool> collection_requested_;
bool shutdown_requested_;
};
} // namespace internal
@@ -1310,7 +1310,7 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
main_thread_local_heap()->state_.exchange(LocalHeap::kRunning);
CHECK(old_state == LocalHeap::kRunning ||
old_state == LocalHeap::kSafepointRequested);
old_state == LocalHeap::kCollectionRequested);
// Resume all threads waiting for the GC.
collection_barrier_->ResumeThreadsAwaitingCollection();
@@ -2085,7 +2085,7 @@ void Heap::EnsureFromSpaceIsCommitted() {
}
bool Heap::CollectionRequested() {
return collection_barrier_->WasGCRequested();
return collection_barrier_->CollectionRequested();
}
void Heap::CollectGarbageForBackground(LocalHeap* local_heap) {
@@ -126,7 +126,8 @@ bool LocalHeap::IsHandleDereferenceAllowed() {
VerifyCurrent();
#endif
ThreadState state = state_relaxed();
return state == kRunning || state == kSafepointRequested;
return state == kRunning || state == kSafepointRequested ||
state == kCollectionRequested;
}
#endif
@@ -135,13 +136,14 @@ bool LocalHeap::IsParked() {
VerifyCurrent();
#endif
ThreadState state = state_relaxed();
return state == kParked || state == kParkedSafepointRequested;
return state == kParked || state == kParkedSafepointRequested ||
state == kParkedCollectionRequested;
}
void LocalHeap::ParkSlowPath(ThreadState current_state) {
if (is_main_thread()) {
while (true) {
CHECK_EQ(current_state, kSafepointRequested);
CHECK_EQ(current_state, kCollectionRequested);
heap_->CollectGarbageForBackground(this);
current_state = kRunning;
@@ -159,8 +161,8 @@ void LocalHeap::ParkSlowPath(ThreadState current_state) {
void LocalHeap::UnparkSlowPath() {
if (is_main_thread()) {
ThreadState expected = kParkedSafepointRequested;
CHECK(state_.compare_exchange_strong(expected, kSafepointRequested));
ThreadState expected = kParkedCollectionRequested;
CHECK(state_.compare_exchange_strong(expected, kCollectionRequested));
heap_->CollectGarbageForBackground(this);
} else {
while (true) {
@@ -183,7 +185,7 @@ void LocalHeap::EnsureParkedBeforeDestruction() {
void LocalHeap::SafepointSlowPath() {
if (is_main_thread()) {
CHECK_EQ(kSafepointRequested, state_relaxed());
CHECK_EQ(kCollectionRequested, state_relaxed());
heap_->CollectGarbageForBackground(this);
} else {
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
@@ -226,27 +228,27 @@ bool LocalHeap::TryPerformCollection() {
switch (current) {
case kRunning:
if (main_thread->state_.compare_exchange_strong(
current, kSafepointRequested)) {
heap_->collection_barrier_->RequestGC();
current, kCollectionRequested)) {
heap_->collection_barrier_->ActivateStackGuardAndPostTask();
return heap_->collection_barrier_->AwaitCollectionBackground(this);
}
break;
case kSafepointRequested:
case kCollectionRequested:
return heap_->collection_barrier_->AwaitCollectionBackground(this);
case kParked:
if (main_thread->state_.compare_exchange_strong(
current, kParkedSafepointRequested)) {
heap_->collection_barrier_->RequestGC();
current, kParkedCollectionRequested)) {
heap_->collection_barrier_->ActivateStackGuardAndPostTask();
return false;
}
break;
case kParkedSafepointRequested:
case kParkedCollectionRequested:
return false;
case kSafepoint:
default:
UNREACHABLE();
}
}
@@ -46,6 +46,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
void Safepoint() {
DCHECK(AllowSafepoints::IsAllowed());
ThreadState current = state_relaxed();
STATIC_ASSERT(kSafepointRequested == kCollectionRequested);
// The following condition checks for both kSafepointRequested (background
// thread) and kCollectionRequested (main thread).
@@ -149,16 +150,31 @@ class V8_EXPORT_PRIVATE LocalHeap {
// or manipulate the heap in any way. This is considered to be a safepoint.
kParked,
// SafepointRequested is used for Running threads to force Safepoint() and
// SafepointRequested is used for Running background threads to force
// Safepoint() and
// Park() into the slow path.
kSafepointRequested,
// A thread transitions into this state from SafepointRequested when it
// A background thread transitions into this state from SafepointRequested
// when it
// enters a safepoint.
kSafepoint,
// This state is used for Parked background threads and forces Unpark() into
// the slow path. It prevents Unpark() to succeed before the safepoint
// operation is finished.
// the slow
// path. It prevents Unpark() to succeed before the safepoint operation is
// finished.
kParkedSafepointRequested,
// This state is used on the main thread when at least one background thread
// requested a GC while the main thread was Running.
// We can use the same value for CollectionRequested and SafepointRequested
// since the first is only used on the main thread, while the other one only
// occurs on background threads. This property is used to have a faster
// check in Safepoint().
kCollectionRequested = kSafepointRequested,
// This state is used on the main thread when at least one background thread
// requested a GC while the main thread was Parked.
kParkedCollectionRequested,
};
ThreadState state_relaxed() { return state_.load(std::memory_order_relaxed); }