Commit 99dbb750 authored by Dominik Inführ's avatar Dominik Inführ Committed by V8 LUCI CQ

[heap] Introduce new ThreadState with flags

Change ThreadState representation from a fixed set of values to
either Parked or Running with two additional flags (or bits) that
are used when either a collection or a safepoint is requested. Setting
either of these flags forces Park(), Unpark() and Safepoint() into
their slow path.

Currently we use the CollectionRequested flag on the main thread,
while SafepointRequested is used on background threads.

In case the slow path sees the CollectionRequested flag, it will
perform a GC. When encountering the SafepointRequested flag, the
background thread will participate in the safepoint protocol and
park itself for the duration of the safepoint operation.

This CL is a prerequisite for supporting safepoints across multiple
isolates. When safepointing multiple isolates, the main thread will
use both the CollectionRequested and SafepointRequested flag. This
isn't possible with the current system.

Design Doc: https://docs.google.com/document/d/1y6C9zAACEr0sBYMIYk3YpXosnkF3Ak4CEuWJu1-3zXs/edit?usp=sharing

Bug: v8:11708
Change-Id: I16b88740182d9c13bce54be163b334761529a5f0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3211894
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77349}
parent 62418750
...@@ -1322,12 +1322,13 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) { ...@@ -1322,12 +1322,13 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
ReduceNewSpaceSize(); ReduceNewSpaceSize();
} }
// Set main thread state back to Running from CollectionRequested. // Remove CollectionRequested flag from main thread state, as the collection
// was just performed.
safepoint()->AssertActive();
LocalHeap::ThreadState old_state = LocalHeap::ThreadState old_state =
main_thread_local_heap()->state_.exchange(LocalHeap::kRunning); main_thread_local_heap()->state_.ClearCollectionRequested();
CHECK(old_state == LocalHeap::kRunning || CHECK(old_state.IsRunning());
old_state == LocalHeap::kSafepointRequested);
// Resume all threads waiting for the GC. // Resume all threads waiting for the GC.
collection_barrier_->ResumeThreadsAwaitingCollection(); collection_barrier_->ResumeThreadsAwaitingCollection();
......
...@@ -28,8 +28,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type, ...@@ -28,8 +28,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
alignment == AllocationAlignment::kWordAligned); alignment == AllocationAlignment::kWordAligned);
Heap::HeapState state = heap()->gc_state(); Heap::HeapState state = heap()->gc_state();
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC); DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
ThreadState current = state_.load(std::memory_order_relaxed); DCHECK(IsRunning());
DCHECK(current == kRunning || current == kSafepointRequested);
#endif #endif
// Each allocation is supposed to be a safepoint. // Each allocation is supposed to be a safepoint.
......
...@@ -45,7 +45,7 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind, ...@@ -45,7 +45,7 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
std::unique_ptr<PersistentHandles> persistent_handles) std::unique_ptr<PersistentHandles> persistent_handles)
: heap_(heap), : heap_(heap),
is_main_thread_(kind == ThreadKind::kMain), is_main_thread_(kind == ThreadKind::kMain),
state_(kParked), state_(ThreadState::Parked()),
allocation_failed_(false), allocation_failed_(false),
main_thread_parked_(false), main_thread_parked_(false),
prev_(nullptr), prev_(nullptr),
...@@ -125,8 +125,7 @@ bool LocalHeap::IsHandleDereferenceAllowed() { ...@@ -125,8 +125,7 @@ bool LocalHeap::IsHandleDereferenceAllowed() {
#ifdef DEBUG #ifdef DEBUG
VerifyCurrent(); VerifyCurrent();
#endif #endif
ThreadState state = state_relaxed(); return IsRunning();
return state == kRunning || state == kSafepointRequested;
} }
#endif #endif
...@@ -134,45 +133,60 @@ bool LocalHeap::IsParked() { ...@@ -134,45 +133,60 @@ bool LocalHeap::IsParked() {
#ifdef DEBUG #ifdef DEBUG
VerifyCurrent(); VerifyCurrent();
#endif #endif
ThreadState state = state_relaxed(); return state_.load_relaxed().IsParked();
return state == kParked || state == kParkedSafepointRequested;
} }
void LocalHeap::ParkSlowPath(ThreadState current_state) { bool LocalHeap::IsRunning() {
if (is_main_thread()) { #ifdef DEBUG
while (true) { VerifyCurrent();
CHECK_EQ(current_state, kSafepointRequested); #endif
heap_->CollectGarbageForBackground(this); return state_.load_relaxed().IsRunning();
}
current_state = kRunning; void LocalHeap::ParkSlowPath() {
if (state_.compare_exchange_strong(current_state, kParked)) { while (true) {
ThreadState current_state = ThreadState::Running();
if (state_.CompareExchangeStrong(current_state, ThreadState::Parked()))
return; return;
}
} // CAS above failed, so state is Running with some additional flag.
DCHECK(current_state.IsRunning());
if (is_main_thread()) {
DCHECK(current_state.IsCollectionRequested());
heap_->CollectGarbageForBackground(this);
} else { } else {
CHECK_EQ(current_state, kSafepointRequested); DCHECK(current_state.IsSafepointRequested());
CHECK(state_.compare_exchange_strong(current_state, DCHECK(!current_state.IsCollectionRequested());
kParkedSafepointRequested)); CHECK(state_.CompareExchangeStrong(current_state,
current_state.SetParked()));
heap_->safepoint()->NotifyPark(); heap_->safepoint()->NotifyPark();
return;
}
} }
} }
void LocalHeap::UnparkSlowPath() { void LocalHeap::UnparkSlowPath() {
while (true) {
ThreadState current_state = ThreadState::Parked();
if (state_.CompareExchangeStrong(current_state, ThreadState::Running()))
return;
// CAS above failed, so state is Parked with some additional flag.
DCHECK(current_state.IsParked());
if (is_main_thread()) { if (is_main_thread()) {
ThreadState expected = kParkedSafepointRequested; DCHECK(current_state.IsCollectionRequested());
CHECK(state_.compare_exchange_strong(expected, kSafepointRequested)); CHECK(state_.CompareExchangeStrong(current_state,
current_state.SetRunning()));
heap_->CollectGarbageForBackground(this); heap_->CollectGarbageForBackground(this);
return;
} else { } else {
while (true) { DCHECK(current_state.IsSafepointRequested());
ThreadState expected = kParked; DCHECK(!current_state.IsCollectionRequested());
if (!state_.compare_exchange_strong(expected, kRunning)) {
CHECK_EQ(expected, kParkedSafepointRequested);
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK, TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
ThreadKind::kBackground); ThreadKind::kBackground);
heap_->safepoint()->WaitInUnpark(); heap_->safepoint()->WaitInUnpark();
} else {
return;
}
} }
} }
} }
...@@ -182,18 +196,30 @@ void LocalHeap::EnsureParkedBeforeDestruction() { ...@@ -182,18 +196,30 @@ void LocalHeap::EnsureParkedBeforeDestruction() {
} }
void LocalHeap::SafepointSlowPath() { void LocalHeap::SafepointSlowPath() {
#ifdef DEBUG
ThreadState current_state = state_.load_relaxed();
DCHECK(current_state.IsRunning());
#endif
if (is_main_thread()) { if (is_main_thread()) {
CHECK_EQ(kSafepointRequested, state_relaxed()); DCHECK(current_state.IsCollectionRequested());
heap_->CollectGarbageForBackground(this); heap_->CollectGarbageForBackground(this);
} else { } else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT, TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
ThreadKind::kBackground); ThreadKind::kBackground);
ThreadState expected = kSafepointRequested;
CHECK(state_.compare_exchange_strong(expected, kSafepoint)); // Parking the running thread here is an optimization. We do not need to
// wake this thread up to reach the next safepoint.
ThreadState old_state = state_.SetParked();
CHECK(old_state.IsRunning());
CHECK(old_state.IsSafepointRequested());
CHECK(!old_state.IsCollectionRequested());
heap_->safepoint()->WaitInSafepoint(); heap_->safepoint()->WaitInSafepoint();
// This might be a bit surprising, IsolateSafepoint transitions the state
// from Safepoint (--> Running) --> Parked when returning from the
// safepoint.
Unpark(); Unpark();
} }
} }
...@@ -219,36 +245,20 @@ bool LocalHeap::TryPerformCollection() { ...@@ -219,36 +245,20 @@ bool LocalHeap::TryPerformCollection() {
heap_->CollectGarbageForBackground(this); heap_->CollectGarbageForBackground(this);
return true; return true;
} else { } else {
DCHECK(IsRunning());
heap_->collection_barrier_->RequestGC(); heap_->collection_barrier_->RequestGC();
LocalHeap* main_thread = heap_->main_thread_local_heap(); LocalHeap* main_thread = heap_->main_thread_local_heap();
ThreadState current = main_thread->state_relaxed();
while (true) {
switch (current) {
case kRunning:
if (main_thread->state_.compare_exchange_strong(
current, kSafepointRequested)) {
return heap_->collection_barrier_->AwaitCollectionBackground(this);
}
break;
case kSafepointRequested:
return heap_->collection_barrier_->AwaitCollectionBackground(this);
case kParked: const ThreadState old_state = main_thread->state_.SetCollectionRequested();
if (main_thread->state_.compare_exchange_strong(
current, kParkedSafepointRequested)) {
return false;
}
break;
case kParkedSafepointRequested: if (old_state.IsRunning()) {
const bool performed_gc =
heap_->collection_barrier_->AwaitCollectionBackground(this);
return performed_gc;
} else {
DCHECK(old_state.IsParked());
return false; return false;
case kSafepoint:
UNREACHABLE();
}
} }
} }
} }
......
...@@ -45,11 +45,9 @@ class V8_EXPORT_PRIVATE LocalHeap { ...@@ -45,11 +45,9 @@ class V8_EXPORT_PRIVATE LocalHeap {
// from the main thread. // from the main thread.
void Safepoint() { void Safepoint() {
DCHECK(AllowSafepoints::IsAllowed()); DCHECK(AllowSafepoints::IsAllowed());
ThreadState current = state_relaxed(); ThreadState current = state_.load_relaxed();
// The following condition checks for both kSafepointRequested (background if (V8_UNLIKELY(current.IsRunningWithSlowPathFlag())) {
// thread) and kCollectionRequested (main thread).
if (V8_UNLIKELY(current == kSafepointRequested)) {
SafepointSlowPath(); SafepointSlowPath();
} }
} }
...@@ -89,6 +87,7 @@ class V8_EXPORT_PRIVATE LocalHeap { ...@@ -89,6 +87,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
#endif #endif
bool IsParked(); bool IsParked();
bool IsRunning();
Heap* heap() { return heap_; } Heap* heap() { return heap_; }
Heap* AsHeap() { return heap(); } Heap* AsHeap() { return heap(); }
...@@ -151,26 +150,94 @@ class V8_EXPORT_PRIVATE LocalHeap { ...@@ -151,26 +150,94 @@ class V8_EXPORT_PRIVATE LocalHeap {
void RemoveGCEpilogueCallback(GCEpilogueCallback* callback, void* data); void RemoveGCEpilogueCallback(GCEpilogueCallback* callback, void* data);
private: private:
enum ThreadState { using ParkedBit = base::BitField8<bool, 0, 1>;
// Threads in this state are allowed to access the heap. using SafepointRequestedBit = ParkedBit::Next<bool, 1>;
kRunning, using CollectionRequestedBit = SafepointRequestedBit::Next<bool, 1>;
// Thread was parked, which means that the thread is not allowed to access
// or manipulate the heap in any way. This is considered to be a safepoint. class ThreadState final {
kParked, public:
static constexpr ThreadState Parked() {
// SafepointRequested is used for Running threads to force Safepoint() and return ThreadState(ParkedBit::kMask);
// Park() into the slow path. }
kSafepointRequested, static constexpr ThreadState Running() { return ThreadState(0); }
// A thread transitions into this state from SafepointRequested when it
// enters a safepoint. constexpr bool IsRunning() const { return !ParkedBit::decode(raw_state_); }
kSafepoint,
// This state is used for Parked background threads and forces Unpark() into constexpr ThreadState SetRunning() const V8_WARN_UNUSED_RESULT {
// the slow path. It prevents Unpark() to succeed before the safepoint return ThreadState(raw_state_ & ~ParkedBit::kMask);
// operation is finished. }
kParkedSafepointRequested,
constexpr bool IsParked() const { return ParkedBit::decode(raw_state_); }
constexpr ThreadState SetParked() const V8_WARN_UNUSED_RESULT {
return ThreadState(ParkedBit::kMask | raw_state_);
}
constexpr bool IsSafepointRequested() const {
return SafepointRequestedBit::decode(raw_state_);
}
constexpr bool IsCollectionRequested() const {
return CollectionRequestedBit::decode(raw_state_);
}
constexpr bool IsRunningWithSlowPathFlag() const {
return IsRunning() && (raw_state_ & (SafepointRequestedBit::kMask |
CollectionRequestedBit::kMask));
}
private:
constexpr explicit ThreadState(uint8_t value) : raw_state_(value) {}
uint8_t raw() const { return raw_state_; }
uint8_t raw_state_;
friend class LocalHeap;
}; };
ThreadState state_relaxed() { return state_.load(std::memory_order_relaxed); } class AtomicThreadState final {
public:
constexpr explicit AtomicThreadState(ThreadState state)
: raw_state_(state.raw()) {}
bool CompareExchangeStrong(ThreadState& expected, ThreadState updated) {
return raw_state_.compare_exchange_strong(expected.raw_state_,
updated.raw());
}
bool CompareExchangeWeak(ThreadState& expected, ThreadState updated) {
return raw_state_.compare_exchange_weak(expected.raw_state_,
updated.raw());
}
ThreadState SetParked() {
return ThreadState(raw_state_.fetch_or(ParkedBit::kMask));
}
ThreadState SetSafepointRequested() {
return ThreadState(raw_state_.fetch_or(SafepointRequestedBit::kMask));
}
ThreadState ClearSafepointRequested() {
return ThreadState(raw_state_.fetch_and(~SafepointRequestedBit::kMask));
}
ThreadState SetCollectionRequested() {
return ThreadState(raw_state_.fetch_or(CollectionRequestedBit::kMask));
}
ThreadState ClearCollectionRequested() {
return ThreadState(raw_state_.fetch_and(~CollectionRequestedBit::kMask));
}
ThreadState load_relaxed() const {
return ThreadState(raw_state_.load(std::memory_order_relaxed));
}
private:
std::atomic<uint8_t> raw_state_;
};
// Slow path of allocation that performs GC and then retries allocation in // Slow path of allocation that performs GC and then retries allocation in
// loop. // loop.
...@@ -181,21 +248,21 @@ class V8_EXPORT_PRIVATE LocalHeap { ...@@ -181,21 +248,21 @@ class V8_EXPORT_PRIVATE LocalHeap {
void Park() { void Park() {
DCHECK(AllowGarbageCollection::IsAllowed()); DCHECK(AllowGarbageCollection::IsAllowed());
ThreadState expected = kRunning; ThreadState expected = ThreadState::Running();
if (!state_.compare_exchange_strong(expected, kParked)) { if (!state_.CompareExchangeWeak(expected, ThreadState::Parked())) {
ParkSlowPath(expected); ParkSlowPath();
} }
} }
void Unpark() { void Unpark() {
DCHECK(AllowGarbageCollection::IsAllowed()); DCHECK(AllowGarbageCollection::IsAllowed());
ThreadState expected = kParked; ThreadState expected = ThreadState::Parked();
if (!state_.compare_exchange_strong(expected, kRunning)) { if (!state_.CompareExchangeWeak(expected, ThreadState::Running())) {
UnparkSlowPath(); UnparkSlowPath();
} }
} }
void ParkSlowPath(ThreadState state); void ParkSlowPath();
void UnparkSlowPath(); void UnparkSlowPath();
void EnsureParkedBeforeDestruction(); void EnsureParkedBeforeDestruction();
void SafepointSlowPath(); void SafepointSlowPath();
...@@ -207,7 +274,7 @@ class V8_EXPORT_PRIVATE LocalHeap { ...@@ -207,7 +274,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
Heap* heap_; Heap* heap_;
bool is_main_thread_; bool is_main_thread_;
std::atomic<ThreadState> state_; AtomicThreadState state_;
bool allocation_failed_; bool allocation_failed_;
bool main_thread_parked_; bool main_thread_parked_;
......
...@@ -46,23 +46,13 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) { ...@@ -46,23 +46,13 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
continue; continue;
} }
LocalHeap::ThreadState expected = local_heap->state_relaxed(); const LocalHeap::ThreadState old_state =
local_heap->state_.SetSafepointRequested();
while (true) {
CHECK(expected == LocalHeap::kParked || expected == LocalHeap::kRunning); if (old_state.IsRunning()) running++;
LocalHeap::ThreadState new_state = CHECK_IMPLIES(old_state.IsCollectionRequested(),
expected == LocalHeap::kParked ? LocalHeap::kParkedSafepointRequested local_heap->is_main_thread());
: LocalHeap::kSafepointRequested; CHECK(!old_state.IsSafepointRequested());
if (local_heap->state_.compare_exchange_strong(expected, new_state)) {
if (expected == LocalHeap::kRunning) {
running++;
} else {
CHECK_EQ(expected, LocalHeap::kParked);
}
break;
}
}
} }
barrier_.WaitUntilRunningThreadsInSafepoint(running); barrier_.WaitUntilRunningThreadsInSafepoint(running);
...@@ -83,17 +73,13 @@ void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) { ...@@ -83,17 +73,13 @@ void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
continue; continue;
} }
// We transition both ParkedSafepointRequested and Safepoint states to const LocalHeap::ThreadState old_state =
// Parked. While this is probably intuitive for ParkedSafepointRequested, local_heap->state_.ClearSafepointRequested();
// this might be surprising for Safepoint though. SafepointSlowPath() will
// later unpark that thread again. Going through Parked means that a CHECK(old_state.IsParked());
// background thread doesn't need to be waked up before the main thread can CHECK(old_state.IsSafepointRequested());
// start the next safepoint. CHECK_IMPLIES(old_state.IsCollectionRequested(),
local_heap->is_main_thread());
LocalHeap::ThreadState old_state =
local_heap->state_.exchange(LocalHeap::kParked);
CHECK(old_state == LocalHeap::kParkedSafepointRequested ||
old_state == LocalHeap::kSafepoint);
} }
barrier_.Disarm(); barrier_.Disarm();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment