Commit 248ae56d authored by Dominik Inführ, committed by Commit Bot

Reland "[heap] Introduce new state in CollectionBarrier"

This is a reland of 8358ab49

Original change's description:
> [heap] Introduce new state in CollectionBarrier
>
> Introduce new state kCollectionStarted in CollectionBarrier. This state
> is used during Heap::PerformGarbageCollection. It stops threads from
> requesting GC when the GC was already started. This happens because a
> background thread only requests the GC after it parked itself - the GC
> could be started in-between those two events.
>
> Bug: v8:10315
> Change-Id: I59cf3d4ea41c7a2c37ffce89c5b057221a2499e0
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2474858
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#70572}

Bug: v8:10315
Change-Id: I9da463c847cb0badde58ce767a6e3a24be7672f5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2480564
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70651}
parent 856c6e0f
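For readers skimming the diff below, here is a minimal, self-contained sketch of the state machine this change introduces. It is not the V8 implementation: the mutex, the time-to-collection counter, the stack guard and the waiting logic are omitted, and MarkCollectionStarted()/MarkCollectionDone() are hypothetical names standing in for the transitions that StopTimeToCollectionTimer() and ClearCollectionRequested() perform in the real CollectionBarrier.

// Simplified sketch of the CollectionBarrier request states; assumptions:
// plain std::atomic and assert() instead of V8's DCHECK/USE macros.
#include <atomic>
#include <cassert>

enum class RequestState {
  kDefault,              // No collection requested yet.
  kCollectionRequested,  // A background thread asked the main thread for GC.
  kCollectionStarted,    // The main thread already entered the GC.
  kShutdown,             // Isolate shutdown; no further GCs possible.
};

class CollectionBarrierSketch {
 public:
  // Returns true only for the very first requester. Once the GC has started
  // (state is kCollectionStarted), the CAS fails, so a background thread that
  // parked itself just before the GC began cannot request another one.
  bool FirstCollectionRequest() {
    RequestState expected = RequestState::kDefault;
    return state_.compare_exchange_strong(expected,
                                          RequestState::kCollectionRequested);
  }

  // Hypothetical name: the transition performed when the main thread starts
  // the GC (in the real code this happens in StopTimeToCollectionTimer()).
  void MarkCollectionStarted() {
    RequestState old_state = state_.exchange(RequestState::kCollectionStarted);
    assert(old_state == RequestState::kDefault ||
           old_state == RequestState::kCollectionRequested);
    (void)old_state;
  }

  // Hypothetical name: the transition performed at the end of the GC (in the
  // real code this happens in ClearCollectionRequested()).
  void MarkCollectionDone() {
    RequestState old_state = state_.exchange(RequestState::kDefault);
    assert(old_state == RequestState::kCollectionStarted);
    (void)old_state;
  }

 private:
  std::atomic<RequestState> state_{RequestState::kDefault};
};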
@@ -38,7 +38,20 @@ class BackgroundCollectionInterruptTask : public CancelableTask {
 };
 
 void CollectionBarrier::AwaitCollectionBackground() {
-  if (FirstCollectionRequest()) {
+  bool first;
+  {
+    base::MutexGuard guard(&mutex_);
+    first = FirstCollectionRequest();
+
+    if (first) {
+      // Initialize scope while holding the lock - prevents GC from starting
+      // before setting up this counter
+      time_to_collection_scope_.emplace(
+          heap_->isolate()->counters()->time_to_collection());
+    }
+  }
+
+  if (first) {
     // This is the first background thread requesting collection, ask the main
     // thread for GC.
     ActivateStackGuardAndPostTask();
@@ -49,6 +62,11 @@ void CollectionBarrier::AwaitCollectionBackground() {
 
 void CollectionBarrier::StopTimeToCollectionTimer() {
   base::MutexGuard guard(&mutex_);
+  RequestState old_state = state_.exchange(RequestState::kCollectionStarted,
+                                           std::memory_order_relaxed);
+  USE(old_state);
+  DCHECK(old_state == RequestState::kDefault ||
+         old_state == RequestState::kCollectionRequested);
   time_to_collection_scope_.reset();
 }
@@ -60,8 +78,6 @@ void CollectionBarrier::ActivateStackGuardAndPostTask() {
       reinterpret_cast<v8::Isolate*>(isolate));
   taskrunner->PostTask(
       std::make_unique<BackgroundCollectionInterruptTask>(heap_));
-  base::MutexGuard guard(&mutex_);
-  time_to_collection_scope_.emplace(isolate->counters()->time_to_collection());
 }
 
 void CollectionBarrier::BlockUntilCollected() {
...
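The hunk above couples two things under a single mutex acquisition: claiming the first-requester role and creating the time-to-collection timer scope. A hedged sketch of that "claim and initialize under one lock" pattern follows; ClaimFirstRequest(), PostCollectionTask() and TimerScope are placeholder names using standard-library types, not V8 API.

// Sketch of the locking pattern in AwaitCollectionBackground(); the CAS on
// the atomic state is replaced by a plain bool for brevity.
#include <mutex>
#include <optional>

struct TimerScope {};  // Stand-in for the histogram timer scope.

class BarrierSketch {
 public:
  void AwaitCollectionBackground() {
    bool first;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      first = ClaimFirstRequest();
      if (first) {
        // Create the timer while still holding the lock, so the GC cannot
        // start (and stop the timer) before the timer exists.
        timer_scope_.emplace();
      }
    }
    if (first) {
      // Slow work (posting the task, activating stack guards) stays outside
      // the lock.
      PostCollectionTask();
    }
  }

 private:
  bool ClaimFirstRequest() { return !requested_ && (requested_ = true); }
  void PostCollectionTask() {}  // No-op in this sketch.

  std::mutex mutex_;
  bool requested_ = false;
  std::optional<TimerScope> timer_scope_;
};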
@@ -29,7 +29,10 @@ class CollectionBarrier {
     kDefault,
 
     // Collection was already requested
-    kCollection,
+    kCollectionRequested,
+
+    // Collection was already started
+    kCollectionStarted,
 
     // This state is reached after isolate starts to shut down. The main
     // thread can't perform any GCs anymore, so all allocations need to be
@@ -40,8 +43,6 @@ class CollectionBarrier {
   // The current state.
   std::atomic<RequestState> state_;
 
-  void BlockUntilCollected();
-
   // Request GC by activating stack guards and posting a task to perform the
   // GC.
   void ActivateStackGuardAndPostTask();
@@ -50,14 +51,16 @@ class CollectionBarrier {
   // kCollection.
   bool FirstCollectionRequest() {
     RequestState expected = RequestState::kDefault;
-    return state_.compare_exchange_strong(expected, RequestState::kCollection);
+    return state_.compare_exchange_strong(expected,
+                                          RequestState::kCollectionRequested);
   }
 
   // Sets state back to kDefault - invoked at end of GC.
   void ClearCollectionRequested() {
     RequestState old_state =
         state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
-    CHECK_NE(old_state, RequestState::kShutdown);
+    USE(old_state);
+    DCHECK_EQ(old_state, RequestState::kCollectionStarted);
   }
 
  public:
@@ -66,10 +69,12 @@ class CollectionBarrier {
   // Checks whether any background thread requested GC.
   bool CollectionRequested() {
-    return state_.load(std::memory_order_relaxed) == RequestState::kCollection;
+    return state_.load(std::memory_order_relaxed) ==
+           RequestState::kCollectionRequested;
   }
 
   void StopTimeToCollectionTimer();
+  void BlockUntilCollected();
 
   // Resumes threads waiting for collection.
   void ResumeThreadsAwaitingCollection();
...
@@ -1515,8 +1515,6 @@ bool Heap::CollectGarbage(AllocationSpace space,
       this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
       GarbageCollectionReasonToString(gc_reason));
 
-  collection_barrier_->StopTimeToCollectionTimer();
-
   if (!CanPromoteYoungAndExpandOldGeneration(0)) {
     InvokeNearHeapLimitCallback();
   }
@@ -1941,6 +1939,11 @@ size_t Heap::PerformGarbageCollection(
     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
   DisallowJavascriptExecution no_js(isolate());
   base::Optional<SafepointScope> optional_safepoint_scope;
+
+  // Stop time-to-collection timer before safepoint - we do not want to measure
+  // time for safepointing.
+  collection_barrier_->StopTimeToCollectionTimer();
+
   if (FLAG_local_heaps) {
     optional_safepoint_scope.emplace(this);
   }
...
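The last hunk moves StopTimeToCollectionTimer() from CollectGarbage into PerformGarbageCollection, so it now runs before the safepoint is entered. A small stand-alone illustration of the measurement boundary this establishes follows; it uses plain std::chrono and contains no V8 code.

// Illustrates the metric boundary: "time to collection" ends when the GC
// starts, and the time spent parking background threads (safepointing) is
// deliberately excluded. Purely illustrative.
#include <chrono>
#include <cstdio>
#include <thread>

int main() {
  using Clock = std::chrono::steady_clock;

  auto requested = Clock::now();  // Background thread requests a GC.

  // ... main thread eventually picks up the request ...
  std::this_thread::sleep_for(std::chrono::milliseconds(2));
  auto gc_started = Clock::now();  // Timer stops here.

  // Safepointing happens afterwards and is not part of the metric.
  std::this_thread::sleep_for(std::chrono::milliseconds(5));
  auto safepoint_reached = Clock::now();

  auto to_us = [](auto d) {
    return std::chrono::duration_cast<std::chrono::microseconds>(d).count();
  };
  std::printf("time-to-collection: %lld us (excludes %lld us of safepointing)\n",
              static_cast<long long>(to_us(gc_started - requested)),
              static_cast<long long>(to_us(safepoint_reached - gc_started)));
  return 0;
}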