Commit 28d5f133 authored by Dominik Inführ, committed by Commit Bot

[heap] Fix deadlock with concurrent allocation and blocked main thread

It could happen (e.g. with --stress-background-compile) that the main
thread blocks waiting for a background thread while that background thread
requests a GC from the main thread. This would result in a deadlock. Avoid
this by parking the main thread for potentially blocking operations and
allowing allocations while the main thread is parked.

This CL introduces new states for the main thread: CollectionRequested
and ParkedCollectionRequested. These states force Safepoint(), Park() and
Unpark() on the main thread into slow paths. The slow path can then perform
a GC on the main thread - right before parking or right after the main
thread is unparked.

Bug: v8:10315
Change-Id: If7ef31622d27320613139a0b7f79086fe3200f99
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2731528
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73707}
parent 51140a44
......@@ -223,6 +223,8 @@ class AsmJsCompilationJob final : public UnoptimizedCompilationJob {
};
UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
DisallowHeapAccess no_heap_access;
// Step 1: Translate asm.js module to WebAssembly module.
Zone* compile_zone = &zone_;
Zone translate_zone(allocator_, ZONE_NAME);
......
......@@ -288,7 +288,6 @@ ScriptOriginOptions OriginOptionsForEval(Object script) {
// Implementation of UnoptimizedCompilationJob
CompilationJob::Status UnoptimizedCompilationJob::ExecuteJob() {
DisallowHeapAccess no_heap_access;
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToExecute);
ScopedTimer t(&time_taken_to_execute_);
......@@ -2802,7 +2801,11 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
}
// Join with background thread and finalize compilation.
background_compile_thread.Join();
{
ParkedScope scope(isolate->main_thread_local_isolate());
background_compile_thread.Join();
}
MaybeHandle<SharedFunctionInfo> maybe_result =
Compiler::GetSharedFunctionInfoForStreamedScript(
isolate, source, script_details, origin_options,
......
......@@ -6,24 +6,37 @@
#include "src/base/platform/time.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
namespace v8 {
namespace internal {
void CollectionBarrier::ResumeThreadsAwaitingCollection() {
base::MutexGuard guard(&mutex_);
ClearCollectionRequested();
cond_.NotifyAll();
bool CollectionBarrier::CollectionRequested() {
return main_thread_state_relaxed() == LocalHeap::kCollectionRequested;
}
LocalHeap::ThreadState CollectionBarrier::main_thread_state_relaxed() {
LocalHeap* main_thread_local_heap =
heap_->isolate()->main_thread_local_heap();
return main_thread_local_heap->state_relaxed();
}
void CollectionBarrier::ShutdownRequested() {
void CollectionBarrier::NotifyShutdownRequested() {
base::MutexGuard guard(&mutex_);
if (timer_.IsStarted()) timer_.Stop();
state_.store(RequestState::kShutdown);
cond_.NotifyAll();
shutdown_requested_ = true;
cv_wakeup_.NotifyAll();
}
void CollectionBarrier::ResumeThreadsAwaitingCollection() {
base::MutexGuard guard(&mutex_);
cv_wakeup_.NotifyAll();
}
class BackgroundCollectionInterruptTask : public CancelableTask {
......@@ -44,30 +57,29 @@ class BackgroundCollectionInterruptTask : public CancelableTask {
Heap* heap_;
};
void CollectionBarrier::AwaitCollectionBackground() {
bool first;
{
base::MutexGuard guard(&mutex_);
first = FirstCollectionRequest();
if (first) timer_.Start();
}
bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
ParkedScope scope(local_heap);
base::MutexGuard guard(&mutex_);
if (first) {
// This is the first background thread requesting collection, ask the main
// thread for GC.
ActivateStackGuardAndPostTask();
while (CollectionRequested()) {
if (shutdown_requested_) return false;
cv_wakeup_.Wait(&mutex_);
}
BlockUntilCollected();
return true;
}
void CollectionBarrier::StopTimeToCollectionTimer() {
base::MutexGuard guard(&mutex_);
RequestState old_state = state_.exchange(RequestState::kCollectionStarted,
std::memory_order_relaxed);
if (old_state == RequestState::kCollectionRequested) {
DCHECK(timer_.IsStarted());
LocalHeap::ThreadState main_thread_state = main_thread_state_relaxed();
CHECK(main_thread_state == LocalHeap::kRunning ||
main_thread_state == LocalHeap::kCollectionRequested);
if (main_thread_state == LocalHeap::kCollectionRequested) {
base::MutexGuard guard(&mutex_);
// The first background thread that requests the GC starts the timer first
// and only then parks itself. Since we are in a safepoint here, the timer
// is therefore already initialized at this point.
CHECK(timer_.IsStarted());
base::TimeDelta delta = timer_.Elapsed();
TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"V8.GC.TimeToCollectionOnBackground",
......@@ -78,9 +90,6 @@ void CollectionBarrier::StopTimeToCollectionTimer() {
->gc_time_to_collection_on_background()
->AddTimedSample(delta);
timer_.Stop();
} else {
DCHECK_EQ(old_state, RequestState::kDefault);
DCHECK(!timer_.IsStarted());
}
}
......@@ -88,20 +97,15 @@ void CollectionBarrier::ActivateStackGuardAndPostTask() {
Isolate* isolate = heap_->isolate();
ExecutionAccess access(isolate);
isolate->stack_guard()->RequestGC();
auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(isolate));
taskrunner->PostTask(
std::make_unique<BackgroundCollectionInterruptTask>(heap_));
}
void CollectionBarrier::BlockUntilCollected() {
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_COLLECTION,
ThreadKind::kBackground);
base::MutexGuard guard(&mutex_);
while (CollectionRequested()) {
cond_.Wait(&mutex_);
}
CHECK(!timer_.IsStarted());
timer_.Start();
}
} // namespace internal
......
......@@ -8,8 +8,10 @@
#include <atomic>
#include "src/base/optional.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/mutex.h"
#include "src/heap/local-heap.h"
#include "src/logging/counters.h"
namespace v8 {
......@@ -21,70 +23,34 @@ class Heap;
class CollectionBarrier {
Heap* heap_;
base::Mutex mutex_;
base::ConditionVariable cond_;
base::ConditionVariable cv_wakeup_;
base::ElapsedTimer timer_;
bool shutdown_requested_;
enum class RequestState {
// Default state, no collection requested and tear down wasn't initiated
// yet.
kDefault,
// Collection was already requested
kCollectionRequested,
// Collection was already started
kCollectionStarted,
// This state is reached after isolate starts to shut down. The main
// thread can't perform any GCs anymore, so all allocations need to be
// allowed from here on until background thread finishes.
kShutdown,
};
// The current state.
std::atomic<RequestState> state_;
// Request GC by activating stack guards and posting a task to perform the
// GC.
void ActivateStackGuardAndPostTask();
// Returns true when state was successfully updated from kDefault to
// kCollectionRequested.
bool FirstCollectionRequest() {
RequestState expected = RequestState::kDefault;
return state_.compare_exchange_strong(expected,
RequestState::kCollectionRequested);
}
// Sets state back to kDefault - invoked at end of GC.
void ClearCollectionRequested() {
RequestState old_state =
state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
USE(old_state);
DCHECK_EQ(old_state, RequestState::kCollectionStarted);
}
LocalHeap::ThreadState main_thread_state_relaxed();
public:
explicit CollectionBarrier(Heap* heap)
: heap_(heap), state_(RequestState::kDefault) {}
: heap_(heap), shutdown_requested_(false) {}
// Returns true when collection was requested.
bool CollectionRequested();
// Checks whether any background thread requested GC.
bool CollectionRequested() {
return state_.load(std::memory_order_relaxed) ==
RequestState::kCollectionRequested;
}
// Resumes all threads waiting for GC when tear down starts.
void NotifyShutdownRequested();
// Stops the TimeToCollection timer when starting the GC.
void StopTimeToCollectionTimer();
void BlockUntilCollected();
// Resumes threads waiting for collection.
void ResumeThreadsAwaitingCollection();
// Sets current state to kShutdown.
void ShutdownRequested();
// This is the method used by background threads to request and wait for GC.
void AwaitCollectionBackground();
bool AwaitCollectionBackground(LocalHeap* local_heap);
// Request GC by activating stack guards and posting a task to perform the
// GC.
void ActivateStackGuardAndPostTask();
};
} // namespace internal
......
......@@ -8,6 +8,7 @@
#include "src/execution/isolate.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/marking.h"
......@@ -38,21 +39,24 @@ void StressConcurrentAllocatorTask::RunInternal() {
AllocationAlignment::kWordAligned);
heap->CreateFillerObjectAtBackground(
address, kSmallObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory);
local_heap.Safepoint();
address = local_heap.AllocateRawOrFail(
AllocationResult result = local_heap.AllocateRaw(
kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
heap->CreateFillerObjectAtBackground(
address, kMediumObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
local_heap.Safepoint();
address = local_heap.AllocateRawOrFail(
kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
heap->CreateFillerObjectAtBackground(
address, kLargeObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kMediumObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
}
result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kLargeObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
}
local_heap.Safepoint();
}
......@@ -109,7 +113,6 @@ AllocationResult ConcurrentAllocator::AllocateInLabSlow(
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(
local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
if (!result) return false;
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
......
......@@ -409,11 +409,13 @@ bool Heap::CanExpandOldGeneration(size_t size) {
return memory_allocator()->Size() + size <= MaxReserved();
}
bool Heap::CanExpandOldGenerationBackground(size_t size) {
bool Heap::CanExpandOldGenerationBackground(LocalHeap* local_heap,
size_t size) {
if (force_oom_) return false;
// When the heap is tearing down, GC requests from background threads are
// not served and the threads are allowed to expand the heap to avoid OOM.
return gc_state() == TEAR_DOWN ||
return gc_state() == TEAR_DOWN || IsMainThreadParked(local_heap) ||
memory_allocator()->Size() + size <= MaxReserved();
}
......@@ -1177,6 +1179,15 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
ReduceNewSpaceSize();
}
// Set main thread state back to Running from CollectionRequested.
LocalHeap* main_thread_local_heap = isolate()->main_thread_local_heap();
LocalHeap::ThreadState old_state =
main_thread_local_heap->state_.exchange(LocalHeap::kRunning);
CHECK(old_state == LocalHeap::kRunning ||
old_state == LocalHeap::kCollectionRequested);
// Resume all threads waiting for the GC.
collection_barrier_->ResumeThreadsAwaitingCollection();
}
......@@ -1944,18 +1955,15 @@ bool Heap::CollectionRequested() {
return collection_barrier_->CollectionRequested();
}
void Heap::RequestCollectionBackground(LocalHeap* local_heap) {
if (local_heap->is_main_thread()) {
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kBackgroundAllocationFailure,
current_gc_callback_flags_);
} else {
collection_barrier_->AwaitCollectionBackground();
}
void Heap::CollectGarbageForBackground(LocalHeap* local_heap) {
CHECK(local_heap->is_main_thread());
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kBackgroundAllocationFailure,
current_gc_callback_flags_);
}
void Heap::CheckCollectionRequested() {
if (!collection_barrier_->CollectionRequested()) return;
if (!CollectionRequested()) return;
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kBackgroundAllocationFailure,
......@@ -2013,14 +2021,12 @@ size_t Heap::PerformGarbageCollection(
// cycle.
UpdateCurrentEpoch(collector);
// Stop time-to-collection timer before safepoint - we do not want to measure
// time for safepointing.
collection_barrier_->StopTimeToCollectionTimer();
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
SafepointScope safepoint_scope(this);
collection_barrier_->StopTimeToCollectionTimer();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
......@@ -4891,7 +4897,11 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
// was initiated.
if (gc_state() == TEAR_DOWN) return true;
// Ensure that retry of allocation on background thread succeeds
// If the main thread is parked, it can't perform the GC. Avoid the deadlock
// by allowing the allocation.
if (IsMainThreadParked(local_heap)) return true;
// Make it more likely that retry of allocation on background thread succeeds
if (IsRetryOfFailedAllocation(local_heap)) return true;
// Background thread requested GC, allocation should fail
......@@ -4918,6 +4928,11 @@ bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
return local_heap->allocation_failed_;
}
bool Heap::IsMainThreadParked(LocalHeap* local_heap) {
if (!local_heap) return false;
return local_heap->main_thread_parked_;
}
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
if (ShouldReduceMemory() || FLAG_stress_compaction) {
return Heap::HeapGrowingMode::kMinimal;
......@@ -5480,7 +5495,7 @@ void Heap::StartTearDown() {
// process the event queue anymore. Avoid this deadlock by allowing all
// allocations after tear down was requested to make sure all background
// threads finish.
collection_barrier_->ShutdownRequested();
collection_barrier_->NotifyShutdownRequested();
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
......
......@@ -667,8 +667,8 @@ class Heap {
template <FindMementoMode mode>
inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
// Requests collection and blocks until GC is finished.
void RequestCollectionBackground(LocalHeap* local_heap);
// Performs GC after background allocation failure.
void CollectGarbageForBackground(LocalHeap* local_heap);
//
// Support for the API.
......@@ -1932,12 +1932,14 @@ class Heap {
bool always_allocate() { return always_allocate_scope_count_ != 0; }
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(size_t size);
V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(LocalHeap* local_heap,
size_t size);
V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(size_t size);
bool ShouldExpandOldGenerationOnSlowAllocation(
LocalHeap* local_heap = nullptr);
bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
bool IsMainThreadParked(LocalHeap* local_heap);
HeapGrowingMode CurrentHeapGrowingMode();
......@@ -2362,6 +2364,7 @@ class Heap {
friend class ScavengeTaskObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class LocalHeap;
friend class OldLargeObjectSpace;
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
......
......@@ -162,7 +162,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGenerationBackground(object_size) ||
if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
return AllocationResult::Retry(identity());
}
......
......@@ -27,8 +27,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
Heap::HeapState state = heap()->gc_state();
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
ThreadState current = state_.load(std::memory_order_relaxed);
DCHECK(current == ThreadState::Running ||
current == ThreadState::SafepointRequested);
DCHECK(current == kRunning || current == kSafepointRequested);
#endif
// Each allocation is supposed to be a safepoint.
......
......@@ -12,6 +12,7 @@
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/handles/local-handles.h"
#include "src/heap/collection-barrier.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
......@@ -44,8 +45,9 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
std::unique_ptr<PersistentHandles> persistent_handles)
: heap_(heap),
is_main_thread_(kind == ThreadKind::kMain),
state_(ThreadState::Parked),
state_(kParked),
allocation_failed_(false),
main_thread_parked_(false),
prev_(nullptr),
next_(nullptr),
handles_(new LocalHandles),
......@@ -124,8 +126,8 @@ bool LocalHeap::IsHandleDereferenceAllowed() {
VerifyCurrent();
#endif
ThreadState state = state_relaxed();
return state == ThreadState::Running ||
state == ThreadState::SafepointRequested;
return state == kRunning || state == kSafepointRequested ||
state == kCollectionRequested;
}
#endif
......@@ -134,32 +136,45 @@ bool LocalHeap::IsParked() {
VerifyCurrent();
#endif
ThreadState state = state_relaxed();
return state == ThreadState::Parked || state == ThreadState::ParkedSafepoint;
return state == kParked || state == kParkedSafepointRequested ||
state == kParkedCollectionRequested;
}
void LocalHeap::Park() {
ThreadState expected = ThreadState::Running;
if (!state_.compare_exchange_strong(expected, ThreadState::Parked)) {
CHECK_EQ(expected, ThreadState::SafepointRequested);
expected = ThreadState::SafepointRequested;
CHECK(
state_.compare_exchange_strong(expected, ThreadState::ParkedSafepoint));
void LocalHeap::ParkSlowPath(ThreadState current_state) {
if (is_main_thread()) {
while (true) {
CHECK_EQ(current_state, kCollectionRequested);
heap_->CollectGarbageForBackground(this);
current_state = kRunning;
if (state_.compare_exchange_strong(current_state, kParked)) {
return;
}
}
} else {
CHECK_EQ(current_state, kSafepointRequested);
CHECK(state_.compare_exchange_strong(current_state,
kParkedSafepointRequested));
heap_->safepoint()->NotifyPark();
}
}
void LocalHeap::Unpark() {
while (true) {
ThreadState expected = ThreadState::Parked;
if (!state_.compare_exchange_strong(expected, ThreadState::Running)) {
CHECK_EQ(expected, ThreadState::ParkedSafepoint);
DCHECK(!is_main_thread());
DCHECK_EQ(LocalHeap::Current(), this);
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
ThreadKind::kBackground);
heap_->safepoint()->WaitInUnpark();
} else {
break;
void LocalHeap::UnparkSlowPath() {
if (is_main_thread()) {
ThreadState expected = kParkedCollectionRequested;
CHECK(state_.compare_exchange_strong(expected, kCollectionRequested));
heap_->CollectGarbageForBackground(this);
} else {
while (true) {
ThreadState expected = kParked;
if (!state_.compare_exchange_strong(expected, kRunning)) {
CHECK_EQ(expected, kParkedSafepointRequested);
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
ThreadKind::kBackground);
heap_->safepoint()->WaitInUnpark();
} else {
return;
}
}
}
}
......@@ -169,17 +184,20 @@ void LocalHeap::EnsureParkedBeforeDestruction() {
}
void LocalHeap::SafepointSlowPath() {
DCHECK(!is_main_thread());
DCHECK_EQ(LocalHeap::Current(), this);
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
ThreadKind::kBackground);
LocalHeap::ThreadState expected = LocalHeap::ThreadState::SafepointRequested;
CHECK(state_.compare_exchange_strong(expected,
LocalHeap::ThreadState::Safepoint));
heap_->safepoint()->WaitInSafepoint();
// This might be a bit surprising, GlobalSafepoint transitions the state from
// Safepoint (--> Running) --> Parked when returning from the safepoint.
Unpark();
if (is_main_thread()) {
CHECK_EQ(kCollectionRequested, state_relaxed());
heap_->CollectGarbageForBackground(this);
} else {
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
ThreadKind::kBackground);
ThreadState expected = kSafepointRequested;
CHECK(state_.compare_exchange_strong(expected, kSafepoint));
heap_->safepoint()->WaitInSafepoint();
// This might be a bit surprising: GlobalSafepoint transitions the state
// from Safepoint (--> Running) --> Parked when returning from the
// safepoint.
Unpark();
}
}
void LocalHeap::FreeLinearAllocationArea() {
......@@ -198,23 +216,63 @@ void LocalHeap::UnmarkLinearAllocationArea() {
old_space_allocator_.UnmarkLinearAllocationArea();
}
void LocalHeap::PerformCollection() {
ParkedScope scope(this);
heap_->RequestCollectionBackground(this);
bool LocalHeap::TryPerformCollection() {
if (is_main_thread()) {
heap_->CollectGarbageForBackground(this);
return true;
} else {
LocalHeap* main_thread = heap_->isolate()->main_thread_local_heap();
ThreadState current = main_thread->state_relaxed();
while (true) {
switch (current) {
case kRunning:
if (main_thread->state_.compare_exchange_strong(
current, kCollectionRequested)) {
heap_->collection_barrier_->ActivateStackGuardAndPostTask();
return heap_->collection_barrier_->AwaitCollectionBackground(this);
}
break;
case kCollectionRequested:
return heap_->collection_barrier_->AwaitCollectionBackground(this);
case kParked:
if (main_thread->state_.compare_exchange_strong(
current, kParkedCollectionRequested)) {
heap_->collection_barrier_->ActivateStackGuardAndPostTask();
return false;
}
break;
case kParkedCollectionRequested:
return false;
default:
UNREACHABLE();
}
}
}
}
Address LocalHeap::PerformCollectionAndAllocateAgain(
int object_size, AllocationType type, AllocationOrigin origin,
AllocationAlignment alignment) {
CHECK(!allocation_failed_);
CHECK(!main_thread_parked_);
allocation_failed_ = true;
static const int kMaxNumberOfRetries = 3;
for (int i = 0; i < kMaxNumberOfRetries; i++) {
PerformCollection();
if (!TryPerformCollection()) {
main_thread_parked_ = true;
}
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
if (!result.IsRetry()) {
allocation_failed_ = false;
main_thread_parked_ = false;
return result.ToObjectChecked().address();
}
}
......
......@@ -8,6 +8,7 @@
#include <atomic>
#include <memory>
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/common/assert-scope.h"
......@@ -45,8 +46,11 @@ class V8_EXPORT_PRIVATE LocalHeap {
void Safepoint() {
DCHECK(AllowSafepoints::IsAllowed());
ThreadState current = state_relaxed();
STATIC_ASSERT(kSafepointRequested == kCollectionRequested);
if (V8_UNLIKELY(current == ThreadState::SafepointRequested)) {
// The following condition checks for both kSafepointRequested (background
// thread) and kCollectionRequested (main thread).
if (V8_UNLIKELY(current == kSafepointRequested)) {
SafepointSlowPath();
}
}
......@@ -129,7 +133,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
bool is_main_thread() const { return is_main_thread_; }
// Requests GC and blocks until the collection finishes.
void PerformCollection();
bool TryPerformCollection();
// Adds a callback that is invoked with the given |data| after each GC.
// The callback is invoked on the main thread before any background thread
......@@ -139,24 +143,38 @@ class V8_EXPORT_PRIVATE LocalHeap {
void RemoveGCEpilogueCallback(GCEpilogueCallback* callback, void* data);
private:
enum class ThreadState {
enum ThreadState {
// Threads in this state are allowed to access the heap.
Running,
kRunning,
// Thread was parked, which means that the thread is not allowed to access
// or manipulate the heap in any way. This is considered to be a safepoint.
Parked,
kParked,
// All other states are needed for stopping-the-world.
// SafepointRequested is used for Running threads to force Safepoint() and
// SafepointRequested is used for Running background threads to force
// Safepoint() and
// Park() into the slow path.
SafepointRequested,
// A thread transitions into this state from SafepointRequested when it
kSafepointRequested,
// A background thread transitions into this state from SafepointRequested
// when it
// enters a safepoint.
Safepoint,
// This state is used for Parked threads and forces Unpark() into the slow
kSafepoint,
// This state is used for Parked background threads and forces Unpark() into
// the slow
// path. It prevents Unpark() to succeed before the safepoint operation is
// finished.
ParkedSafepoint,
kParkedSafepointRequested,
// This state is used on the main thread when at least one background thread
// requested a GC while the main thread was Running.
// We can use the same value for CollectionRequested and SafepointRequested
// since the first is only used on the main thread, while the other one only
// occurs on background threads. This property is used to have a faster
// check in Safepoint().
kCollectionRequested = kSafepointRequested,
// This state is used on the main thread when at least one background thread
// requested a GC while the main thread was Parked.
kParkedCollectionRequested,
};
ThreadState state_relaxed() { return state_.load(std::memory_order_relaxed); }
......@@ -168,8 +186,24 @@ class V8_EXPORT_PRIVATE LocalHeap {
AllocationOrigin origin,
AllocationAlignment alignment);
void Park();
void Unpark();
void Park() {
DCHECK(AllowGarbageCollection::IsAllowed());
ThreadState expected = kRunning;
if (!state_.compare_exchange_strong(expected, kParked)) {
ParkSlowPath(expected);
}
}
void Unpark() {
DCHECK(AllowGarbageCollection::IsAllowed());
ThreadState expected = kParked;
if (!state_.compare_exchange_strong(expected, kRunning)) {
UnparkSlowPath();
}
}
void ParkSlowPath(ThreadState state);
void UnparkSlowPath();
void EnsureParkedBeforeDestruction();
void SafepointSlowPath();
......@@ -183,6 +217,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::atomic<ThreadState> state_;
bool allocation_failed_;
bool main_thread_parked_;
LocalHeap* prev_;
LocalHeap* next_;
......@@ -195,12 +230,13 @@ class V8_EXPORT_PRIVATE LocalHeap {
ConcurrentAllocator old_space_allocator_;
friend class Heap;
friend class CollectionBarrier;
friend class ConcurrentAllocator;
friend class GlobalSafepoint;
friend class Heap;
friend class Isolate;
friend class ParkedScope;
friend class UnparkedScope;
friend class ConcurrentAllocator;
friend class Isolate;
};
} // namespace internal
......
......@@ -592,7 +592,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
heap()->CanExpandOldGenerationBackground(AreaSize())) {
heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
auto result = ExpandBackground(local_heap, max_size_in_bytes);
if (result) {
DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
......
......@@ -45,18 +45,16 @@ void GlobalSafepoint::EnterSafepointScope() {
LocalHeap::ThreadState expected = local_heap->state_relaxed();
while (true) {
CHECK(expected == LocalHeap::ThreadState::Parked ||
expected == LocalHeap::ThreadState::Running);
CHECK(expected == LocalHeap::kParked || expected == LocalHeap::kRunning);
LocalHeap::ThreadState new_state =
expected == LocalHeap::ThreadState::Parked
? LocalHeap::ThreadState::ParkedSafepoint
: LocalHeap::ThreadState::SafepointRequested;
expected == LocalHeap::kParked ? LocalHeap::kParkedSafepointRequested
: LocalHeap::kSafepointRequested;
if (local_heap->state_.compare_exchange_strong(expected, new_state)) {
if (expected == LocalHeap::ThreadState::Running) {
if (expected == LocalHeap::kRunning) {
running++;
} else {
CHECK_EQ(expected, LocalHeap::ThreadState::Parked);
CHECK_EQ(expected, LocalHeap::kParked);
}
break;
}
......@@ -78,16 +76,17 @@ void GlobalSafepoint::LeaveSafepointScope() {
continue;
}
// We transition both ParkedSafepoint and Safepoint states to Parked. While
// this is probably intuitive for ParkedSafepoint, this might be surprising
// for Safepoint though. SafepointSlowPath() will later unpark that thread
// again. Going through Parked means that a background thread doesn't need
// to be woken up before the main thread can start the next safepoint.
// We transition both ParkedSafepointRequested and Safepoint states to
// Parked. While this is probably intuitive for ParkedSafepointRequested,
// this might be surprising for Safepoint though. SafepointSlowPath() will
// later unpark that thread again. Going through Parked means that a
// background thread doesn't need to be woken up before the main thread can
// start the next safepoint.
LocalHeap::ThreadState old_state =
local_heap->state_.exchange(LocalHeap::ThreadState::Parked);
CHECK(old_state == LocalHeap::ThreadState::ParkedSafepoint ||
old_state == LocalHeap::ThreadState::Safepoint);
local_heap->state_.exchange(LocalHeap::kParked);
CHECK(old_state == LocalHeap::kParkedSafepointRequested ||
old_state == LocalHeap::kSafepoint);
}
barrier_.Disarm();
......
......@@ -184,13 +184,13 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
// TODO(lpy): add support for background compilation RCS trace.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
base::Optional<ParkedScope> parked_scope;
if (local_isolate_) parked_scope.emplace(local_isolate_);
// Print AST if flag is enabled. Note that if compiling on a background thread
// then ASTs from different functions may be interspersed when printed.
MaybePrintAst(parse_info(), compilation_info());
base::Optional<ParkedScope> parked_scope;
if (local_isolate_) parked_scope.emplace(local_isolate_);
generator()->GenerateBytecode(stack_limit());
if (generator()->HasStackOverflow()) {
......
......@@ -65,7 +65,8 @@ void AllocateSomeObjects(LocalHeap* local_heap) {
class ConcurrentAllocationThread final : public v8::base::Thread {
public:
explicit ConcurrentAllocationThread(Heap* heap, std::atomic<int>* pending)
explicit ConcurrentAllocationThread(Heap* heap,
std::atomic<int>* pending = nullptr)
: v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
heap_(heap),
pending_(pending) {}
......@@ -74,7 +75,7 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
LocalHeap local_heap(heap_, ThreadKind::kBackground);
UnparkedScope unparked_scope(&local_heap);
AllocateSomeObjects(&local_heap);
pending_->fetch_sub(1);
if (pending_) pending_->fetch_sub(1);
}
Heap* heap_;
......@@ -128,6 +129,108 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
isolate->Dispose();
}
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadIsParked) {
FLAG_max_old_space_size = 4;
FLAG_stress_concurrent_allocation = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
const int kThreads = 4;
{
ParkedScope scope(i_isolate->main_thread_local_isolate());
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->Join();
}
}
isolate->Dispose();
}
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) {
FLAG_max_old_space_size = 4;
FLAG_stress_concurrent_allocation = false;
FLAG_incremental_marking = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (int i = 0; i < 300'000; i++) {
ParkedScope scope(i_isolate->main_thread_local_isolate());
}
{
ParkedScope scope(i_isolate->main_thread_local_isolate());
for (auto& thread : threads) {
thread->Join();
}
}
isolate->Dispose();
}
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadRunsWithSafepoints) {
FLAG_max_old_space_size = 4;
FLAG_stress_concurrent_allocation = false;
FLAG_incremental_marking = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
// Some of the following Safepoint() invocations are supposed to perform a GC.
for (int i = 0; i < 1'000'000; i++) {
i_isolate->main_thread_local_heap()->Safepoint();
}
{
ParkedScope scope(i_isolate->main_thread_local_isolate());
for (auto& thread : threads) {
thread->Join();
}
}
i_isolate->main_thread_local_heap()->Safepoint();
isolate->Dispose();
}
class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
public:
explicit LargeObjectConcurrentAllocationThread(Heap* heap,
......@@ -146,7 +249,7 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
if (result.IsRetry()) {
local_heap.PerformCollection();
local_heap.TryPerformCollection();
} else {
Address address = result.ToAddress();
CreateFixedArray(heap_, address, kLargeObjectSize);
......
......@@ -170,8 +170,11 @@ TEST_F(LocalHeapTest, GCEpilogue) {
CHECK(thread2->Start());
epilogue[1].WaitUntilStarted();
epilogue[2].WaitUntilStarted();
heap->PreciseCollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kTesting);
{
UnparkedScope scope(&lh);
heap->PreciseCollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kTesting);
}
epilogue[1].RequestStop();
epilogue[2].RequestStop();
thread1->Join();
......