Commit 28d5f133 authored by Dominik Inführ, committed by Commit Bot

[heap] Fix deadlock with concurrent allocation and blocked main thread

It could happen (e.g. with --stress-background-compile) that the main
thread blocks waiting for a background thread while that background
thread requests a GC from the main thread. This would result in a
deadlock. Avoid this by parking the main thread for potentially blocking
operations and by allowing allocations while the main thread is parked.
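
A minimal sketch of the pattern this CL applies (see the compiler.cc hunk
below): the main thread wraps a potentially blocking wait in a ParkedScope
so that background threads needing a GC are not stuck waiting for it.

  {
    // While the main thread is parked it may not touch the heap, but
    // background allocations are allowed to proceed (and may even expand
    // the old generation) instead of waiting for a main-thread GC.
    ParkedScope parked(isolate->main_thread_local_isolate());
    background_compile_thread.Join();
  }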

This CL introduces new states for the main thread: CollectionRequested
and ParkedCollectionRequested. These states force Safepoint(), Park() and
Unpark() on the main thread into slow paths. The slow path can then
perform a GC on the main thread, either right before parking or right
after the main thread is unparked.
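
For orientation, the main-thread state machine this introduces, condensed
from the src/heap/local-heap.h hunk below (comments paraphrased):

  enum ThreadState {
    kRunning,                   // may access the heap
    kParked,                    // may not touch the heap; counts as a safepoint
    kSafepointRequested,        // background thread: Safepoint()/Park() take the slow path
    kSafepoint,                 // background thread: currently inside a safepoint
    kParkedSafepointRequested,  // background thread: parked while a safepoint runs
    // Main thread only: a background thread requested a GC while the main
    // thread was Running. Shares kSafepointRequested's value so Safepoint()
    // needs a single comparison for both cases.
    kCollectionRequested = kSafepointRequested,
    // Main thread only: a GC was requested while the main thread was Parked;
    // forces Unpark() into the slow path, which performs the GC.
    kParkedCollectionRequested,
  };

  void LocalHeap::Park() {
    DCHECK(AllowGarbageCollection::IsAllowed());
    ThreadState expected = kRunning;
    // Any *Requested state makes the CAS fail; on the main thread the slow
    // path performs the requested GC before actually parking.
    if (!state_.compare_exchange_strong(expected, kParked)) {
      ParkSlowPath(expected);
    }
  }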

Bug: v8:10315
Change-Id: If7ef31622d27320613139a0b7f79086fe3200f99
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2731528
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73707}
parent 51140a44
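
The background-thread side of the handshake, condensed from
LocalHeap::TryPerformCollection() in the src/heap/local-heap.cc hunk below:
a background thread whose allocation failed flips the main thread's state
and either waits for the GC or, if the main thread is parked, gives up
waiting so the allocation can be allowed instead.

  // Background-thread branch only; on the main thread TryPerformCollection()
  // calls CollectGarbageForBackground() directly.
  LocalHeap* main_thread = heap_->isolate()->main_thread_local_heap();
  ThreadState current = main_thread->state_relaxed();
  while (true) {
    switch (current) {
      case kRunning:  // Ask the main thread for a GC and wait for it.
        if (main_thread->state_.compare_exchange_strong(current,
                                                        kCollectionRequested)) {
          heap_->collection_barrier_->ActivateStackGuardAndPostTask();
          return heap_->collection_barrier_->AwaitCollectionBackground(this);
        }
        break;  // CAS failed; re-dispatch on the updated state.
      case kCollectionRequested:  // A GC was already requested; just wait.
        return heap_->collection_barrier_->AwaitCollectionBackground(this);
      case kParked:  // The main thread cannot run a GC right now.
        if (main_thread->state_.compare_exchange_strong(
                current, kParkedCollectionRequested)) {
          heap_->collection_barrier_->ActivateStackGuardAndPostTask();
          return false;  // Caller notes the parked main thread and retries.
        }
        break;
      case kParkedCollectionRequested:
        return false;
      default:
        UNREACHABLE();
    }
  }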
@@ -223,6 +223,8 @@ class AsmJsCompilationJob final : public UnoptimizedCompilationJob {
}; };
UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() { UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
DisallowHeapAccess no_heap_access;
// Step 1: Translate asm.js module to WebAssembly module. // Step 1: Translate asm.js module to WebAssembly module.
Zone* compile_zone = &zone_; Zone* compile_zone = &zone_;
Zone translate_zone(allocator_, ZONE_NAME); Zone translate_zone(allocator_, ZONE_NAME);
......
@@ -288,7 +288,6 @@ ScriptOriginOptions OriginOptionsForEval(Object script) {
// Implementation of UnoptimizedCompilationJob // Implementation of UnoptimizedCompilationJob
CompilationJob::Status UnoptimizedCompilationJob::ExecuteJob() { CompilationJob::Status UnoptimizedCompilationJob::ExecuteJob() {
DisallowHeapAccess no_heap_access;
// Delegate to the underlying implementation. // Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToExecute); DCHECK_EQ(state(), State::kReadyToExecute);
ScopedTimer t(&time_taken_to_execute_); ScopedTimer t(&time_taken_to_execute_);
@@ -2802,7 +2801,11 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
} }
// Join with background thread and finalize compilation. // Join with background thread and finalize compilation.
{
ParkedScope scope(isolate->main_thread_local_isolate());
background_compile_thread.Join(); background_compile_thread.Join();
}
MaybeHandle<SharedFunctionInfo> maybe_result = MaybeHandle<SharedFunctionInfo> maybe_result =
Compiler::GetSharedFunctionInfoForStreamedScript( Compiler::GetSharedFunctionInfoForStreamedScript(
isolate, source, script_details, origin_options, isolate, source, script_details, origin_options,
......
@@ -6,24 +6,37 @@
#include "src/base/platform/time.h" #include "src/base/platform/time.h"
#include "src/common/globals.h" #include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/heap/gc-tracer.h" #include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h" #include "src/heap/heap-inl.h"
#include "src/heap/heap.h" #include "src/heap/heap.h"
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
void CollectionBarrier::ResumeThreadsAwaitingCollection() { bool CollectionBarrier::CollectionRequested() {
base::MutexGuard guard(&mutex_); return main_thread_state_relaxed() == LocalHeap::kCollectionRequested;
ClearCollectionRequested();
cond_.NotifyAll();
} }
void CollectionBarrier::ShutdownRequested() { LocalHeap::ThreadState CollectionBarrier::main_thread_state_relaxed() {
LocalHeap* main_thread_local_heap =
heap_->isolate()->main_thread_local_heap();
return main_thread_local_heap->state_relaxed();
}
void CollectionBarrier::NotifyShutdownRequested() {
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
if (timer_.IsStarted()) timer_.Stop(); if (timer_.IsStarted()) timer_.Stop();
state_.store(RequestState::kShutdown); shutdown_requested_ = true;
cond_.NotifyAll(); cv_wakeup_.NotifyAll();
}
void CollectionBarrier::ResumeThreadsAwaitingCollection() {
base::MutexGuard guard(&mutex_);
cv_wakeup_.NotifyAll();
} }
class BackgroundCollectionInterruptTask : public CancelableTask { class BackgroundCollectionInterruptTask : public CancelableTask {
@@ -44,30 +57,29 @@ class BackgroundCollectionInterruptTask : public CancelableTask {
Heap* heap_; Heap* heap_;
}; };
void CollectionBarrier::AwaitCollectionBackground() { bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
bool first; ParkedScope scope(local_heap);
{
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
first = FirstCollectionRequest();
if (first) timer_.Start();
}
if (first) { while (CollectionRequested()) {
// This is the first background thread requesting collection, ask the main if (shutdown_requested_) return false;
// thread for GC. cv_wakeup_.Wait(&mutex_);
ActivateStackGuardAndPostTask();
} }
BlockUntilCollected(); return true;
} }
void CollectionBarrier::StopTimeToCollectionTimer() { void CollectionBarrier::StopTimeToCollectionTimer() {
LocalHeap::ThreadState main_thread_state = main_thread_state_relaxed();
CHECK(main_thread_state == LocalHeap::kRunning ||
main_thread_state == LocalHeap::kCollectionRequested);
if (main_thread_state == LocalHeap::kCollectionRequested) {
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
RequestState old_state = state_.exchange(RequestState::kCollectionStarted, // The first background thread that requests the GC, starts the timer first
std::memory_order_relaxed); // and only then parks itself. Since we are in a safepoint here, the timer
if (old_state == RequestState::kCollectionRequested) { // is therefore always initialized here already.
DCHECK(timer_.IsStarted()); CHECK(timer_.IsStarted());
base::TimeDelta delta = timer_.Elapsed(); base::TimeDelta delta = timer_.Elapsed();
TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"), TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"V8.GC.TimeToCollectionOnBackground", "V8.GC.TimeToCollectionOnBackground",
@@ -78,9 +90,6 @@ void CollectionBarrier::StopTimeToCollectionTimer() {
->gc_time_to_collection_on_background() ->gc_time_to_collection_on_background()
->AddTimedSample(delta); ->AddTimedSample(delta);
timer_.Stop(); timer_.Stop();
} else {
DCHECK_EQ(old_state, RequestState::kDefault);
DCHECK(!timer_.IsStarted());
} }
} }
@@ -88,20 +97,15 @@ void CollectionBarrier::ActivateStackGuardAndPostTask() {
Isolate* isolate = heap_->isolate(); Isolate* isolate = heap_->isolate();
ExecutionAccess access(isolate); ExecutionAccess access(isolate);
isolate->stack_guard()->RequestGC(); isolate->stack_guard()->RequestGC();
auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner( auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(isolate)); reinterpret_cast<v8::Isolate*>(isolate));
taskrunner->PostTask( taskrunner->PostTask(
std::make_unique<BackgroundCollectionInterruptTask>(heap_)); std::make_unique<BackgroundCollectionInterruptTask>(heap_));
}
void CollectionBarrier::BlockUntilCollected() {
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_COLLECTION,
ThreadKind::kBackground);
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
CHECK(!timer_.IsStarted());
while (CollectionRequested()) { timer_.Start();
cond_.Wait(&mutex_);
}
} }
} // namespace internal } // namespace internal
......
@@ -8,8 +8,10 @@
#include <atomic> #include <atomic>
#include "src/base/optional.h" #include "src/base/optional.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/elapsed-timer.h" #include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/mutex.h" #include "src/base/platform/mutex.h"
#include "src/heap/local-heap.h"
#include "src/logging/counters.h" #include "src/logging/counters.h"
namespace v8 { namespace v8 {
@@ -21,70 +23,34 @@ class Heap;
class CollectionBarrier { class CollectionBarrier {
Heap* heap_; Heap* heap_;
base::Mutex mutex_; base::Mutex mutex_;
base::ConditionVariable cond_; base::ConditionVariable cv_wakeup_;
base::ElapsedTimer timer_; base::ElapsedTimer timer_;
bool shutdown_requested_;
enum class RequestState { LocalHeap::ThreadState main_thread_state_relaxed();
// Default state, no collection requested and tear down wasn't initated
// yet.
kDefault,
// Collection was already requested
kCollectionRequested,
// Collection was already started
kCollectionStarted,
// This state is reached after isolate starts to shut down. The main
// thread can't perform any GCs anymore, so all allocations need to be
// allowed from here on until background thread finishes.
kShutdown,
};
// The current state.
std::atomic<RequestState> state_;
// Request GC by activating stack guards and posting a task to perform the
// GC.
void ActivateStackGuardAndPostTask();
// Returns true when state was successfully updated from kDefault to
// kCollection.
bool FirstCollectionRequest() {
RequestState expected = RequestState::kDefault;
return state_.compare_exchange_strong(expected,
RequestState::kCollectionRequested);
}
// Sets state back to kDefault - invoked at end of GC.
void ClearCollectionRequested() {
RequestState old_state =
state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
USE(old_state);
DCHECK_EQ(old_state, RequestState::kCollectionStarted);
}
public: public:
explicit CollectionBarrier(Heap* heap) explicit CollectionBarrier(Heap* heap)
: heap_(heap), state_(RequestState::kDefault) {} : heap_(heap), shutdown_requested_(false) {}
// Returns true when collection was requested.
bool CollectionRequested();
// Checks whether any background thread requested GC. // Resumes all threads waiting for GC when tear down starts.
bool CollectionRequested() { void NotifyShutdownRequested();
return state_.load(std::memory_order_relaxed) ==
RequestState::kCollectionRequested;
}
// Stops the TimeToCollection timer when starting the GC.
void StopTimeToCollectionTimer(); void StopTimeToCollectionTimer();
void BlockUntilCollected();
// Resumes threads waiting for collection. // Resumes threads waiting for collection.
void ResumeThreadsAwaitingCollection(); void ResumeThreadsAwaitingCollection();
// Sets current state to kShutdown.
void ShutdownRequested();
// This is the method use by background threads to request and wait for GC. // This is the method use by background threads to request and wait for GC.
void AwaitCollectionBackground(); bool AwaitCollectionBackground(LocalHeap* local_heap);
// Request GC by activating stack guards and posting a task to perform the
// GC.
void ActivateStackGuardAndPostTask();
}; };
} // namespace internal } // namespace internal
......
@@ -8,6 +8,7 @@
#include "src/execution/isolate.h" #include "src/execution/isolate.h"
#include "src/handles/persistent-handles.h" #include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h" #include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h" #include "src/heap/local-heap-inl.h"
#include "src/heap/local-heap.h" #include "src/heap/local-heap.h"
#include "src/heap/marking.h" #include "src/heap/marking.h"
@@ -38,21 +39,24 @@ void StressConcurrentAllocatorTask::RunInternal() {
AllocationAlignment::kWordAligned); AllocationAlignment::kWordAligned);
heap->CreateFillerObjectAtBackground( heap->CreateFillerObjectAtBackground(
address, kSmallObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory); address, kSmallObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory);
local_heap.Safepoint();
address = local_heap.AllocateRawOrFail( AllocationResult result = local_heap.AllocateRaw(
kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime, kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned); AllocationAlignment::kWordAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground( heap->CreateFillerObjectAtBackground(
address, kMediumObjectSize, result.ToAddress(), kMediumObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory); ClearFreedMemoryMode::kDontClearFreedMemory);
local_heap.Safepoint(); }
address = local_heap.AllocateRawOrFail( result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned); AllocationAlignment::kWordAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground( heap->CreateFillerObjectAtBackground(
address, kLargeObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory); result.ToAddress(), kLargeObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
}
local_heap.Safepoint(); local_heap.Safepoint();
} }
@@ -109,7 +113,6 @@ AllocationResult ConcurrentAllocator::AllocateInLabSlow(
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) { bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground( auto result = space_->RawRefillLabBackground(
local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin); local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
if (!result) return false; if (!result) return false;
if (local_heap_->heap()->incremental_marking()->black_allocation()) { if (local_heap_->heap()->incremental_marking()->black_allocation()) {
......
@@ -409,11 +409,13 @@ bool Heap::CanExpandOldGeneration(size_t size) {
return memory_allocator()->Size() + size <= MaxReserved(); return memory_allocator()->Size() + size <= MaxReserved();
} }
bool Heap::CanExpandOldGenerationBackground(size_t size) { bool Heap::CanExpandOldGenerationBackground(LocalHeap* local_heap,
size_t size) {
if (force_oom_) return false; if (force_oom_) return false;
// When the heap is tearing down, then GC requests from background threads // When the heap is tearing down, then GC requests from background threads
// are not served and the threads are allowed to expand the heap to avoid OOM. // are not served and the threads are allowed to expand the heap to avoid OOM.
return gc_state() == TEAR_DOWN || return gc_state() == TEAR_DOWN || IsMainThreadParked(local_heap) ||
memory_allocator()->Size() + size <= MaxReserved(); memory_allocator()->Size() + size <= MaxReserved();
} }
@@ -1177,6 +1179,15 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
ReduceNewSpaceSize(); ReduceNewSpaceSize();
} }
// Set main thread state back to Running from CollectionRequested.
LocalHeap* main_thread_local_heap = isolate()->main_thread_local_heap();
LocalHeap::ThreadState old_state =
main_thread_local_heap->state_.exchange(LocalHeap::kRunning);
CHECK(old_state == LocalHeap::kRunning ||
old_state == LocalHeap::kCollectionRequested);
// Resume all threads waiting for the GC. // Resume all threads waiting for the GC.
collection_barrier_->ResumeThreadsAwaitingCollection(); collection_barrier_->ResumeThreadsAwaitingCollection();
} }
@@ -1944,18 +1955,15 @@ bool Heap::CollectionRequested() {
return collection_barrier_->CollectionRequested(); return collection_barrier_->CollectionRequested();
} }
void Heap::RequestCollectionBackground(LocalHeap* local_heap) { void Heap::CollectGarbageForBackground(LocalHeap* local_heap) {
if (local_heap->is_main_thread()) { CHECK(local_heap->is_main_thread());
CollectAllGarbage(current_gc_flags_, CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kBackgroundAllocationFailure, GarbageCollectionReason::kBackgroundAllocationFailure,
current_gc_callback_flags_); current_gc_callback_flags_);
} else {
collection_barrier_->AwaitCollectionBackground();
}
} }
void Heap::CheckCollectionRequested() { void Heap::CheckCollectionRequested() {
if (!collection_barrier_->CollectionRequested()) return; if (!CollectionRequested()) return;
CollectAllGarbage(current_gc_flags_, CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kBackgroundAllocationFailure, GarbageCollectionReason::kBackgroundAllocationFailure,
@@ -2013,14 +2021,12 @@ size_t Heap::PerformGarbageCollection(
// cycle. // cycle.
UpdateCurrentEpoch(collector); UpdateCurrentEpoch(collector);
// Stop time-to-collection timer before safepoint - we do not want to measure
// time for safepointing.
collection_barrier_->StopTimeToCollectionTimer();
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain); TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
SafepointScope safepoint_scope(this); SafepointScope safepoint_scope(this);
collection_barrier_->StopTimeToCollectionTimer();
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
if (FLAG_verify_heap) { if (FLAG_verify_heap) {
Verify(); Verify();
@@ -4891,7 +4897,11 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
// was initiated. // was initiated.
if (gc_state() == TEAR_DOWN) return true; if (gc_state() == TEAR_DOWN) return true;
// Ensure that retry of allocation on background thread succeeds // If main thread is parked, it can't perform the GC. Fix the deadlock by
// allowing the allocation.
if (IsMainThreadParked(local_heap)) return true;
// Make it more likely that retry of allocation on background thread succeeds
if (IsRetryOfFailedAllocation(local_heap)) return true; if (IsRetryOfFailedAllocation(local_heap)) return true;
// Background thread requested GC, allocation should fail // Background thread requested GC, allocation should fail
@@ -4918,6 +4928,11 @@ bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
return local_heap->allocation_failed_; return local_heap->allocation_failed_;
} }
bool Heap::IsMainThreadParked(LocalHeap* local_heap) {
if (!local_heap) return false;
return local_heap->main_thread_parked_;
}
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() { Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
if (ShouldReduceMemory() || FLAG_stress_compaction) { if (ShouldReduceMemory() || FLAG_stress_compaction) {
return Heap::HeapGrowingMode::kMinimal; return Heap::HeapGrowingMode::kMinimal;
@@ -5480,7 +5495,7 @@ void Heap::StartTearDown() {
// process the event queue anymore. Avoid this deadlock by allowing all // process the event queue anymore. Avoid this deadlock by allowing all
// allocations after tear down was requested to make sure all background // allocations after tear down was requested to make sure all background
// threads finish. // threads finish.
collection_barrier_->ShutdownRequested(); collection_barrier_->NotifyShutdownRequested();
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's // {StartTearDown} is called fairly early during Isolate teardown, so it's
......
@@ -667,8 +667,8 @@ class Heap {
template <FindMementoMode mode> template <FindMementoMode mode>
inline AllocationMemento FindAllocationMemento(Map map, HeapObject object); inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
// Requests collection and blocks until GC is finished. // Performs GC after background allocation failure.
void RequestCollectionBackground(LocalHeap* local_heap); void CollectGarbageForBackground(LocalHeap* local_heap);
// //
// Support for the API. // Support for the API.
@@ -1932,12 +1932,14 @@ class Heap {
bool always_allocate() { return always_allocate_scope_count_ != 0; } bool always_allocate() { return always_allocate_scope_count_ != 0; }
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size); V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(size_t size); V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(LocalHeap* local_heap,
size_t size);
V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(size_t size); V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(size_t size);
bool ShouldExpandOldGenerationOnSlowAllocation( bool ShouldExpandOldGenerationOnSlowAllocation(
LocalHeap* local_heap = nullptr); LocalHeap* local_heap = nullptr);
bool IsRetryOfFailedAllocation(LocalHeap* local_heap); bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
bool IsMainThreadParked(LocalHeap* local_heap);
HeapGrowingMode CurrentHeapGrowingMode(); HeapGrowingMode CurrentHeapGrowingMode();
@@ -2362,6 +2364,7 @@ class Heap {
friend class ScavengeTaskObserver; friend class ScavengeTaskObserver;
friend class IncrementalMarking; friend class IncrementalMarking;
friend class IncrementalMarkingJob; friend class IncrementalMarkingJob;
friend class LocalHeap;
friend class OldLargeObjectSpace; friend class OldLargeObjectSpace;
template <typename ConcreteVisitor, typename MarkingState> template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase; friend class MarkingVisitorBase;
......
@@ -162,7 +162,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size) { LocalHeap* local_heap, int object_size) {
// Check if we want to force a GC before growing the old space further. // Check if we want to force a GC before growing the old space further.
// If so, fail the allocation. // If so, fail the allocation.
if (!heap()->CanExpandOldGenerationBackground(object_size) || if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) { !heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
return AllocationResult::Retry(identity()); return AllocationResult::Retry(identity());
} }
......
@@ -27,8 +27,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
Heap::HeapState state = heap()->gc_state(); Heap::HeapState state = heap()->gc_state();
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC); DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
ThreadState current = state_.load(std::memory_order_relaxed); ThreadState current = state_.load(std::memory_order_relaxed);
DCHECK(current == ThreadState::Running || DCHECK(current == kRunning || current == kSafepointRequested);
current == ThreadState::SafepointRequested);
#endif #endif
// Each allocation is supposed to be a safepoint. // Each allocation is supposed to be a safepoint.
......
@@ -12,6 +12,7 @@
#include "src/common/globals.h" #include "src/common/globals.h"
#include "src/execution/isolate.h" #include "src/execution/isolate.h"
#include "src/handles/local-handles.h" #include "src/handles/local-handles.h"
#include "src/heap/collection-barrier.h"
#include "src/heap/gc-tracer.h" #include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h" #include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h" #include "src/heap/heap-write-barrier.h"
@@ -44,8 +45,9 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
std::unique_ptr<PersistentHandles> persistent_handles) std::unique_ptr<PersistentHandles> persistent_handles)
: heap_(heap), : heap_(heap),
is_main_thread_(kind == ThreadKind::kMain), is_main_thread_(kind == ThreadKind::kMain),
state_(ThreadState::Parked), state_(kParked),
allocation_failed_(false), allocation_failed_(false),
main_thread_parked_(false),
prev_(nullptr), prev_(nullptr),
next_(nullptr), next_(nullptr),
handles_(new LocalHandles), handles_(new LocalHandles),
@@ -124,8 +126,8 @@ bool LocalHeap::IsHandleDereferenceAllowed() {
VerifyCurrent(); VerifyCurrent();
#endif #endif
ThreadState state = state_relaxed(); ThreadState state = state_relaxed();
return state == ThreadState::Running || return state == kRunning || state == kSafepointRequested ||
state == ThreadState::SafepointRequested; state == kCollectionRequested;
} }
#endif #endif
@@ -134,32 +136,45 @@ bool LocalHeap::IsParked() {
VerifyCurrent(); VerifyCurrent();
#endif #endif
ThreadState state = state_relaxed(); ThreadState state = state_relaxed();
return state == ThreadState::Parked || state == ThreadState::ParkedSafepoint; return state == kParked || state == kParkedSafepointRequested ||
state == kParkedCollectionRequested;
} }
void LocalHeap::Park() { void LocalHeap::ParkSlowPath(ThreadState current_state) {
ThreadState expected = ThreadState::Running; if (is_main_thread()) {
if (!state_.compare_exchange_strong(expected, ThreadState::Parked)) { while (true) {
CHECK_EQ(expected, ThreadState::SafepointRequested); CHECK_EQ(current_state, kCollectionRequested);
expected = ThreadState::SafepointRequested; heap_->CollectGarbageForBackground(this);
CHECK(
state_.compare_exchange_strong(expected, ThreadState::ParkedSafepoint)); current_state = kRunning;
if (state_.compare_exchange_strong(current_state, kParked)) {
return;
}
}
} else {
CHECK_EQ(current_state, kSafepointRequested);
CHECK(state_.compare_exchange_strong(current_state,
kParkedSafepointRequested));
heap_->safepoint()->NotifyPark(); heap_->safepoint()->NotifyPark();
} }
} }
void LocalHeap::Unpark() { void LocalHeap::UnparkSlowPath() {
if (is_main_thread()) {
ThreadState expected = kParkedCollectionRequested;
CHECK(state_.compare_exchange_strong(expected, kCollectionRequested));
heap_->CollectGarbageForBackground(this);
} else {
while (true) { while (true) {
ThreadState expected = ThreadState::Parked; ThreadState expected = kParked;
if (!state_.compare_exchange_strong(expected, ThreadState::Running)) { if (!state_.compare_exchange_strong(expected, kRunning)) {
CHECK_EQ(expected, ThreadState::ParkedSafepoint); CHECK_EQ(expected, kParkedSafepointRequested);
DCHECK(!is_main_thread());
DCHECK_EQ(LocalHeap::Current(), this);
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK, TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
ThreadKind::kBackground); ThreadKind::kBackground);
heap_->safepoint()->WaitInUnpark(); heap_->safepoint()->WaitInUnpark();
} else { } else {
break; return;
}
} }
} }
} }
@@ -169,17 +184,20 @@ void LocalHeap::EnsureParkedBeforeDestruction() {
} }
void LocalHeap::SafepointSlowPath() { void LocalHeap::SafepointSlowPath() {
DCHECK(!is_main_thread()); if (is_main_thread()) {
DCHECK_EQ(LocalHeap::Current(), this); CHECK_EQ(kCollectionRequested, state_relaxed());
heap_->CollectGarbageForBackground(this);
} else {
TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT, TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
ThreadKind::kBackground); ThreadKind::kBackground);
LocalHeap::ThreadState expected = LocalHeap::ThreadState::SafepointRequested; ThreadState expected = kSafepointRequested;
CHECK(state_.compare_exchange_strong(expected, CHECK(state_.compare_exchange_strong(expected, kSafepoint));
LocalHeap::ThreadState::Safepoint));
heap_->safepoint()->WaitInSafepoint(); heap_->safepoint()->WaitInSafepoint();
// This might be a bit surprising, GlobalSafepoint transitions the state from // This might be a bit surprising, GlobalSafepoint transitions the state
// Safepoint (--> Running) --> Parked when returning from the safepoint. // from Safepoint (--> Running) --> Parked when returning from the
// safepoint.
Unpark(); Unpark();
}
} }
void LocalHeap::FreeLinearAllocationArea() { void LocalHeap::FreeLinearAllocationArea() {
@@ -198,23 +216,63 @@ void LocalHeap::UnmarkLinearAllocationArea() {
old_space_allocator_.UnmarkLinearAllocationArea(); old_space_allocator_.UnmarkLinearAllocationArea();
} }
void LocalHeap::PerformCollection() { bool LocalHeap::TryPerformCollection() {
ParkedScope scope(this); if (is_main_thread()) {
heap_->RequestCollectionBackground(this); heap_->CollectGarbageForBackground(this);
return true;
} else {
LocalHeap* main_thread = heap_->isolate()->main_thread_local_heap();
ThreadState current = main_thread->state_relaxed();
while (true) {
switch (current) {
case kRunning:
if (main_thread->state_.compare_exchange_strong(
current, kCollectionRequested)) {
heap_->collection_barrier_->ActivateStackGuardAndPostTask();
return heap_->collection_barrier_->AwaitCollectionBackground(this);
}
break;
case kCollectionRequested:
return heap_->collection_barrier_->AwaitCollectionBackground(this);
case kParked:
if (main_thread->state_.compare_exchange_strong(
current, kParkedCollectionRequested)) {
heap_->collection_barrier_->ActivateStackGuardAndPostTask();
return false;
}
break;
case kParkedCollectionRequested:
return false;
default:
UNREACHABLE();
}
}
}
} }
Address LocalHeap::PerformCollectionAndAllocateAgain( Address LocalHeap::PerformCollectionAndAllocateAgain(
int object_size, AllocationType type, AllocationOrigin origin, int object_size, AllocationType type, AllocationOrigin origin,
AllocationAlignment alignment) { AllocationAlignment alignment) {
CHECK(!allocation_failed_);
CHECK(!main_thread_parked_);
allocation_failed_ = true; allocation_failed_ = true;
static const int kMaxNumberOfRetries = 3; static const int kMaxNumberOfRetries = 3;
for (int i = 0; i < kMaxNumberOfRetries; i++) { for (int i = 0; i < kMaxNumberOfRetries; i++) {
PerformCollection(); if (!TryPerformCollection()) {
main_thread_parked_ = true;
}
AllocationResult result = AllocateRaw(object_size, type, origin, alignment); AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
if (!result.IsRetry()) { if (!result.IsRetry()) {
allocation_failed_ = false; allocation_failed_ = false;
main_thread_parked_ = false;
return result.ToObjectChecked().address(); return result.ToObjectChecked().address();
} }
} }
......
@@ -8,6 +8,7 @@
#include <atomic> #include <atomic>
#include <memory> #include <memory>
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h" #include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h" #include "src/base/platform/mutex.h"
#include "src/common/assert-scope.h" #include "src/common/assert-scope.h"
@@ -45,8 +46,11 @@ class V8_EXPORT_PRIVATE LocalHeap {
void Safepoint() { void Safepoint() {
DCHECK(AllowSafepoints::IsAllowed()); DCHECK(AllowSafepoints::IsAllowed());
ThreadState current = state_relaxed(); ThreadState current = state_relaxed();
STATIC_ASSERT(kSafepointRequested == kCollectionRequested);
if (V8_UNLIKELY(current == ThreadState::SafepointRequested)) { // The following condition checks for both kSafepointRequested (background
// thread) and kCollectionRequested (main thread).
if (V8_UNLIKELY(current == kSafepointRequested)) {
SafepointSlowPath(); SafepointSlowPath();
} }
} }
@@ -129,7 +133,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
bool is_main_thread() const { return is_main_thread_; } bool is_main_thread() const { return is_main_thread_; }
// Requests GC and blocks until the collection finishes. // Requests GC and blocks until the collection finishes.
void PerformCollection(); bool TryPerformCollection();
// Adds a callback that is invoked with the given |data| after each GC. // Adds a callback that is invoked with the given |data| after each GC.
// The callback is invoked on the main thread before any background thread // The callback is invoked on the main thread before any background thread
@@ -139,24 +143,38 @@ class V8_EXPORT_PRIVATE LocalHeap {
void RemoveGCEpilogueCallback(GCEpilogueCallback* callback, void* data); void RemoveGCEpilogueCallback(GCEpilogueCallback* callback, void* data);
private: private:
enum class ThreadState { enum ThreadState {
// Threads in this state are allowed to access the heap. // Threads in this state are allowed to access the heap.
Running, kRunning,
// Thread was parked, which means that the thread is not allowed to access // Thread was parked, which means that the thread is not allowed to access
// or manipulate the heap in any way. This is considered to be a safepoint. // or manipulate the heap in any way. This is considered to be a safepoint.
Parked, kParked,
// All other states are needed for stopping-the-world. // SafepointRequested is used for Running background threads to force
// SafepointRequested is used for Running threads to force Safepoint() and // Safepoint() and
// Park() into the slow path. // Park() into the slow path.
SafepointRequested, kSafepointRequested,
// A thread transitions into this state from SafepointRequested when it // A background thread transitions into this state from SafepointRequested
// when it
// enters a safepoint. // enters a safepoint.
Safepoint, kSafepoint,
// This state is used for Parked threads and forces Unpark() into the slow // This state is used for Parked background threads and forces Unpark() into
// the slow
// path. It prevents Unpark() to succeed before the safepoint operation is // path. It prevents Unpark() to succeed before the safepoint operation is
// finished. // finished.
ParkedSafepoint, kParkedSafepointRequested,
// This state is used on the main thread when at least one background thread
// requested a GC while the main thread was Running.
// We can use the same value for CollectionRequested and SafepointRequested
// since the first is only used on the main thread, while the other one only
// occurs on background threads. This property is used to have a faster
// check in Safepoint().
kCollectionRequested = kSafepointRequested,
// This state is used on the main thread when at least one background thread
// requested a GC while the main thread was Parked.
kParkedCollectionRequested,
}; };
ThreadState state_relaxed() { return state_.load(std::memory_order_relaxed); } ThreadState state_relaxed() { return state_.load(std::memory_order_relaxed); }
@@ -168,8 +186,24 @@ class V8_EXPORT_PRIVATE LocalHeap {
AllocationOrigin origin, AllocationOrigin origin,
AllocationAlignment alignment); AllocationAlignment alignment);
void Park(); void Park() {
void Unpark(); DCHECK(AllowGarbageCollection::IsAllowed());
ThreadState expected = kRunning;
if (!state_.compare_exchange_strong(expected, kParked)) {
ParkSlowPath(expected);
}
}
void Unpark() {
DCHECK(AllowGarbageCollection::IsAllowed());
ThreadState expected = kParked;
if (!state_.compare_exchange_strong(expected, kRunning)) {
UnparkSlowPath();
}
}
void ParkSlowPath(ThreadState state);
void UnparkSlowPath();
void EnsureParkedBeforeDestruction(); void EnsureParkedBeforeDestruction();
void SafepointSlowPath(); void SafepointSlowPath();
@@ -183,6 +217,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::atomic<ThreadState> state_; std::atomic<ThreadState> state_;
bool allocation_failed_; bool allocation_failed_;
bool main_thread_parked_;
LocalHeap* prev_; LocalHeap* prev_;
LocalHeap* next_; LocalHeap* next_;
@@ -195,12 +230,13 @@ class V8_EXPORT_PRIVATE LocalHeap {
ConcurrentAllocator old_space_allocator_; ConcurrentAllocator old_space_allocator_;
friend class Heap; friend class CollectionBarrier;
friend class ConcurrentAllocator;
friend class GlobalSafepoint; friend class GlobalSafepoint;
friend class Heap;
friend class Isolate;
friend class ParkedScope; friend class ParkedScope;
friend class UnparkedScope; friend class UnparkedScope;
friend class ConcurrentAllocator;
friend class Isolate;
}; };
} // namespace internal } // namespace internal
......
@@ -592,7 +592,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
} }
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) && if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
heap()->CanExpandOldGenerationBackground(AreaSize())) { heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
auto result = ExpandBackground(local_heap, max_size_in_bytes); auto result = ExpandBackground(local_heap, max_size_in_bytes);
if (result) { if (result) {
DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0); DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
......
@@ -45,18 +45,16 @@ void GlobalSafepoint::EnterSafepointScope() {
LocalHeap::ThreadState expected = local_heap->state_relaxed(); LocalHeap::ThreadState expected = local_heap->state_relaxed();
while (true) { while (true) {
CHECK(expected == LocalHeap::ThreadState::Parked || CHECK(expected == LocalHeap::kParked || expected == LocalHeap::kRunning);
expected == LocalHeap::ThreadState::Running);
LocalHeap::ThreadState new_state = LocalHeap::ThreadState new_state =
expected == LocalHeap::ThreadState::Parked expected == LocalHeap::kParked ? LocalHeap::kParkedSafepointRequested
? LocalHeap::ThreadState::ParkedSafepoint : LocalHeap::kSafepointRequested;
: LocalHeap::ThreadState::SafepointRequested;
if (local_heap->state_.compare_exchange_strong(expected, new_state)) { if (local_heap->state_.compare_exchange_strong(expected, new_state)) {
if (expected == LocalHeap::ThreadState::Running) { if (expected == LocalHeap::kRunning) {
running++; running++;
} else { } else {
CHECK_EQ(expected, LocalHeap::ThreadState::Parked); CHECK_EQ(expected, LocalHeap::kParked);
} }
break; break;
} }
@@ -78,16 +76,17 @@ void GlobalSafepoint::LeaveSafepointScope() {
continue; continue;
} }
// We transition both ParkedSafepoint and Safepoint states to Parked. While // We transition both ParkedSafepointRequested and Safepoint states to
// this is probably intuitive for ParkedSafepoint, this might be surprising // Parked. While this is probably intuitive for ParkedSafepointRequested,
// for Safepoint though. SafepointSlowPath() will later unpark that thread // this might be surprising for Safepoint though. SafepointSlowPath() will
// again. Going through Parked means that a background thread doesn't need // later unpark that thread again. Going through Parked means that a
// to be waked up before the main thread can start the next safepoint. // background thread doesn't need to be waked up before the main thread can
// start the next safepoint.
LocalHeap::ThreadState old_state = LocalHeap::ThreadState old_state =
local_heap->state_.exchange(LocalHeap::ThreadState::Parked); local_heap->state_.exchange(LocalHeap::kParked);
CHECK(old_state == LocalHeap::ThreadState::ParkedSafepoint || CHECK(old_state == LocalHeap::kParkedSafepointRequested ||
old_state == LocalHeap::ThreadState::Safepoint); old_state == LocalHeap::kSafepoint);
} }
barrier_.Disarm(); barrier_.Disarm();
......
@@ -184,13 +184,13 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
// TODO(lpy): add support for background compilation RCS trace. // TODO(lpy): add support for background compilation RCS trace.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition"); TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
base::Optional<ParkedScope> parked_scope;
if (local_isolate_) parked_scope.emplace(local_isolate_);
// Print AST if flag is enabled. Note, if compiling on a background thread // Print AST if flag is enabled. Note, if compiling on a background thread
// then ASTs from different functions may be intersperse when printed. // then ASTs from different functions may be intersperse when printed.
MaybePrintAst(parse_info(), compilation_info()); MaybePrintAst(parse_info(), compilation_info());
base::Optional<ParkedScope> parked_scope;
if (local_isolate_) parked_scope.emplace(local_isolate_);
generator()->GenerateBytecode(stack_limit()); generator()->GenerateBytecode(stack_limit());
if (generator()->HasStackOverflow()) { if (generator()->HasStackOverflow()) {
......
@@ -65,7 +65,8 @@ void AllocateSomeObjects(LocalHeap* local_heap) {
class ConcurrentAllocationThread final : public v8::base::Thread { class ConcurrentAllocationThread final : public v8::base::Thread {
public: public:
explicit ConcurrentAllocationThread(Heap* heap, std::atomic<int>* pending) explicit ConcurrentAllocationThread(Heap* heap,
std::atomic<int>* pending = nullptr)
: v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")), : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
heap_(heap), heap_(heap),
pending_(pending) {} pending_(pending) {}
@@ -74,7 +75,7 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
LocalHeap local_heap(heap_, ThreadKind::kBackground); LocalHeap local_heap(heap_, ThreadKind::kBackground);
UnparkedScope unparked_scope(&local_heap); UnparkedScope unparked_scope(&local_heap);
AllocateSomeObjects(&local_heap); AllocateSomeObjects(&local_heap);
pending_->fetch_sub(1); if (pending_) pending_->fetch_sub(1);
} }
Heap* heap_; Heap* heap_;
@@ -128,6 +129,108 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
isolate->Dispose(); isolate->Dispose();
} }
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadIsParked) {
FLAG_max_old_space_size = 4;
FLAG_stress_concurrent_allocation = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
const int kThreads = 4;
{
ParkedScope scope(i_isolate->main_thread_local_isolate());
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->Join();
}
}
isolate->Dispose();
}
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) {
FLAG_max_old_space_size = 4;
FLAG_stress_concurrent_allocation = false;
FLAG_incremental_marking = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (int i = 0; i < 300'000; i++) {
ParkedScope scope(i_isolate->main_thread_local_isolate());
}
{
ParkedScope scope(i_isolate->main_thread_local_isolate());
for (auto& thread : threads) {
thread->Join();
}
}
isolate->Dispose();
}
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadRunsWithSafepoints) {
FLAG_max_old_space_size = 4;
FLAG_stress_concurrent_allocation = false;
FLAG_incremental_marking = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
// Some of the following Safepoint() invocations are supposed to perform a GC.
for (int i = 0; i < 1'000'000; i++) {
i_isolate->main_thread_local_heap()->Safepoint();
}
{
ParkedScope scope(i_isolate->main_thread_local_isolate());
for (auto& thread : threads) {
thread->Join();
}
}
i_isolate->main_thread_local_heap()->Safepoint();
isolate->Dispose();
}
class LargeObjectConcurrentAllocationThread final : public v8::base::Thread { class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
public: public:
explicit LargeObjectConcurrentAllocationThread(Heap* heap, explicit LargeObjectConcurrentAllocationThread(Heap* heap,
@@ -146,7 +249,7 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime, kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned); AllocationAlignment::kWordAligned);
if (result.IsRetry()) { if (result.IsRetry()) {
local_heap.PerformCollection(); local_heap.TryPerformCollection();
} else { } else {
Address address = result.ToAddress(); Address address = result.ToAddress();
CreateFixedArray(heap_, address, kLargeObjectSize); CreateFixedArray(heap_, address, kLargeObjectSize);
......
@@ -170,8 +170,11 @@ TEST_F(LocalHeapTest, GCEpilogue) {
CHECK(thread2->Start()); CHECK(thread2->Start());
epilogue[1].WaitUntilStarted(); epilogue[1].WaitUntilStarted();
epilogue[2].WaitUntilStarted(); epilogue[2].WaitUntilStarted();
{
UnparkedScope scope(&lh);
heap->PreciseCollectAllGarbage(Heap::kNoGCFlags, heap->PreciseCollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kTesting); GarbageCollectionReason::kTesting);
}
epilogue[1].RequestStop(); epilogue[1].RequestStop();
epilogue[2].RequestStop(); epilogue[2].RequestStop();
thread1->Join(); thread1->Join();
......