Commit 6747144c authored by Omer Katz, committed by V8 LUCI CQ

cppgc: Parallel marking in atomic pause

Bug: v8:12424
Change-Id: I0633e1bd8c890c14ce2c5519253a5e6eb7592f04
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3295580
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78081}
parent 68cefe40
......@@ -331,7 +331,9 @@ CppHeap::CppHeap(
: cppgc::internal::HeapBase(
std::make_shared<CppgcPlatformAdapter>(platform), custom_spaces,
cppgc::internal::HeapBase::StackSupport::
kSupportsConservativeStackScan),
kSupportsConservativeStackScan,
cppgc::internal::HeapBase::MarkingType::kIncrementalAndConcurrent,
cppgc::internal::HeapBase::SweepingType::kIncrementalAndConcurrent),
wrapper_descriptor_(wrapper_descriptor) {
CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
wrapper_descriptor_.embedder_id_for_garbage_collected);
......
......@@ -53,7 +53,8 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
HeapBase::HeapBase(
std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
StackSupport stack_support)
StackSupport stack_support, MarkingType marking_support,
SweepingType sweeping_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
......@@ -81,7 +82,9 @@ HeapBase::HeapBase(
weak_persistent_region_(*oom_handler_.get()),
strong_cross_thread_persistent_region_(*oom_handler_.get()),
weak_cross_thread_persistent_region_(*oom_handler_.get()),
stack_support_(stack_support) {
stack_support_(stack_support),
marking_support_(marking_support),
sweeping_support_(sweeping_support) {
stats_collector_->RegisterObserver(
&allocation_observer_for_PROCESS_HEAP_STATISTICS_);
}
......
......@@ -71,6 +71,8 @@ class StatsCollector;
class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
public:
using StackSupport = cppgc::Heap::StackSupport;
using MarkingType = cppgc::Heap::MarkingType;
using SweepingType = cppgc::Heap::SweepingType;
static HeapBase& From(cppgc::HeapHandle& heap_handle) {
return static_cast<HeapBase&>(heap_handle);
......@@ -81,7 +83,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
HeapBase(std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
StackSupport stack_support);
StackSupport stack_support, MarkingType marking_support,
SweepingType sweeping_support);
virtual ~HeapBase();
HeapBase(const HeapBase&) = delete;
......@@ -203,6 +206,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
int GetCreationThreadId() const { return creation_thread_id_; }
MarkingType marking_support() const { return marking_support_; }
protected:
// Used by the incremental scheduler to finalize a GC if supported.
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
......@@ -273,6 +278,9 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
int creation_thread_id_ = v8::base::OS::GetCurrentThreadId();
const MarkingType marking_support_;
const SweepingType sweeping_support_;
friend class MarkerBase::IncrementalMarkingTask;
friend class cppgc::subtle::DisallowGarbageCollectionScope;
friend class cppgc::subtle::NoGarbageCollectionScope;
......
......@@ -78,13 +78,12 @@ void CheckConfig(Heap::Config config, Heap::MarkingType marking_support,
Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
cppgc::Heap::HeapOptions options)
: HeapBase(platform, options.custom_spaces, options.stack_support),
: HeapBase(platform, options.custom_spaces, options.stack_support,
options.marking_support, options.sweeping_support),
gc_invoker_(this, platform_.get(), options.stack_support),
growing_(&gc_invoker_, stats_collector_.get(),
options.resource_constraints, options.marking_support,
options.sweeping_support),
marking_support_(options.marking_support),
sweeping_support_(options.sweeping_support) {
options.sweeping_support) {
CHECK_IMPLIES(options.marking_support != MarkingType::kAtomic,
platform_->GetForegroundTaskRunner());
CHECK_IMPLIES(options.sweeping_support != SweepingType::kAtomic,
......
......@@ -53,9 +53,6 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
GCInvoker gc_invoker_;
HeapGrowing growing_;
const MarkingType marking_support_;
const SweepingType sweeping_support_;
size_t epoch_ = 0;
};
......
......@@ -30,7 +30,6 @@ void IncrementalMarkingSchedule::UpdateMutatorThreadMarkedBytes(
// Accounts for bytes marked by concurrent marker threads; the counter feeds
// the schedule's overall marked-bytes estimate.
void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
size_t marked_bytes) {
// NOTE(review): the surrounding hunk header (-30,7 +30,6) indicates one line
// of this function was deleted by the commit; this DCHECK is the likely
// candidate, since with parallel marking in the atomic pause concurrent
// marking may run when no incremental start time was recorded -- confirm
// against the committed version.
DCHECK(!incremental_marking_start_time_.IsNull());
// Relaxed ordering suffices here: the value is an accounting counter read by
// scheduling heuristics, not used for cross-thread synchronization.
concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
}
......
......@@ -240,6 +240,7 @@ void MarkerBase::StartMarking() {
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
mutator_marking_state_.Publish();
concurrent_marker_->Start();
concurrent_marking_active_ = true;
}
incremental_marking_allocation_observer_ =
std::make_unique<IncrementalMarkingAllocationObserver>(*this);
......@@ -255,8 +256,9 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
StatsCollector::kMarkAtomicPrologue);
if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
// Cancel remaining concurrent/incremental tasks.
concurrent_marker_->Cancel();
// Cancel remaining incremental tasks. Concurrent marking jobs are left to
// run in parallel with the atomic pause until the mutator thread runs out
// of work.
incremental_marking_handle_.Cancel();
heap().stats_collector()->UnregisterObserver(
incremental_marking_allocation_observer_.get());
......@@ -276,6 +278,17 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
MarkNotFullyConstructedObjects();
}
}
if (heap().marking_support() ==
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
// Start parallel marking.
mutator_marking_state_.Publish();
if (concurrent_marking_active_) {
concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
} else {
concurrent_marker_->Start();
concurrent_marking_active_ = true;
}
}
}
void MarkerBase::LeaveAtomicPause() {
......@@ -414,6 +427,16 @@ void MarkerBase::AdvanceMarkingOnAllocation() {
}
}
// Stops the concurrent marker once the atomic pause has been entered, if it
// is still running. Returns true iff a cancellation actually happened, which
// tells the caller that the mutator thread must drain whatever work the
// concurrent jobs left behind.
bool MarkerBase::CancelConcurrentMarkingIfNeeded() {
  const bool in_atomic_marking =
      config_.marking_type == MarkingConfig::MarkingType::kAtomic;
  if (in_atomic_marking && concurrent_marking_active_) {
    concurrent_marker_->Cancel();
    concurrent_marking_active_ = false;
    return true;
  }
  return false;
}
bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
size_t marked_bytes_limit) {
bool is_done = false;
......@@ -433,6 +456,9 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
// adjustment.
is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
}
if (is_done && CancelConcurrentMarkingIfNeeded()) {
is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
}
schedule_.UpdateMutatorThreadMarkedBytes(
mutator_marking_state_.marked_bytes());
}
......
......@@ -173,6 +173,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
void AdvanceMarkingOnAllocation();
bool CancelConcurrentMarkingIfNeeded();
HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
......@@ -189,6 +191,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
IncrementalMarkingSchedule schedule_;
std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
bool concurrent_marking_active_ = false;
bool main_marking_disabled_for_testing_{false};
bool visited_cross_thread_persistents_in_atomic_pause_{false};
......
......@@ -415,15 +415,17 @@ void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
#if DEBUG
const HeapObjectHeader& header =
HeapObjectHeader::FromObject(desc.base_object_payload);
DCHECK_IMPLIES(header.IsInConstruction(), header.IsMarked());
DCHECK_IMPLIES(header.IsInConstruction(),
header.IsMarked<AccessMode::kAtomic>());
#endif // DEBUG
weak_callback(LivenessBrokerFactory::Create(), parameter);
}
bool MutatorMarkingState::IsMarkedWeakContainer(HeapObjectHeader& header) {
const bool result = weak_containers_worklist_.Contains(&header) &&
!recently_retraced_weak_containers_.Contains(&header);
DCHECK_IMPLIES(result, header.IsMarked());
const bool result =
weak_containers_worklist_.Contains<AccessMode::kAtomic>(&header) &&
!recently_retraced_weak_containers_.Contains(&header);
DCHECK_IMPLIES(result, header.IsMarked<AccessMode::kAtomic>());
DCHECK_IMPLIES(result, !header.IsInConstruction());
return result;
}
......@@ -493,7 +495,7 @@ template <AccessMode mode>
void DynamicallyTraceMarkedObject(Visitor& visitor,
const HeapObjectHeader& header) {
DCHECK(!header.IsInConstruction<mode>());
DCHECK(header.IsMarked<mode>());
DCHECK(header.IsMarked<AccessMode::kAtomic>());
header.Trace<mode>(&visitor);
}
......
......@@ -4,6 +4,7 @@
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
......@@ -54,7 +55,7 @@ ConservativeMarkingVisitor::ConservativeMarkingVisitor(
void ConservativeMarkingVisitor::VisitFullyConstructedConservatively(
HeapObjectHeader& header) {
if (header.IsMarked()) {
if (header.IsMarked<AccessMode::kAtomic>()) {
if (marking_state_.IsMarkedWeakContainer(header))
marking_state_.ReTraceMarkedWeakContainer(visitor_, header);
return;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.