Commit 1f0d7d20 authored by Michael Lippautz, committed by V8 LUCI CQ

cppgc-js: Concurrently process v8::TracedReference

Adds concurrent marking for reaching through v8::TracedReference.
Before this CL, a v8::TracedReference would always be processed on the
main thread by pushing a callback for each encountered reference.

This CL now wires up concurrent handling for such references (a hedged
sketch follows the list). In particular:
- Global handle nodes are themselves marked and are not repurposed
  during the same GC cycle.
- Since global handles are not repurposed, it is enough to
  double-deref to the V8 object, checking for possible null pointers.
- The bitmap for global handle flags is mostly non-atomic, with the
  markbit being the exception.
- Finally, all state is wired up in CppHeap. Concurrent markers keep
  their own local worklist while the mutator marker directly pushes to
  the worklist owned by V8.
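
To make the list above concrete, here is a hedged, self-contained C++ sketch of the concurrent fast path for a single reference. All names, the markbit position, and the tagging scheme are assumptions for illustration, not V8 API:

```cpp
// Sketch only: atomically mark the global-handle node, then double-deref
// the slot. This is safe because nodes are not repurposed during the GC
// cycle, so the slot holds either null or a valid tagged value.
#include <atomic>
#include <cstdint>

using Address = uintptr_t;
constexpr uint8_t kMarkbitMask = uint8_t{1} << 5;  // assumed bit position
constexpr Address kHeapObjectTag = 1;              // assumed V8-style tagging

Address MarkAndExtract(uint8_t* flags, Address* slot) {
  // Only the markbit is shared with concurrent markers; set just that bit.
  reinterpret_cast<std::atomic<uint8_t>*>(flags)->fetch_or(
      kMarkbitMask, std::memory_order_relaxed);
  // Double-deref through the handle slot with a relaxed atomic load.
  const Address object = reinterpret_cast<std::atomic<Address>*>(slot)->load(
      std::memory_order_relaxed);
  if (object == 0) return 0;                     // empty reference
  if ((object & kHeapObjectTag) == 0) return 0;  // Smi: nothing to mark
  return object;  // caller pushes this onto its marking worklist
}
```

In the wiring below, a concurrent marker would push the result onto its own local worklist, while the mutator marker pushes directly to the worklist owned by V8.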

Bug: v8:12600
Change-Id: Ia67dbd18a57dbcccf4dfb9ccfdb9ee438d27fe71
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3516255
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79736}
parent ad724878
@@ -1390,7 +1390,9 @@ filegroup(
"src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-snapshot.cc",
"src/heap/cppgc-js/cpp-snapshot.h",
"src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-state-inl.h",
"src/heap/cppgc-js/unified-heap-marking-verifier.cc",
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
......
@@ -2981,6 +2981,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-marking-state.h",
"src/heap/cppgc-js/cpp-snapshot.h",
"src/heap/cppgc-js/unified-heap-marking-state-inl.h",
"src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
@@ -4232,6 +4233,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/concurrent-marking.cc",
"src/heap/cppgc-js/cpp-heap.cc",
"src/heap/cppgc-js/cpp-snapshot.cc",
"src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-verifier.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
"src/heap/embedder-tracing.cc",
......
@@ -5,6 +5,7 @@
#include "src/handles/global-handles.h"
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <map>
@@ -13,6 +14,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/sanitizer/asan.h"
#include "src/common/allow-deprecated.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/base/stack.h"
#include "src/heap/embedder-tracing.h"
@@ -655,9 +657,21 @@ class GlobalHandles::TracedNode final
bool is_root() const { return IsRoot::decode(flags_); }
void set_root(bool v) { flags_ = IsRoot::update(flags_, v); }
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
void set_markbit() {
if constexpr (access_mode == AccessMode::NON_ATOMIC) {
flags_ = Markbit::update(flags_, true);
return;
}
std::atomic<uint8_t>& atomic_flags =
reinterpret_cast<std::atomic<uint8_t>&>(flags_);
const uint8_t new_value =
Markbit::update(atomic_flags.load(std::memory_order_relaxed), true);
atomic_flags.fetch_or(new_value, std::memory_order_relaxed);
}
bool markbit() const { return Markbit::decode(flags_); }
void clear_markbit() { flags_ = Markbit::update(flags_, false); }
void set_markbit() { flags_ = Markbit::update(flags_, true); }
bool is_on_stack() const { return IsOnStack::decode(flags_); }
void set_is_on_stack(bool v) { flags_ = IsOnStack::update(flags_, v); }
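
One subtlety in the atomic branch above: `fetch_or` ORs in the entire recomputed flags byte, not just the markbit. That is only correct because, per the bit-field comment in the next hunk, every other flag bit is mutated exclusively on the main thread while concurrent markers run. A hypothetical single-bit equivalent, assuming the markbit occupies bit 5 of the flags byte:

```cpp
// Hypothetical single-bit variant; the bit position is an assumption
// matching the BitField layout in the next hunk, not V8 API.
#include <atomic>
#include <cstdint>

void SetMarkbitAtomic(uint8_t& flags) {
  reinterpret_cast<std::atomic<uint8_t>&>(flags).fetch_or(
      uint8_t{1} << 5, std::memory_order_relaxed);
}
```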
@@ -675,11 +689,19 @@ class GlobalHandles::TracedNode final
static void Verify(GlobalHandles* global_handles, const Address* const* slot);
protected:
// Various state is kept in a bit field. Some of it is accessed
// concurrently, whereas the rest is mutated only on the main thread at
// times when no concurrent thread has access to the flags, e.g., in the
// atomic pause of the garbage collector.
//
// The following state is managed only on the main thread.
using NodeState = base::BitField8<State, 0, 2>;
using IsInYoungList = NodeState::Next<bool, 1>;
using IsRoot = IsInYoungList::Next<bool, 1>;
using Markbit = IsRoot::Next<bool, 1>;
using IsOnStack = Markbit::Next<bool, 1>;
using IsOnStack = IsRoot::Next<bool, 1>;
// The markbit is the exception as it can be set from the main and marker
// threads at the same time.
using Markbit = IsOnStack::Next<bool, 1>;
void ClearImplFields() {
set_root(true);
@@ -1081,7 +1103,7 @@ GlobalHandles* GlobalHandles::From(const TracedNode* node) {
void GlobalHandles::MarkTraced(Address* location) {
TracedNode* node = TracedNode::FromLocation(location);
node->set_markbit();
node->set_markbit<AccessMode::ATOMIC>();
DCHECK(node->IsInUse());
}
......
@@ -206,27 +206,27 @@ class UnifiedHeapConcurrentMarker
: public cppgc::internal::ConcurrentMarkerBase {
public:
UnifiedHeapConcurrentMarker(
cppgc::internal::HeapBase& heap,
cppgc::internal::HeapBase& heap, Heap* v8_heap,
cppgc::internal::MarkingWorklists& marking_worklists,
cppgc::internal::IncrementalMarkingSchedule& incremental_marking_schedule,
cppgc::Platform* platform,
UnifiedHeapMarkingState& unified_heap_marking_state)
: cppgc::internal::ConcurrentMarkerBase(
heap, marking_worklists, incremental_marking_schedule, platform),
unified_heap_marking_state_(unified_heap_marking_state) {}
v8_heap_(v8_heap) {}
std::unique_ptr<cppgc::Visitor> CreateConcurrentMarkingVisitor(
cppgc::internal::ConcurrentMarkingState&) const final;
private:
UnifiedHeapMarkingState& unified_heap_marking_state_;
Heap* const v8_heap_;
};
std::unique_ptr<cppgc::Visitor>
UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
cppgc::internal::ConcurrentMarkingState& marking_state) const {
return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(
heap(), marking_state, unified_heap_marking_state_);
return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(heap(), v8_heap_,
marking_state);
}
void FatalOutOfMemoryHandlerImpl(const std::string& reason,
@@ -255,6 +255,10 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
marking_visitor_->marking_state_);
}
UnifiedHeapMarkingState& GetMutatorUnifiedHeapMarkingState() {
return mutator_unified_heap_marking_state_;
}
protected:
cppgc::Visitor& visitor() final { return *marking_visitor_; }
cppgc::internal::ConservativeTracingVisitor& conservative_visitor() final {
@@ -265,7 +269,7 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
}
private:
UnifiedHeapMarkingState unified_heap_marking_state_;
UnifiedHeapMarkingState mutator_unified_heap_marking_state_;
std::unique_ptr<MutatorUnifiedHeapMarkingVisitor> marking_visitor_;
cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
};
@@ -275,19 +279,21 @@ UnifiedHeapMarker::UnifiedHeapMarker(Heap* v8_heap,
cppgc::Platform* platform,
MarkingConfig config)
: cppgc::internal::MarkerBase(heap, platform, config),
unified_heap_marking_state_(v8_heap),
marking_visitor_(
config.collection_type == cppgc::internal::GarbageCollector::Config::
mutator_unified_heap_marking_state_(v8_heap, nullptr),
marking_visitor_(config.collection_type ==
cppgc::internal::GarbageCollector::Config::
CollectionType::kMajor
? std::make_unique<MutatorUnifiedHeapMarkingVisitor>(
heap, mutator_marking_state_, unified_heap_marking_state_)
heap, mutator_marking_state_,
mutator_unified_heap_marking_state_)
: std::make_unique<MutatorMinorGCMarkingVisitor>(
heap, mutator_marking_state_, unified_heap_marking_state_)),
heap, mutator_marking_state_,
mutator_unified_heap_marking_state_)),
conservative_marking_visitor_(heap, mutator_marking_state_,
*marking_visitor_) {
concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
heap_, marking_worklists_, schedule_, platform_,
unified_heap_marking_state_);
heap_, v8_heap, marking_worklists_, schedule_, platform_,
mutator_unified_heap_marking_state_);
}
void UnifiedHeapMarker::AddObject(void* object) {
@@ -546,6 +552,16 @@ void CppHeap::InitializeTracing(
}
void CppHeap::StartTracing() {
if (isolate_) {
// Reuse the MarkCompact collector's local worklist for the mutator
// marking state, so that objects are processed directly by the JS
// marking logic. This also avoids publishing local objects.
static_cast<UnifiedHeapMarker*>(marker_.get())
->GetMutatorUnifiedHeapMarkingState()
.Update(isolate_->heap()
->mark_compact_collector()
->local_marking_worklists());
}
marker_->StartMarking();
marking_done_ = false;
}
@@ -859,5 +875,12 @@ CppHeap::CreateCppMarkingStateForMutatorThread() {
static_cast<UnifiedHeapMarker*>(marker())->GetMutatorMarkingState());
}
CppHeap::PauseConcurrentMarkingScope::PauseConcurrentMarkingScope(
CppHeap* cpp_heap) {
if (cpp_heap && cpp_heap->marker()) {
pause_scope_.emplace(*cpp_heap->marker());
}
}
} // namespace internal
} // namespace v8
@@ -17,6 +17,7 @@ static_assert(
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/logging/metrics.h"
@@ -83,6 +84,15 @@ class V8_EXPORT_PRIVATE CppHeap final
last_incremental_mark_event_;
};
class PauseConcurrentMarkingScope final {
public:
explicit PauseConcurrentMarkingScope(CppHeap*);
private:
base::Optional<cppgc::internal::MarkerBase::PauseConcurrentMarkingScope>
pause_scope_;
};
static CppHeap* From(v8::CppHeap* heap) {
return static_cast<CppHeap*>(heap);
}
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_INL_H_
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_INL_H_
#include "include/v8-traced-handle.h"
#include "src/base/logging.h"
#include "src/handles/global-handles.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-worklist-inl.h"
namespace v8 {
namespace internal {
class BasicTracedReferenceExtractor {
public:
static Address* ObjectReference(const TracedReferenceBase& ref) {
return const_cast<Address*>(
reinterpret_cast<const Address*>(ref.GetSlotThreadSafe()));
}
};
void UnifiedHeapMarkingState::MarkAndPush(
const TracedReferenceBase& reference) {
// The following code will crash with null pointer derefs if it encounters
// a non-empty `TracedReferenceBase` while `CppHeap` is in detached mode.
Address* global_handle_location =
BasicTracedReferenceExtractor::ObjectReference(reference);
DCHECK_NOT_NULL(global_handle_location);
GlobalHandles::MarkTraced(global_handle_location);
Object object(reinterpret_cast<std::atomic<Address>*>(global_handle_location)
->load(std::memory_order_relaxed));
if (!object.IsHeapObject()) {
// The embedder is not aware of whether numbers are materialized as heap
// objects or just passed around as Smis.
return;
}
HeapObject heap_object = HeapObject::cast(object);
if (marking_state_->WhiteToGrey(heap_object)) {
local_marking_worklist_->Push(heap_object);
}
if (V8_UNLIKELY(track_retaining_path_)) {
heap_->AddRetainingRoot(Root::kWrapperTracing, heap_object);
}
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_INL_H_
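
The `IsHeapObject()` early-out in `MarkAndPush` above hinges on V8's pointer tagging. A minimal sketch of that check, assuming the standard scheme in which Smis carry a low tag bit of 0 and heap-object pointers a low tag bit of 1:

```cpp
// Tagging sketch; the scheme is an assumption, not taken from this CL.
#include <cstdint>

inline bool IsSmi(uintptr_t tagged) { return (tagged & 1u) == 0; }
inline bool IsHeapObject(uintptr_t tagged) { return (tagged & 1u) == 1; }
// A TracedReference holding a number passed around as a Smi is skipped:
// there is no heap object to mark or to record on a retaining path.
```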
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/base/logging.h"
#include "src/heap/mark-compact.h"
namespace v8 {
namespace internal {
UnifiedHeapMarkingState::UnifiedHeapMarkingState(
Heap* heap, MarkingWorklists::Local* local_marking_worklist)
: heap_(heap),
marking_state_(heap_ ? heap_->mark_compact_collector()->marking_state()
: nullptr),
local_marking_worklist_(local_marking_worklist),
track_retaining_path_(FLAG_track_retaining_path) {
DCHECK_IMPLIES(FLAG_track_retaining_path,
!FLAG_concurrent_marking && !FLAG_parallel_marking);
DCHECK_IMPLIES(heap_, marking_state_);
}
void UnifiedHeapMarkingState::Update(
MarkingWorklists::Local* local_marking_worklist) {
local_marking_worklist_ = local_marking_worklist;
DCHECK_NOT_NULL(heap_);
}
} // namespace internal
} // namespace v8
@@ -6,43 +6,33 @@
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_
#include "include/v8-cppgc.h"
#include "src/base/logging.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-worklist.h"
namespace v8 {
namespace internal {
class BasicTracedReferenceExtractor {
public:
static Address* ObjectReference(const TracedReferenceBase& ref) {
return reinterpret_cast<Address*>(ref.val_);
}
};
class UnifiedHeapMarkingState {
// `UnifiedHeapMarkingState` is used to handle `TracedReferenceBase` and
// friends. It is used both when `CppHeap` is attached to an `Isolate` and
// when it is detached. In detached mode, the expectation is that no
// non-null `TracedReferenceBase` is found.
class UnifiedHeapMarkingState final {
public:
explicit UnifiedHeapMarkingState(Heap* heap) : heap_(heap) {}
UnifiedHeapMarkingState(Heap*, MarkingWorklists::Local*);
UnifiedHeapMarkingState(const UnifiedHeapMarkingState&) = delete;
UnifiedHeapMarkingState& operator=(const UnifiedHeapMarkingState&) = delete;
inline void MarkAndPush(const TracedReferenceBase&);
void Update(MarkingWorklists::Local*);
V8_INLINE void MarkAndPush(const TracedReferenceBase&);
private:
Heap* heap_;
Heap* const heap_;
MarkCompactCollector::MarkingState* const marking_state_;
MarkingWorklists::Local* local_marking_worklist_ = nullptr;
const bool track_retaining_path_;
};
void UnifiedHeapMarkingState::MarkAndPush(const TracedReferenceBase& ref) {
// The same visitor is used in testing scenarios without attaching the heap to
// an Isolate under the assumption that no non-empty v8 references are found.
// Having the following DCHECK crash means that the heap is in detached mode
// but we find traceable pointers into an Isolate.
DCHECK_NOT_NULL(heap_);
heap_->RegisterExternallyReferencedObject(
BasicTracedReferenceExtractor::ObjectReference(ref));
}
} // namespace internal
} // namespace v8
......
@@ -4,10 +4,12 @@
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/cppgc-js/unified-heap-marking-state-inl.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/visitor.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
namespace v8 {
namespace internal {
@@ -53,18 +55,8 @@ void UnifiedHeapMarkingVisitorBase::HandleMovableReference(const void** slot) {
marking_state_.RegisterMovableReference(slot);
}
namespace {
void DeferredTraceTracedReference(cppgc::Visitor* visitor, const void* ref) {
static_cast<JSVisitor*>(visitor)->Trace(
*static_cast<const TracedReferenceBase*>(ref));
}
} // namespace
void UnifiedHeapMarkingVisitorBase::Visit(const TracedReferenceBase& ref) {
bool should_defer_tracing = DeferTraceToMutatorThreadIfConcurrent(
&ref, DeferredTraceTracedReference, 0);
if (!should_defer_tracing) unified_heap_marking_state_.MarkAndPush(ref);
unified_heap_marking_state_.MarkAndPush(ref);
}
MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
@@ -89,10 +81,22 @@ void MutatorUnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
}
ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
HeapBase& heap, cppgc::internal::ConcurrentMarkingState& marking_state,
UnifiedHeapMarkingState& unified_heap_marking_state)
HeapBase& heap, Heap* v8_heap,
cppgc::internal::ConcurrentMarkingState& marking_state)
: UnifiedHeapMarkingVisitorBase(heap, marking_state,
unified_heap_marking_state) {}
concurrent_unified_heap_marking_state_),
local_marking_worklist_(
v8_heap ? std::make_unique<MarkingWorklists::Local>(
v8_heap->mark_compact_collector()->marking_worklists())
: nullptr),
concurrent_unified_heap_marking_state_(v8_heap,
local_marking_worklist_.get()) {}
ConcurrentUnifiedHeapMarkingVisitor::~ConcurrentUnifiedHeapMarkingVisitor() {
if (local_marking_worklist_) {
local_marking_worklist_->Publish();
}
}
bool ConcurrentUnifiedHeapMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
const void* parameter, cppgc::TraceCallback callback,
......
@@ -87,10 +87,9 @@ class V8_EXPORT_PRIVATE MutatorMinorGCMarkingVisitor final
class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
: public UnifiedHeapMarkingVisitorBase {
public:
ConcurrentUnifiedHeapMarkingVisitor(HeapBase&,
cppgc::internal::ConcurrentMarkingState&,
UnifiedHeapMarkingState&);
~ConcurrentUnifiedHeapMarkingVisitor() override = default;
ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, Heap*,
cppgc::internal::ConcurrentMarkingState&);
~ConcurrentUnifiedHeapMarkingVisitor() override;
protected:
void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final {
@@ -103,6 +102,15 @@ class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
bool DeferTraceToMutatorThreadIfConcurrent(const void*, cppgc::TraceCallback,
size_t) final;
private:
// Visitor owns the local worklist. All remaining items are published on
// destruction of the visitor. This is good enough as concurrent visitation
// ends before computing the rest of the transitive closure on the main
// thread. Dynamically allocated as it is only present when the heaps are
// attached.
std::unique_ptr<MarkingWorklists::Local> local_marking_worklist_;
UnifiedHeapMarkingState concurrent_unified_heap_marking_state_;
};
} // namespace internal
......
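
The ownership pattern declared above, a visitor-owned local worklist whose leftover entries are published on destruction, can be illustrated with a small self-contained sketch. The types and names here are illustrative, not V8's `MarkingWorklists` API:

```cpp
// Illustrative shape of a thread-local worklist that hands its remaining
// items to a shared list when the owning visitor is destroyed.
#include <mutex>
#include <vector>

struct SharedWorklist {
  std::mutex mu;
  std::vector<void*> items;
};

class LocalWorklist {
 public:
  explicit LocalWorklist(SharedWorklist* shared) : shared_(shared) {}
  ~LocalWorklist() { Publish(); }  // leftovers reach the main thread

  void Push(void* object) { local_.push_back(object); }

  void Publish() {
    if (local_.empty()) return;
    std::lock_guard<std::mutex> lock(shared_->mu);
    shared_->items.insert(shared_->items.end(), local_.begin(), local_.end());
    local_.clear();
  }

 private:
  SharedWorklist* const shared_;
  std::vector<void*> local_;  // private to the owning thread until Publish()
};
```

Publishing on destruction suffices because, as the comment above notes, concurrent visitation ends before the main thread computes the rest of the transitive closure.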
@@ -203,9 +203,12 @@ void ConcurrentMarkerBase::Start() {
std::make_unique<ConcurrentMarkingTask>(*this));
}
void ConcurrentMarkerBase::Cancel() {
if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
bool ConcurrentMarkerBase::Cancel() {
if (!concurrent_marking_handle_ || !concurrent_marking_handle_->IsValid())
return false;
concurrent_marking_handle_->Cancel();
return true;
}
void ConcurrentMarkerBase::JoinForTesting() {
......
@@ -24,7 +24,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
ConcurrentMarkerBase& operator=(const ConcurrentMarkerBase&) = delete;
void Start();
void Cancel();
// Returns whether the job has been cancelled.
bool Cancel();
void JoinForTesting();
......
@@ -218,7 +218,6 @@ void MarkerBase::StartMarking() {
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
mutator_marking_state_.Publish();
concurrent_marker_->Start();
concurrent_marking_active_ = true;
}
incremental_marking_allocation_observer_ =
std::make_unique<IncrementalMarkingAllocationObserver>(*this);
@@ -262,11 +261,10 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
// Start parallel marking.
mutator_marking_state_.Publish();
if (concurrent_marking_active_) {
if (concurrent_marker_->IsActive()) {
concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
} else {
concurrent_marker_->Start();
concurrent_marking_active_ = true;
}
}
}
@@ -433,11 +431,10 @@ void MarkerBase::AdvanceMarkingOnAllocation() {
bool MarkerBase::CancelConcurrentMarkingIfNeeded() {
if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
!concurrent_marking_active_)
!concurrent_marker_->IsActive())
return false;
concurrent_marker_->Cancel();
concurrent_marking_active_ = false;
// Concurrent markers may have pushed some "leftover" in-construction objects
// after flushing in EnterAtomicPause.
HandleNotFullyConstructedObjects();
@@ -626,6 +623,16 @@ void MarkerBase::WaitForConcurrentMarkingForTesting() {
concurrent_marker_->JoinForTesting();
}
MarkerBase::PauseConcurrentMarkingScope::PauseConcurrentMarkingScope(
MarkerBase& marker)
: marker_(marker), resume_on_exit_(marker_.concurrent_marker_->Cancel()) {}
MarkerBase::PauseConcurrentMarkingScope::~PauseConcurrentMarkingScope() {
if (resume_on_exit_) {
marker_.concurrent_marker_->Start();
}
}
Marker::Marker(HeapBase& heap, cppgc::Platform* platform, MarkingConfig config)
: MarkerBase(heap, platform, config),
marking_visitor_(heap, mutator_marking_state_),
......
@@ -61,6 +61,17 @@ class V8_EXPORT_PRIVATE MarkerBase {
kSteele,
};
// Pauses concurrent marking if running while this scope is active.
class PauseConcurrentMarkingScope final {
public:
explicit PauseConcurrentMarkingScope(MarkerBase&);
~PauseConcurrentMarkingScope();
private:
MarkerBase& marker_;
const bool resume_on_exit_;
};
virtual ~MarkerBase();
MarkerBase(const MarkerBase&) = delete;
@@ -183,7 +194,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
IncrementalMarkingSchedule schedule_;
std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
bool concurrent_marking_active_ = false;
bool main_marking_disabled_for_testing_{false};
bool visited_cross_thread_persistents_in_atomic_pause_{false};
......
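
Usage of the new scope follows the usual RAII pause/resume shape, as the heap.cc hunks below show. A hedged example; the surrounding function is hypothetical:

```cpp
// Hypothetical caller: cancel concurrent marking for the duration of a
// young-generation GC. The destructor restarts the marker only if
// Cancel() actually stopped a running job (resume_on_exit_ was true).
void CollectYoungGenerationWithPausedMarking(MarkerBase& marker) {
  MarkerBase::PauseConcurrentMarkingScope pause(marker);
  // ... run the young-gen GC; objects that concurrent markers were
  // processing may be reclaimed here ...
}  // ~PauseConcurrentMarkingScope() resumes marking if it was active
```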
@@ -2614,7 +2614,14 @@ void Heap::MinorMarkCompact() {
: nullptr);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
// Young generation garbage collection is orthogonal to full GC marking. It
// is possible that objects that are currently being processed for marking are
// reclaimed in the young generation GC that interleaves concurrent marking.
// Pause concurrent markers to allow processing them using
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
minor_mark_compact_collector_->CollectGarbage();
@@ -2657,7 +2664,14 @@ void Heap::CheckNewSpaceExpansionCriteria() {
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
// Young generation garbage collection is orthogonal to full GC marking. It
// is possible that objects that are currently being processed for marking are
// reclaimed in the young generation GC that interleaves concurrent marking.
// Pause concurrent markers to allow processing them using
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
@@ -2719,7 +2733,14 @@ void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
// Young generation garbage collection is orthogonal to full GC marking. It
// is possible that objects that are currently being processed for marking are
// reclaimed in the young generation GC that interleaves concurrent marking.
// Pause concurrent markers to allow processing them using
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
// trigger one during scavenge: scavenges allocation should always succeed.
......
@@ -2459,6 +2459,7 @@ class Heap {
friend class StressConcurrentAllocationObserver;
friend class Space;
friend class Sweeper;
friend class UnifiedHeapMarkingState;
friend class heap::TestMemoryAllocatorScope;
friend class third_party_heap::Heap;
friend class third_party_heap::Impl;
......
@@ -607,7 +607,7 @@ void MarkCompactCollector::StartMarking() {
if (FLAG_verify_heap) {
VerifyMarkbitsAreClean();
}
#endif
#endif // VERIFY_HEAP
}
void MarkCompactCollector::CollectGarbage() {
@@ -976,16 +976,21 @@ void MarkCompactCollector::Prepare() {
DCHECK(!sweeping_in_progress());
if (!was_marked_incrementally_) {
auto embedder_flags = heap_->flags_for_embedder_tracer();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
auto embedder_flags = heap_->flags_for_embedder_tracer();
// PrepareForTrace should be called before visitor initialization in
// StartMarking.
heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
}
StartCompaction(StartCompactionMode::kAtomic);
StartMarking();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
// TracePrologue immediately starts marking which requires V8 worklists to
// be set up.
heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
}
}
heap_->FreeLinearAllocationAreas();
......