Commit 2da23bd5 authored by Michael Lippautz, committed by V8 LUCI CQ

Reland "cppgc-js: Concurrently process v8::TracedReference"

This is a reland of commit 1f0d7d20

The fix merges concurrent marking tasks when marking in the atomic
pause. Without the fix, Oilpan markers would continue running
concurrently, possibly discovering new V8 objects. This violates the
assumption that the final transitive closure runs on a single thread.

Original change's description:
> cppgc-js: Concurrently process v8::TracedReference
>
> Adds concurrent marking for reaching through v8::TracedReference.
> Before this CL, a v8::TracedReference would always be processed on the
> main thread by pushing a callback for each encountered reference.
>
> This CL now wires up concurrent handling for such references. In particular:
> - Global handles are already marked as well and not repurposed during
>   the same GC cycle.
> - Since global handles are not repurposed, it is enough to
>   double-deref to the V8 object, checking for possible null pointers.
> - The bitmap for global handle flags is mostly non-atomic, with the
>   markbit being the exception.
> - Finally, all state is wired up in CppHeap. Concurrent markers keep
>   their own local worklist while the mutator marker directly pushes to
>   the worklist owned by V8.
>
> Bug: v8:12600
> Change-Id: Ia67dbd18a57dbcccf4dfb9ccfdb9ee438d27fe71
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3516255
> Reviewed-by: Omer Katz <omerkatz@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#79736}

Bug: v8:12600
Change-Id: I8545041b2c7b3daf7ecea7e3a100e27534e9b8b5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3571887
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79919}
parent 86ce461e
...@@ -1389,7 +1389,9 @@ filegroup( ...@@ -1389,7 +1389,9 @@ filegroup(
"src/heap/cppgc-js/cpp-marking-state-inl.h", "src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-snapshot.cc", "src/heap/cppgc-js/cpp-snapshot.cc",
"src/heap/cppgc-js/cpp-snapshot.h", "src/heap/cppgc-js/cpp-snapshot.h",
"src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-state.h", "src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-state-inl.h",
"src/heap/cppgc-js/unified-heap-marking-verifier.cc", "src/heap/cppgc-js/unified-heap-marking-verifier.cc",
"src/heap/cppgc-js/unified-heap-marking-verifier.h", "src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc", "src/heap/cppgc-js/unified-heap-marking-visitor.cc",
......
...@@ -2982,6 +2982,7 @@ v8_header_set("v8_internal_headers") { ...@@ -2982,6 +2982,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/cppgc-js/cpp-marking-state-inl.h", "src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-marking-state.h", "src/heap/cppgc-js/cpp-marking-state.h",
"src/heap/cppgc-js/cpp-snapshot.h", "src/heap/cppgc-js/cpp-snapshot.h",
"src/heap/cppgc-js/unified-heap-marking-state-inl.h",
"src/heap/cppgc-js/unified-heap-marking-state.h", "src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-verifier.h", "src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.h", "src/heap/cppgc-js/unified-heap-marking-visitor.h",
...@@ -4234,6 +4235,7 @@ v8_source_set("v8_base_without_compiler") { ...@@ -4234,6 +4235,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/concurrent-marking.cc", "src/heap/concurrent-marking.cc",
"src/heap/cppgc-js/cpp-heap.cc", "src/heap/cppgc-js/cpp-heap.cc",
"src/heap/cppgc-js/cpp-snapshot.cc", "src/heap/cppgc-js/cpp-snapshot.cc",
"src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-verifier.cc", "src/heap/cppgc-js/unified-heap-marking-verifier.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc", "src/heap/cppgc-js/unified-heap-marking-visitor.cc",
"src/heap/embedder-tracing.cc", "src/heap/embedder-tracing.cc",
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "src/handles/global-handles.h" #include "src/handles/global-handles.h"
#include <algorithm> #include <algorithm>
#include <atomic>
#include <cstdint> #include <cstdint>
#include <map> #include <map>
...@@ -13,6 +14,7 @@ ...@@ -13,6 +14,7 @@
#include "src/base/compiler-specific.h" #include "src/base/compiler-specific.h"
#include "src/base/sanitizer/asan.h" #include "src/base/sanitizer/asan.h"
#include "src/common/allow-deprecated.h" #include "src/common/allow-deprecated.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h" #include "src/execution/vm-state-inl.h"
#include "src/heap/base/stack.h" #include "src/heap/base/stack.h"
#include "src/heap/embedder-tracing.h" #include "src/heap/embedder-tracing.h"
...@@ -655,9 +657,21 @@ class GlobalHandles::TracedNode final ...@@ -655,9 +657,21 @@ class GlobalHandles::TracedNode final
bool is_root() const { return IsRoot::decode(flags_); } bool is_root() const { return IsRoot::decode(flags_); }
void set_root(bool v) { flags_ = IsRoot::update(flags_, v); } void set_root(bool v) { flags_ = IsRoot::update(flags_, v); }
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
void set_markbit() {
if constexpr (access_mode == AccessMode::NON_ATOMIC) {
flags_ = Markbit::update(flags_, true);
return;
}
std::atomic<uint8_t>& atomic_flags =
reinterpret_cast<std::atomic<uint8_t>&>(flags_);
const uint8_t new_value =
Markbit::update(atomic_flags.load(std::memory_order_relaxed), true);
atomic_flags.fetch_or(new_value, std::memory_order_relaxed);
}
bool markbit() const { return Markbit::decode(flags_); } bool markbit() const { return Markbit::decode(flags_); }
void clear_markbit() { flags_ = Markbit::update(flags_, false); } void clear_markbit() { flags_ = Markbit::update(flags_, false); }
void set_markbit() { flags_ = Markbit::update(flags_, true); }
bool is_on_stack() const { return IsOnStack::decode(flags_); } bool is_on_stack() const { return IsOnStack::decode(flags_); }
void set_is_on_stack(bool v) { flags_ = IsOnStack::update(flags_, v); } void set_is_on_stack(bool v) { flags_ = IsOnStack::update(flags_, v); }
...@@ -675,11 +689,19 @@ class GlobalHandles::TracedNode final ...@@ -675,11 +689,19 @@ class GlobalHandles::TracedNode final
static void Verify(GlobalHandles* global_handles, const Address* const* slot); static void Verify(GlobalHandles* global_handles, const Address* const* slot);
protected: protected:
// Various state is managed in a bit field where some of the state is managed
// concurrently, whereas other state is managed only on the main thread when
// no concurrent thread has access to flags, e.g., in the atomic pause of the
// garbage collector.
//
// The following state is managed only on the main thread.
using NodeState = base::BitField8<State, 0, 2>; using NodeState = base::BitField8<State, 0, 2>;
using IsInYoungList = NodeState::Next<bool, 1>; using IsInYoungList = NodeState::Next<bool, 1>;
using IsRoot = IsInYoungList::Next<bool, 1>; using IsRoot = IsInYoungList::Next<bool, 1>;
using Markbit = IsRoot::Next<bool, 1>; using IsOnStack = IsRoot::Next<bool, 1>;
using IsOnStack = Markbit::Next<bool, 1>; // The markbit is the exception as it can be set from the main and marker
// threads at the same time.
using Markbit = IsOnStack::Next<bool, 1>;
void ClearImplFields() { void ClearImplFields() {
set_root(true); set_root(true);
...@@ -1081,7 +1103,7 @@ GlobalHandles* GlobalHandles::From(const TracedNode* node) { ...@@ -1081,7 +1103,7 @@ GlobalHandles* GlobalHandles::From(const TracedNode* node) {
void GlobalHandles::MarkTraced(Address* location) { void GlobalHandles::MarkTraced(Address* location) {
TracedNode* node = TracedNode::FromLocation(location); TracedNode* node = TracedNode::FromLocation(location);
node->set_markbit(); node->set_markbit<AccessMode::ATOMIC>();
DCHECK(node->IsInUse()); DCHECK(node->IsInUse());
} }
......
...@@ -204,27 +204,27 @@ class UnifiedHeapConcurrentMarker ...@@ -204,27 +204,27 @@ class UnifiedHeapConcurrentMarker
: public cppgc::internal::ConcurrentMarkerBase { : public cppgc::internal::ConcurrentMarkerBase {
public: public:
UnifiedHeapConcurrentMarker( UnifiedHeapConcurrentMarker(
cppgc::internal::HeapBase& heap, cppgc::internal::HeapBase& heap, Heap* v8_heap,
cppgc::internal::MarkingWorklists& marking_worklists, cppgc::internal::MarkingWorklists& marking_worklists,
cppgc::internal::IncrementalMarkingSchedule& incremental_marking_schedule, cppgc::internal::IncrementalMarkingSchedule& incremental_marking_schedule,
cppgc::Platform* platform, cppgc::Platform* platform,
UnifiedHeapMarkingState& unified_heap_marking_state) UnifiedHeapMarkingState& unified_heap_marking_state)
: cppgc::internal::ConcurrentMarkerBase( : cppgc::internal::ConcurrentMarkerBase(
heap, marking_worklists, incremental_marking_schedule, platform), heap, marking_worklists, incremental_marking_schedule, platform),
unified_heap_marking_state_(unified_heap_marking_state) {} v8_heap_(v8_heap) {}
std::unique_ptr<cppgc::Visitor> CreateConcurrentMarkingVisitor( std::unique_ptr<cppgc::Visitor> CreateConcurrentMarkingVisitor(
cppgc::internal::ConcurrentMarkingState&) const final; cppgc::internal::ConcurrentMarkingState&) const final;
private: private:
UnifiedHeapMarkingState& unified_heap_marking_state_; Heap* const v8_heap_;
}; };
std::unique_ptr<cppgc::Visitor> std::unique_ptr<cppgc::Visitor>
UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor( UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
cppgc::internal::ConcurrentMarkingState& marking_state) const { cppgc::internal::ConcurrentMarkingState& marking_state) const {
return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>( return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(heap(), v8_heap_,
heap(), marking_state, unified_heap_marking_state_); marking_state);
} }
void FatalOutOfMemoryHandlerImpl(const std::string& reason, void FatalOutOfMemoryHandlerImpl(const std::string& reason,
...@@ -253,6 +253,10 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase { ...@@ -253,6 +253,10 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
marking_visitor_->marking_state_); marking_visitor_->marking_state_);
} }
UnifiedHeapMarkingState& GetMutatorUnifiedHeapMarkingState() {
return mutator_unified_heap_marking_state_;
}
protected: protected:
cppgc::Visitor& visitor() final { return *marking_visitor_; } cppgc::Visitor& visitor() final { return *marking_visitor_; }
cppgc::internal::ConservativeTracingVisitor& conservative_visitor() final { cppgc::internal::ConservativeTracingVisitor& conservative_visitor() final {
...@@ -263,7 +267,7 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase { ...@@ -263,7 +267,7 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
} }
private: private:
UnifiedHeapMarkingState unified_heap_marking_state_; UnifiedHeapMarkingState mutator_unified_heap_marking_state_;
std::unique_ptr<MutatorUnifiedHeapMarkingVisitor> marking_visitor_; std::unique_ptr<MutatorUnifiedHeapMarkingVisitor> marking_visitor_;
cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_; cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
}; };
...@@ -273,18 +277,19 @@ UnifiedHeapMarker::UnifiedHeapMarker(Heap* v8_heap, ...@@ -273,18 +277,19 @@ UnifiedHeapMarker::UnifiedHeapMarker(Heap* v8_heap,
cppgc::Platform* platform, cppgc::Platform* platform,
MarkingConfig config) MarkingConfig config)
: cppgc::internal::MarkerBase(heap, platform, config), : cppgc::internal::MarkerBase(heap, platform, config),
unified_heap_marking_state_(v8_heap), mutator_unified_heap_marking_state_(v8_heap, nullptr),
marking_visitor_( marking_visitor_(config.collection_type == CppHeap::CollectionType::kMajor
config.collection_type == CppHeap::CollectionType::kMajor ? std::make_unique<MutatorUnifiedHeapMarkingVisitor>(
? std::make_unique<MutatorUnifiedHeapMarkingVisitor>( heap, mutator_marking_state_,
heap, mutator_marking_state_, unified_heap_marking_state_) mutator_unified_heap_marking_state_)
: std::make_unique<MutatorMinorGCMarkingVisitor>( : std::make_unique<MutatorMinorGCMarkingVisitor>(
heap, mutator_marking_state_, unified_heap_marking_state_)), heap, mutator_marking_state_,
mutator_unified_heap_marking_state_)),
conservative_marking_visitor_(heap, mutator_marking_state_, conservative_marking_visitor_(heap, mutator_marking_state_,
*marking_visitor_) { *marking_visitor_) {
concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>( concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
heap_, marking_worklists_, schedule_, platform_, heap_, v8_heap, marking_worklists_, schedule_, platform_,
unified_heap_marking_state_); mutator_unified_heap_marking_state_);
} }
void UnifiedHeapMarker::AddObject(void* object) { void UnifiedHeapMarker::AddObject(void* object) {
...@@ -567,6 +572,16 @@ void CppHeap::InitializeTracing(CollectionType collection_type, ...@@ -567,6 +572,16 @@ void CppHeap::InitializeTracing(CollectionType collection_type,
} }
void CppHeap::StartTracing() { void CppHeap::StartTracing() {
if (isolate_) {
// Reuse the same local worklist for the mutator marking state which results
// in directly processing the objects by the JS logic. Also avoids
// publishing local objects.
static_cast<UnifiedHeapMarker*>(marker_.get())
->GetMutatorUnifiedHeapMarkingState()
.Update(isolate_->heap()
->mark_compact_collector()
->local_marking_worklists());
}
marker_->StartMarking(); marker_->StartMarking();
marking_done_ = false; marking_done_ = false;
} }
...@@ -884,5 +899,12 @@ CppHeap::CreateCppMarkingStateForMutatorThread() { ...@@ -884,5 +899,12 @@ CppHeap::CreateCppMarkingStateForMutatorThread() {
static_cast<UnifiedHeapMarker*>(marker())->GetMutatorMarkingState()); static_cast<UnifiedHeapMarker*>(marker())->GetMutatorMarkingState());
} }
CppHeap::PauseConcurrentMarkingScope::PauseConcurrentMarkingScope(
CppHeap* cpp_heap) {
if (cpp_heap && cpp_heap->marker()) {
pause_scope_.emplace(*cpp_heap->marker());
}
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -17,6 +17,7 @@ static_assert( ...@@ -17,6 +17,7 @@ static_assert(
#include "src/base/macros.h" #include "src/base/macros.h"
#include "src/base/optional.h" #include "src/base/optional.h"
#include "src/heap/cppgc/heap-base.h" #include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/stats-collector.h" #include "src/heap/cppgc/stats-collector.h"
#include "src/logging/metrics.h" #include "src/logging/metrics.h"
...@@ -91,6 +92,15 @@ class V8_EXPORT_PRIVATE CppHeap final ...@@ -91,6 +92,15 @@ class V8_EXPORT_PRIVATE CppHeap final
last_incremental_mark_event_; last_incremental_mark_event_;
}; };
class PauseConcurrentMarkingScope final {
public:
explicit PauseConcurrentMarkingScope(CppHeap*);
private:
base::Optional<cppgc::internal::MarkerBase::PauseConcurrentMarkingScope>
pause_scope_;
};
static CppHeap* From(v8::CppHeap* heap) { static CppHeap* From(v8::CppHeap* heap) {
return static_cast<CppHeap*>(heap); return static_cast<CppHeap*>(heap);
} }
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_INL_H_
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_INL_H_
#include "include/v8-traced-handle.h"
#include "src/base/logging.h"
#include "src/handles/global-handles.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-worklist-inl.h"
namespace v8 {
namespace internal {
class BasicTracedReferenceExtractor {
public:
static Address* ObjectReference(const TracedReferenceBase& ref) {
return const_cast<Address*>(
reinterpret_cast<const Address*>(ref.GetSlotThreadSafe()));
}
};
void UnifiedHeapMarkingState::MarkAndPush(
const TracedReferenceBase& reference) {
// The following code will crash with null pointer derefs when finding a
// non-empty `TracedReferenceBase` when `CppHeap` is in detached mode.
Address* global_handle_location =
BasicTracedReferenceExtractor::ObjectReference(reference);
DCHECK_NOT_NULL(global_handle_location);
GlobalHandles::MarkTraced(global_handle_location);
Object object(reinterpret_cast<std::atomic<Address>*>(global_handle_location)
->load(std::memory_order_relaxed));
if (!object.IsHeapObject()) {
// The embedder is not aware of whether numbers are materialized as heap
// objects are just passed around as Smis.
return;
}
HeapObject heap_object = HeapObject::cast(object);
if (marking_state_->WhiteToGrey(heap_object)) {
local_marking_worklist_->Push(heap_object);
}
if (V8_UNLIKELY(track_retaining_path_)) {
heap_->AddRetainingRoot(Root::kWrapperTracing, heap_object);
}
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_INL_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/base/logging.h"
#include "src/heap/mark-compact.h"
namespace v8 {
namespace internal {
UnifiedHeapMarkingState::UnifiedHeapMarkingState(
Heap* heap, MarkingWorklists::Local* local_marking_worklist)
: heap_(heap),
marking_state_(heap_ ? heap_->mark_compact_collector()->marking_state()
: nullptr),
local_marking_worklist_(local_marking_worklist),
track_retaining_path_(FLAG_track_retaining_path) {
DCHECK_IMPLIES(FLAG_track_retaining_path,
!FLAG_concurrent_marking && !FLAG_parallel_marking);
DCHECK_IMPLIES(heap_, marking_state_);
}
void UnifiedHeapMarkingState::Update(
MarkingWorklists::Local* local_marking_worklist) {
local_marking_worklist_ = local_marking_worklist;
DCHECK_NOT_NULL(heap_);
}
} // namespace internal
} // namespace v8
...@@ -6,43 +6,33 @@ ...@@ -6,43 +6,33 @@
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_ #define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_
#include "include/v8-cppgc.h" #include "include/v8-cppgc.h"
#include "src/base/logging.h" #include "src/heap/mark-compact.h"
#include "src/heap/heap.h" #include "src/heap/marking-worklist.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
class BasicTracedReferenceExtractor { // `UnifiedHeapMarkingState` is used to handle `TracedReferenceBase` and
public: // friends. It is used when `CppHeap` is attached but also detached. In detached
static Address* ObjectReference(const TracedReferenceBase& ref) { // mode, the expectation is that no non-null `TracedReferenceBase` is found.
return reinterpret_cast<Address*>(ref.val_); class UnifiedHeapMarkingState final {
}
};
class UnifiedHeapMarkingState {
public: public:
explicit UnifiedHeapMarkingState(Heap* heap) : heap_(heap) {} UnifiedHeapMarkingState(Heap*, MarkingWorklists::Local*);
UnifiedHeapMarkingState(const UnifiedHeapMarkingState&) = delete; UnifiedHeapMarkingState(const UnifiedHeapMarkingState&) = delete;
UnifiedHeapMarkingState& operator=(const UnifiedHeapMarkingState&) = delete; UnifiedHeapMarkingState& operator=(const UnifiedHeapMarkingState&) = delete;
inline void MarkAndPush(const TracedReferenceBase&); void Update(MarkingWorklists::Local*);
V8_INLINE void MarkAndPush(const TracedReferenceBase&);
private: private:
Heap* heap_; Heap* const heap_;
MarkCompactCollector::MarkingState* const marking_state_;
MarkingWorklists::Local* local_marking_worklist_ = nullptr;
const bool track_retaining_path_;
}; };
void UnifiedHeapMarkingState::MarkAndPush(const TracedReferenceBase& ref) {
// The same visitor is used in testing scenarios without attaching the heap to
// an Isolate under the assumption that no non-empty v8 references are found.
// Having the following DCHECK crash means that the heap is in detached mode
// but we find traceable pointers into an Isolate.
DCHECK_NOT_NULL(heap_);
heap_->RegisterExternallyReferencedObject(
BasicTracedReferenceExtractor::ObjectReference(ref));
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -4,10 +4,12 @@ ...@@ -4,10 +4,12 @@
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h" #include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h" #include "src/heap/cppgc-js/unified-heap-marking-state-inl.h"
#include "src/heap/cppgc/heap.h" #include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h" #include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/visitor.h" #include "src/heap/cppgc/visitor.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
...@@ -53,18 +55,8 @@ void UnifiedHeapMarkingVisitorBase::HandleMovableReference(const void** slot) { ...@@ -53,18 +55,8 @@ void UnifiedHeapMarkingVisitorBase::HandleMovableReference(const void** slot) {
marking_state_.RegisterMovableReference(slot); marking_state_.RegisterMovableReference(slot);
} }
namespace {
void DeferredTraceTracedReference(cppgc::Visitor* visitor, const void* ref) {
static_cast<JSVisitor*>(visitor)->Trace(
*static_cast<const TracedReferenceBase*>(ref));
}
} // namespace
void UnifiedHeapMarkingVisitorBase::Visit(const TracedReferenceBase& ref) { void UnifiedHeapMarkingVisitorBase::Visit(const TracedReferenceBase& ref) {
bool should_defer_tracing = DeferTraceToMutatorThreadIfConcurrent( unified_heap_marking_state_.MarkAndPush(ref);
&ref, DeferredTraceTracedReference, 0);
if (!should_defer_tracing) unified_heap_marking_state_.MarkAndPush(ref);
} }
MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor( MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
...@@ -89,10 +81,22 @@ void MutatorUnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object, ...@@ -89,10 +81,22 @@ void MutatorUnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
} }
ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor( ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
HeapBase& heap, cppgc::internal::ConcurrentMarkingState& marking_state, HeapBase& heap, Heap* v8_heap,
UnifiedHeapMarkingState& unified_heap_marking_state) cppgc::internal::ConcurrentMarkingState& marking_state)
: UnifiedHeapMarkingVisitorBase(heap, marking_state, : UnifiedHeapMarkingVisitorBase(heap, marking_state,
unified_heap_marking_state) {} concurrent_unified_heap_marking_state_),
local_marking_worklist_(
v8_heap ? std::make_unique<MarkingWorklists::Local>(
v8_heap->mark_compact_collector()->marking_worklists())
: nullptr),
concurrent_unified_heap_marking_state_(v8_heap,
local_marking_worklist_.get()) {}
ConcurrentUnifiedHeapMarkingVisitor::~ConcurrentUnifiedHeapMarkingVisitor() {
if (local_marking_worklist_) {
local_marking_worklist_->Publish();
}
}
bool ConcurrentUnifiedHeapMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent( bool ConcurrentUnifiedHeapMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
const void* parameter, cppgc::TraceCallback callback, const void* parameter, cppgc::TraceCallback callback,
......
...@@ -87,10 +87,9 @@ class V8_EXPORT_PRIVATE MutatorMinorGCMarkingVisitor final ...@@ -87,10 +87,9 @@ class V8_EXPORT_PRIVATE MutatorMinorGCMarkingVisitor final
class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
: public UnifiedHeapMarkingVisitorBase { : public UnifiedHeapMarkingVisitorBase {
public: public:
ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, Heap*,
cppgc::internal::ConcurrentMarkingState&, cppgc::internal::ConcurrentMarkingState&);
UnifiedHeapMarkingState&); ~ConcurrentUnifiedHeapMarkingVisitor() override;
~ConcurrentUnifiedHeapMarkingVisitor() override = default;
protected: protected:
void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final { void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final {
...@@ -103,6 +102,15 @@ class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final ...@@ -103,6 +102,15 @@ class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
bool DeferTraceToMutatorThreadIfConcurrent(const void*, cppgc::TraceCallback, bool DeferTraceToMutatorThreadIfConcurrent(const void*, cppgc::TraceCallback,
size_t) final; size_t) final;
private:
// Visitor owns the local worklist. All remaining items are published on
// destruction of the visitor. This is good enough as concurrent visitation
// ends before computing the rest of the transitive closure on the main
// thread. Dynamically allocated as it is only present when the heaps are
// attached.
std::unique_ptr<MarkingWorklists::Local> local_marking_worklist_;
UnifiedHeapMarkingState concurrent_unified_heap_marking_state_;
}; };
} // namespace internal } // namespace internal
......
...@@ -211,6 +211,14 @@ bool ConcurrentMarkerBase::Join() { ...@@ -211,6 +211,14 @@ bool ConcurrentMarkerBase::Join() {
return true; return true;
} }
bool ConcurrentMarkerBase::Cancel() {
if (!concurrent_marking_handle_ || !concurrent_marking_handle_->IsValid())
return false;
concurrent_marking_handle_->Cancel();
return true;
}
bool ConcurrentMarkerBase::IsActive() const { bool ConcurrentMarkerBase::IsActive() const {
return concurrent_marking_handle_ && concurrent_marking_handle_->IsValid(); return concurrent_marking_handle_ && concurrent_marking_handle_->IsValid();
} }
......
...@@ -26,6 +26,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarkerBase { ...@@ -26,6 +26,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
void Start(); void Start();
// Returns whether the job has been joined. // Returns whether the job has been joined.
bool Join(); bool Join();
// Returns whether the job has been cancelled.
bool Cancel();
void NotifyIncrementalMutatorStepCompleted(); void NotifyIncrementalMutatorStepCompleted();
......
...@@ -640,6 +640,16 @@ void MarkerBase::WaitForConcurrentMarkingForTesting() { ...@@ -640,6 +640,16 @@ void MarkerBase::WaitForConcurrentMarkingForTesting() {
concurrent_marker_->Join(); concurrent_marker_->Join();
} }
MarkerBase::PauseConcurrentMarkingScope::PauseConcurrentMarkingScope(
MarkerBase& marker)
: marker_(marker), resume_on_exit_(marker_.concurrent_marker_->Cancel()) {}
MarkerBase::PauseConcurrentMarkingScope::~PauseConcurrentMarkingScope() {
if (resume_on_exit_) {
marker_.concurrent_marker_->Start();
}
}
Marker::Marker(HeapBase& heap, cppgc::Platform* platform, MarkingConfig config) Marker::Marker(HeapBase& heap, cppgc::Platform* platform, MarkingConfig config)
: MarkerBase(heap, platform, config), : MarkerBase(heap, platform, config),
marking_visitor_(heap, mutator_marking_state_), marking_visitor_(heap, mutator_marking_state_),
......
...@@ -63,6 +63,17 @@ class V8_EXPORT_PRIVATE MarkerBase { ...@@ -63,6 +63,17 @@ class V8_EXPORT_PRIVATE MarkerBase {
kSteele, kSteele,
}; };
// Pauses concurrent marking if running while this scope is active.
class PauseConcurrentMarkingScope final {
public:
explicit PauseConcurrentMarkingScope(MarkerBase&);
~PauseConcurrentMarkingScope();
private:
MarkerBase& marker_;
const bool resume_on_exit_;
};
virtual ~MarkerBase(); virtual ~MarkerBase();
MarkerBase(const MarkerBase&) = delete; MarkerBase(const MarkerBase&) = delete;
......
...@@ -2642,7 +2642,14 @@ void Heap::MinorMarkCompact() { ...@@ -2642,7 +2642,14 @@ void Heap::MinorMarkCompact() {
: nullptr); : nullptr);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation( IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking()); incremental_marking());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking()); // Young generation garbage collection is orthogonal from full GC marking. It
// is possible that objects that are currently being processed for marking are
// reclaimed in the young generation GC that interleaves concurrent marking.
// Pause concurrent markers to allow processing them using
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
minor_mark_compact_collector_->CollectGarbage(); minor_mark_compact_collector_->CollectGarbage();
...@@ -2685,7 +2692,14 @@ void Heap::CheckNewSpaceExpansionCriteria() { ...@@ -2685,7 +2692,14 @@ void Heap::CheckNewSpaceExpansionCriteria() {
void Heap::EvacuateYoungGeneration() { void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE); TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
base::MutexGuard guard(relocation_mutex()); base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking()); // Young generation garbage collection is orthogonal from full GC marking. It
// is possible that objects that are currently being processed for marking are
// reclaimed in the young generation GC that interleaves concurrent marking.
// Pause concurrent markers to allow processing them using
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
if (!FLAG_concurrent_marking) { if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_); DCHECK(fast_promotion_mode_);
DCHECK(CanPromoteYoungAndExpandOldGeneration(0)); DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
...@@ -2748,7 +2762,14 @@ void Heap::Scavenge() { ...@@ -2748,7 +2762,14 @@ void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE); TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::MutexGuard guard(relocation_mutex()); base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking()); // Young generation garbage collection is orthogonal from full GC marking. It
// is possible that objects that are currently being processed for marking are
// reclaimed in the young generation GC that interleaves concurrent marking.
// Pause concurrent markers to allow processing them using
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
// There are soft limits in the allocation code, designed to trigger a mark // There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to // sweep collection by failing allocations. There is no sense in trying to
// trigger one during scavenge: scavenges allocation should always succeed. // trigger one during scavenge: scavenges allocation should always succeed.
......
...@@ -2460,6 +2460,7 @@ class Heap { ...@@ -2460,6 +2460,7 @@ class Heap {
friend class StressConcurrentAllocationObserver; friend class StressConcurrentAllocationObserver;
friend class Space; friend class Space;
friend class Sweeper; friend class Sweeper;
friend class UnifiedHeapMarkingState;
friend class heap::TestMemoryAllocatorScope; friend class heap::TestMemoryAllocatorScope;
friend class third_party_heap::Heap; friend class third_party_heap::Heap;
friend class third_party_heap::Impl; friend class third_party_heap::Impl;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment