Commit 6123c571 authored by Michael Lippautz's avatar Michael Lippautz Committed by V8 LUCI CQ

cppgc: Move incremental step on allocation to allocation observer

In addition, trigger the observer only every ~256KiB to avoid
excessive incremental marking steps on fragmented heaps that have to
set up LABs repeatedly.

Bug: v8:12285
Change-Id: Id3d85d2c3f96d9d914c731f998df827898e1863d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3208810
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77278}
parent e1034506
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#include "src/heap/cppgc/heap-page.h" #include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-statistics-collector.h" #include "src/heap/cppgc/heap-statistics-collector.h"
#include "src/heap/cppgc/heap-visitor.h" #include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/marking-verifier.h" #include "src/heap/cppgc/marking-verifier.h"
#include "src/heap/cppgc/object-view.h" #include "src/heap/cppgc/object-view.h"
#include "src/heap/cppgc/page-memory.h" #include "src/heap/cppgc/page-memory.h"
...@@ -97,10 +96,6 @@ size_t HeapBase::ObjectPayloadSize() const { ...@@ -97,10 +96,6 @@ size_t HeapBase::ObjectPayloadSize() const {
return ObjectSizeCounter().GetSize(const_cast<RawHeap&>(raw_heap())); return ObjectSizeCounter().GetSize(const_cast<RawHeap&>(raw_heap()));
} }
// Lets an in-flight incremental GC make marking progress in response to an
// allocation. No-op when no marking session is active.
void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
  if (!marker_) return;
  marker_->AdvanceMarkingOnAllocation();
}
size_t HeapBase::ExecutePreFinalizers() { size_t HeapBase::ExecutePreFinalizers() {
#ifdef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS #ifdef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
// Allocations in pre finalizers should not trigger another GC. // Allocations in pre finalizers should not trigger another GC.
......
...@@ -172,8 +172,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle { ...@@ -172,8 +172,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
return override_stack_state_.get(); return override_stack_state_.get();
} }
void AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
// Termination drops all roots (clears them out) and runs garbage collections // Termination drops all roots (clears them out) and runs garbage collections
// in a bounded fixed point loop until no new objects are created in // in a bounded fixed point loop until no new objects are created in
// destructors. Exceeding the loop bound results in a crash. // destructors. Exceeding the loop bound results in a crash.
......
...@@ -4,12 +4,14 @@ ...@@ -4,12 +4,14 @@
#include "src/heap/cppgc/marker.h" #include "src/heap/cppgc/marker.h"
#include <cstddef>
#include <cstdint> #include <cstdint>
#include <memory> #include <memory>
#include "include/cppgc/heap-consistency.h" #include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h" #include "include/cppgc/platform.h"
#include "src/base/platform/time.h" #include "src/base/platform/time.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h" #include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h" #include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h" #include "src/heap/cppgc/heap-visitor.h"
...@@ -201,6 +203,27 @@ MarkerBase::~MarkerBase() { ...@@ -201,6 +203,27 @@ MarkerBase::~MarkerBase() {
marking_worklists_.weak_containers_worklist()->Clear(); marking_worklists_.weak_containers_worklist()->Clear();
} }
// Observes allocation volume via the StatsCollector and triggers an
// incremental marking step once enough bytes have accumulated since the last
// step. Batching the trigger avoids excessive marking steps on fragmented
// heaps that set up LABs repeatedly.
class MarkerBase::IncrementalMarkingAllocationObserver final
    : public StatsCollector::AllocationObserver {
 public:
  // Minimum number of newly allocated bytes between two marking steps.
  static constexpr size_t kMinAllocatedBytesPerStep = 256 * kKB;

  explicit IncrementalMarkingAllocationObserver(MarkerBase& marker)
      : marker_(marker) {}

  void AllocatedObjectSizeIncreased(size_t delta) final {
    bytes_since_last_step_ += delta;
    if (bytes_since_last_step_ <= kMinAllocatedBytesPerStep) return;
    marker_.AdvanceMarkingOnAllocation();
    bytes_since_last_step_ = 0;
  }

 private:
  MarkerBase& marker_;
  // Bytes allocated since the previous marking step (reset on each step).
  size_t bytes_since_last_step_ = 0;
};
void MarkerBase::StartMarking() { void MarkerBase::StartMarking() {
DCHECK(!is_marking_); DCHECK(!is_marking_);
StatsCollector::EnabledScope stats_scope( StatsCollector::EnabledScope stats_scope(
...@@ -227,6 +250,10 @@ void MarkerBase::StartMarking() { ...@@ -227,6 +250,10 @@ void MarkerBase::StartMarking() {
mutator_marking_state_.Publish(); mutator_marking_state_.Publish();
concurrent_marker_->Start(); concurrent_marker_->Start();
} }
incremental_marking_allocation_observer_ =
std::make_unique<IncrementalMarkingAllocationObserver>(*this);
heap().stats_collector()->RegisterObserver(
incremental_marking_allocation_observer_.get());
} }
} }
...@@ -240,6 +267,9 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) { ...@@ -240,6 +267,9 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
// Cancel remaining concurrent/incremental tasks. // Cancel remaining concurrent/incremental tasks.
concurrent_marker_->Cancel(); concurrent_marker_->Cancel();
incremental_marking_handle_.Cancel(); incremental_marking_handle_.Cancel();
heap().stats_collector()->UnregisterObserver(
incremental_marking_allocation_observer_.get());
incremental_marking_allocation_observer_.reset();
} }
config_.stack_state = stack_state; config_.stack_state = stack_state;
config_.marking_type = MarkingConfig::MarkingType::kAtomic; config_.marking_type = MarkingConfig::MarkingType::kAtomic;
......
...@@ -83,9 +83,6 @@ class V8_EXPORT_PRIVATE MarkerBase { ...@@ -83,9 +83,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
v8::base::TimeDelta = kMaximumIncrementalStepDuration, v8::base::TimeDelta = kMaximumIncrementalStepDuration,
size_t marked_bytes_limit = 0); size_t marked_bytes_limit = 0);
// Makes marking progress when allocating a new LAB.
void AdvanceMarkingOnAllocation();
// Signals leaving the atomic marking pause. This method expects no more // Signals leaving the atomic marking pause. This method expects no more
// objects to be marked and merely updates marking states if needed. // objects to be marked and merely updates marking states if needed.
void LeaveAtomicPause(); void LeaveAtomicPause();
...@@ -141,6 +138,8 @@ class V8_EXPORT_PRIVATE MarkerBase { ...@@ -141,6 +138,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool IsMarking() const { return is_marking_; } bool IsMarking() const { return is_marking_; }
protected: protected:
class IncrementalMarkingAllocationObserver;
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration = static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2); v8::base::TimeDelta::FromMilliseconds(2);
...@@ -172,12 +171,16 @@ class V8_EXPORT_PRIVATE MarkerBase { ...@@ -172,12 +171,16 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool IncrementalMarkingStep(MarkingConfig::StackState); bool IncrementalMarkingStep(MarkingConfig::StackState);
void AdvanceMarkingOnAllocation();
HeapBase& heap_; HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default(); MarkingConfig config_ = MarkingConfig::Default();
cppgc::Platform* platform_; cppgc::Platform* platform_;
std::shared_ptr<cppgc::TaskRunner> foreground_task_runner_; std::shared_ptr<cppgc::TaskRunner> foreground_task_runner_;
IncrementalMarkingTask::Handle incremental_marking_handle_; IncrementalMarkingTask::Handle incremental_marking_handle_;
std::unique_ptr<IncrementalMarkingAllocationObserver>
incremental_marking_allocation_observer_;
MarkingWorklists marking_worklists_; MarkingWorklists marking_worklists_;
MutatorMarkingState mutator_marking_state_; MutatorMarkingState mutator_marking_state_;
......
...@@ -118,7 +118,6 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size, ...@@ -118,7 +118,6 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo) { GCInfoIndex gcinfo) {
void* memory = OutOfLineAllocateImpl(space, size, gcinfo); void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
stats_collector_.NotifySafePointForConservativeCollection(); stats_collector_.NotifySafePointForConservativeCollection();
raw_heap_.heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
if (prefinalizer_handler_.IsInvokingPreFinalizers()) { if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
// Objects allocated during pre finalizers should be allocated as black // Objects allocated during pre finalizers should be allocated as black
// since marking is already done. Atomics are not needed because there is // since marking is already done. Atomics are not needed because there is
......
...@@ -34,7 +34,8 @@ void StatsCollector::UnregisterObserver(AllocationObserver* observer) { ...@@ -34,7 +34,8 @@ void StatsCollector::UnregisterObserver(AllocationObserver* observer) {
auto it = std::find(allocation_observers_.begin(), auto it = std::find(allocation_observers_.begin(),
allocation_observers_.end(), observer); allocation_observers_.end(), observer);
DCHECK_NE(allocation_observers_.end(), it); DCHECK_NE(allocation_observers_.end(), it);
allocation_observers_.erase(it); *it = nullptr;
allocation_observer_deleted_ = true;
} }
void StatsCollector::NotifyAllocation(size_t bytes) { void StatsCollector::NotifyAllocation(size_t bytes) {
......
...@@ -218,8 +218,9 @@ class V8_EXPORT_PRIVATE StatsCollector final { ...@@ -218,8 +218,9 @@ class V8_EXPORT_PRIVATE StatsCollector final {
using DisabledConcurrentScope = InternalScope<kDisabled, kConcurrentThread>; using DisabledConcurrentScope = InternalScope<kDisabled, kConcurrentThread>;
using EnabledConcurrentScope = InternalScope<kEnabled, kConcurrentThread>; using EnabledConcurrentScope = InternalScope<kEnabled, kConcurrentThread>;
// Observer for allocated object size. May e.g. be used to implement heap
// growing heuristics. Observers may register/unregister observers at any
// time when being invoked.
class AllocationObserver { class AllocationObserver {
public: public:
// Called after observing at least // Called after observing at least
...@@ -346,6 +347,7 @@ class V8_EXPORT_PRIVATE StatsCollector final { ...@@ -346,6 +347,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// vector to allow fast iteration of observers. Register/Unregisters only // vector to allow fast iteration of observers. Register/Unregisters only
// happens on startup/teardown. // happens on startup/teardown.
std::vector<AllocationObserver*> allocation_observers_; std::vector<AllocationObserver*> allocation_observers_;
bool allocation_observer_deleted_ = false;
GarbageCollectionState gc_state_ = GarbageCollectionState::kNotRunning; GarbageCollectionState gc_state_ = GarbageCollectionState::kNotRunning;
...@@ -363,8 +365,19 @@ class V8_EXPORT_PRIVATE StatsCollector final { ...@@ -363,8 +365,19 @@ class V8_EXPORT_PRIVATE StatsCollector final {
template <typename Callback> template <typename Callback>
void StatsCollector::ForAllAllocationObservers(Callback callback) { void StatsCollector::ForAllAllocationObservers(Callback callback) {
for (AllocationObserver* observer : allocation_observers_) { // Iterate using indices to allow push_back() of new observers.
callback(observer); for (size_t i = 0; i < allocation_observers_.size(); ++i) {
auto* observer = allocation_observers_[i];
if (observer) {
callback(observer);
}
}
if (allocation_observer_deleted_) {
allocation_observers_.erase(
std::remove(allocation_observers_.begin(), allocation_observers_.end(),
nullptr),
allocation_observers_.end());
allocation_observer_deleted_ = false;
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment