Commit 760e6797 authored by Omer Katz, committed by Commit Bot

cppgc: Remove StatsCollector dependency on HeapBase

Bug: chromium:1056170
Change-Id: I561166a7f1be658c5c35aa1caf8dbbbd2d720ab3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2692815
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Auto-Submit: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72754}
parent 5ec7ca95
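The change is mechanical: StatsCollector's RAII scope classes previously took a
HeapBase& and fetched the collector through it, and the tracing macros reached
the Platform through the heap. Now the scopes take the StatsCollector* directly
and the collector itself carries the Platform*. A minimal, self-contained
sketch of the pattern (simplified stand-in types, not the real cppgc classes,
which also distinguish enabled/disabled and concurrent scope variants):

#include <chrono>
#include <cstdio>
#include <memory>

class Platform {};  // stand-in for cppgc::Platform

class StatsCollector {
 public:
  // After the change: the collector, not the heap, holds the Platform*.
  explicit StatsCollector(Platform* platform) : platform_(platform) {}

  Platform* platform() const { return platform_; }

  // Before: Scope(Heap&, id) reached through the heap for the collector.
  // After: Scope(StatsCollector*, id) depends only on the collector.
  class Scope {
   public:
    Scope(StatsCollector* collector, int scope_id)
        : collector_(collector),
          scope_id_(scope_id),
          start_(std::chrono::steady_clock::now()) {}
    ~Scope() {
      collector_->RecordScopeTime(scope_id_,
                                  std::chrono::steady_clock::now() - start_);
    }

   private:
    StatsCollector* const collector_;
    const int scope_id_;
    std::chrono::steady_clock::time_point start_;
  };

  void RecordScopeTime(int id, std::chrono::steady_clock::duration d) {
    std::printf("scope %d: %lld ns\n", id,
                static_cast<long long>(
                    std::chrono::duration_cast<std::chrono::nanoseconds>(d)
                        .count()));
  }

 private:
  Platform* platform_;
};

class Heap {
 public:
  explicit Heap(Platform* platform)
      : stats_collector_(std::make_unique<StatsCollector>(platform)) {}
  StatsCollector* stats_collector() { return stats_collector_.get(); }

 private:
  std::unique_ptr<StatsCollector> stats_collector_;
};

int main() {
  Platform platform;
  Heap heap(&platform);
  // Call sites change from Scope(heap, id) to
  // Scope(heap.stats_collector(), id), which is exactly the rewrite
  // repeated throughout the diff below.
  StatsCollector::Scope scope(heap.stats_collector(), /*scope_id=*/0);
}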
@@ -296,9 +296,9 @@ bool CppHeap::AdvanceTracing(double deadline_in_ms) {
   // accounting since this scope is also accounted under an outer v8 scope.
   // Make sure to only account this scope once.
   cppgc::internal::StatsCollector::EnabledScope stats_scope(
-      AsBase(), in_atomic_pause_
-                    ? cppgc::internal::StatsCollector::kAtomicMark
-                    : cppgc::internal::StatsCollector::kIncrementalMark);
+      stats_collector(),
+      in_atomic_pause_ ? cppgc::internal::StatsCollector::kAtomicMark
+                       : cppgc::internal::StatsCollector::kIncrementalMark);
   const v8::base::TimeDelta deadline =
       in_atomic_pause_ ? v8::base::TimeDelta::Max()
                        : v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms);
@@ -316,7 +316,7 @@ bool CppHeap::IsTracingDone() { return marking_done_; }
 void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
   CHECK(!in_disallow_gc_scope());
   cppgc::internal::StatsCollector::EnabledScope stats_scope(
-      AsBase(), cppgc::internal::StatsCollector::kAtomicMark);
+      stats_collector(), cppgc::internal::StatsCollector::kAtomicMark);
   in_atomic_pause_ = true;
   if (override_stack_state_) {
     stack_state = *override_stack_state_;
@@ -333,7 +333,7 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
   CHECK(marking_done_);
   {
     cppgc::internal::StatsCollector::EnabledScope stats_scope(
-        AsBase(), cppgc::internal::StatsCollector::kAtomicMark);
+        stats_collector(), cppgc::internal::StatsCollector::kAtomicMark);
     cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(*this);
     marker_->LeaveAtomicPause();
   }
......
@@ -484,7 +484,7 @@ bool Compactor::CancelIfShouldNotCompact(
 Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
   if (!is_enabled_) return CompactableSpaceHandling::kSweep;
-  StatsCollector::DisabledScope stats_scope(*heap_.heap(),
+  StatsCollector::DisabledScope stats_scope(heap_.heap()->stats_collector(),
                                             StatsCollector::kAtomicCompact);
   MovableReferences movable_references(*heap_.heap());
......
@@ -73,7 +73,8 @@ ConcurrentMarkingTask::ConcurrentMarkingTask(
 void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
   StatsCollector::EnabledConcurrentScope stats_scope(
-      concurrent_marker_.heap(), StatsCollector::kConcurrentMark);
+      concurrent_marker_.heap().stats_collector(),
+      StatsCollector::kConcurrentMark);
   if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
     return;
@@ -150,7 +151,7 @@ void ConcurrentMarkingTask::ProcessWorklists(
   {
     StatsCollector::DisabledConcurrentScope stats_scope(
-        concurrent_marker_.heap(),
+        concurrent_marker_.heap().stats_collector(),
         StatsCollector::kConcurrentMarkProcessEphemerons);
     if (!DrainWorklistWithYielding(
             job_delegate, concurrent_marking_state,
......
@@ -68,8 +68,8 @@ HeapBase::HeapBase(
       page_backend_(
           std::make_unique<PageBackend>(platform_->GetPageAllocator())),
 #endif
-      stats_collector_(
-          std::make_unique<StatsCollector>(std::move(histogram_recorder))),
+      stats_collector_(std::make_unique<StatsCollector>(
+          std::move(histogram_recorder), platform_.get())),
       stack_(std::make_unique<heap::base::Stack>(
           v8::base::Stack::GetStackStart())),
       prefinalizer_handler_(std::make_unique<PreFinalizerHandler>(*this)),
......
@@ -209,7 +209,7 @@ void Heap::DisableHeapGrowingForTesting() { growing_.DisableForTesting(); }
 void Heap::FinalizeIncrementalGarbageCollectionIfNeeded(
     Config::StackState stack_state) {
   StatsCollector::EnabledScope stats_scope(
-      *this, StatsCollector::kMarkIncrementalFinalize);
+      stats_collector(), StatsCollector::kMarkIncrementalFinalize);
   FinalizeGarbageCollection(stack_state);
 }
......
@@ -63,7 +63,7 @@ void VisitRememberedSlots(HeapBase& heap,
                           MutatorMarkingState& mutator_marking_state) {
 #if defined(CPPGC_YOUNG_GENERATION)
   StatsCollector::EnabledScope stats_scope(
-      heap, StatsCollector::kMarkVisitRememberedSets);
+      heap.stats_collector(), StatsCollector::kMarkVisitRememberedSets);
   for (void* slot : heap.remembered_slots()) {
     auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
                             ->ObjectHeaderFromInnerAddress(slot);
@@ -151,7 +151,7 @@ MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
 void MarkerBase::IncrementalMarkingTask::Run() {
   if (handle_.IsCanceled()) return;
-  StatsCollector::EnabledScope stats_scope(marker_->heap(),
+  StatsCollector::EnabledScope stats_scope(marker_->heap().stats_collector(),
                                            StatsCollector::kIncrementalMark);
   if (marker_->IncrementalMarkingStep(stack_state_)) {
@@ -204,9 +204,10 @@ MarkerBase::~MarkerBase() {
 void MarkerBase::StartMarking() {
   DCHECK(!is_marking_started_);
   StatsCollector::EnabledScope stats_scope(
-      heap(), config_.marking_type == MarkingConfig::MarkingType::kAtomic
-                  ? StatsCollector::kAtomicMark
-                  : StatsCollector::kIncrementalMark);
+      heap().stats_collector(),
+      config_.marking_type == MarkingConfig::MarkingType::kAtomic
+          ? StatsCollector::kAtomicMark
+          : StatsCollector::kIncrementalMark);
   heap().stats_collector()->NotifyMarkingStarted(config_.collection_type,
                                                  config_.is_forced_gc);
@@ -214,7 +215,7 @@ void MarkerBase::StartMarking() {
   is_marking_started_ = true;
   if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
     StatsCollector::EnabledScope stats_scope(
-        heap(), StatsCollector::kMarkIncrementalStart);
+        heap().stats_collector(), StatsCollector::kMarkIncrementalStart);
     // Performing incremental or concurrent marking.
     schedule_.NotifyIncrementalMarkingStart();
@@ -230,7 +231,7 @@ void MarkerBase::StartMarking() {
 }
 
 void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
-  StatsCollector::EnabledScope stats_scope(heap(),
+  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                            StatsCollector::kMarkAtomicPrologue);
   if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
@@ -260,7 +261,7 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
 }
 
 void MarkerBase::LeaveAtomicPause() {
-  StatsCollector::EnabledScope stats_scope(heap(),
+  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                            StatsCollector::kMarkAtomicEpilogue);
   DCHECK(!incremental_marking_handle_);
   ResetRememberedSet(heap());
@@ -278,7 +279,8 @@ void MarkerBase::LeaveAtomicPause() {
 void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
   DCHECK(is_marking_started_);
-  StatsCollector::EnabledScope stats_scope(heap(), StatsCollector::kAtomicMark);
+  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
+                                           StatsCollector::kAtomicMark);
   EnterAtomicPause(stack_state);
   CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
   mutator_marking_state_.Publish();
@@ -288,7 +290,7 @@ void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
 void MarkerBase::ProcessWeakness() {
   DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
-  StatsCollector::DisabledScope stats_scope(heap(),
+  StatsCollector::DisabledScope stats_scope(heap().stats_collector(),
                                             StatsCollector::kAtomicWeak);
   heap().GetWeakPersistentRegion().Trace(&visitor());
}
void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
StatsCollector::EnabledScope stats_scope(heap(),
StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
StatsCollector::kMarkVisitRoots);
// Reset LABs before scanning roots. LABs are cleared to allow
@@ -320,12 +322,13 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
   {
     {
       StatsCollector::DisabledScope inner_stats_scope(
-          heap(), StatsCollector::kMarkVisitPersistents);
+          heap().stats_collector(), StatsCollector::kMarkVisitPersistents);
       heap().GetStrongPersistentRegion().Trace(&visitor());
     }
     if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
       StatsCollector::DisabledScope inner_stats_scope(
-          heap(), StatsCollector::kMarkVisitCrossThreadPersistents);
+          heap().stats_collector(),
+          StatsCollector::kMarkVisitCrossThreadPersistents);
       g_process_mutex.Get().AssertHeld();
       heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
     }
@@ -333,7 +336,7 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
   if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
     StatsCollector::DisabledScope stack_stats_scope(
-        heap(), StatsCollector::kMarkVisitStack);
+        heap().stats_collector(), StatsCollector::kMarkVisitStack);
     heap().stack()->IteratePointers(&stack_visitor());
   }
   if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
@@ -379,8 +382,9 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
           GetNextIncrementalStepDuration(schedule_, heap_);
     }
     StatsCollector::EnabledScope deadline_scope(
-        heap(), StatsCollector::kMarkTransitiveClosureWithDeadline,
-        "deadline_ms", max_duration.InMillisecondsF());
+        heap().stats_collector(),
+        StatsCollector::kMarkTransitiveClosureWithDeadline, "deadline_ms",
+        max_duration.InMillisecondsF());
     is_done = ProcessWorklistsWithDeadline(
         marked_bytes_limit, v8::base::TimeTicks::Now() + max_duration);
     if (with_schedule) {
@@ -404,7 +408,7 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
 bool MarkerBase::ProcessWorklistsWithDeadline(
     size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
   StatsCollector::EnabledScope stats_scope(
-      heap(), StatsCollector::kMarkTransitiveClosure);
+      heap().stats_collector(), StatsCollector::kMarkTransitiveClosure);
   do {
     if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
         schedule_.ShouldFlushEphemeronPairs()) {
@@ -416,7 +420,7 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
     // checks to guarantee the deadline is not exceeded.
     {
       StatsCollector::EnabledScope inner_scope(
-          heap(), StatsCollector::kMarkProcessBailOutObjects);
+          heap().stats_collector(), StatsCollector::kMarkProcessBailOutObjects);
       if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
                                                  5>(
               mutator_marking_state_, marked_bytes_deadline, time_deadline,
@@ -432,7 +436,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
     {
       StatsCollector::EnabledScope inner_scope(
-          heap(), StatsCollector::kMarkProcessNotFullyconstructedWorklist);
+          heap().stats_collector(),
+          StatsCollector::kMarkProcessNotFullyconstructedWorklist);
       if (!DrainWorklistWithBytesAndTimeDeadline(
               mutator_marking_state_, marked_bytes_deadline, time_deadline,
               mutator_marking_state_
@@ -448,7 +453,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
     {
       StatsCollector::EnabledScope inner_scope(
-          heap(), StatsCollector::kMarkProcessMarkingWorklist);
+          heap().stats_collector(),
+          StatsCollector::kMarkProcessMarkingWorklist);
       if (!DrainWorklistWithBytesAndTimeDeadline(
               mutator_marking_state_, marked_bytes_deadline, time_deadline,
               mutator_marking_state_.marking_worklist(),
@@ -466,7 +472,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
     {
       StatsCollector::EnabledScope inner_scope(
-          heap(), StatsCollector::kMarkProcessWriteBarrierWorklist);
+          heap().stats_collector(),
+          StatsCollector::kMarkProcessWriteBarrierWorklist);
       if (!DrainWorklistWithBytesAndTimeDeadline(
               mutator_marking_state_, marked_bytes_deadline, time_deadline,
               mutator_marking_state_.write_barrier_worklist(),
@@ -481,7 +488,7 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
     {
       StatsCollector::EnabledScope stats_scope(
-          heap(), StatsCollector::kMarkProcessEphemerons);
+          heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
       if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
@@ -498,7 +505,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
 void MarkerBase::MarkNotFullyConstructedObjects() {
   StatsCollector::DisabledScope stats_scope(
-      heap(), StatsCollector::kMarkVisitNotFullyConstructedObjects);
+      heap().stats_collector(),
+      StatsCollector::kMarkVisitNotFullyConstructedObjects);
   std::unordered_set<HeapObjectHeader*> objects =
       mutator_marking_state_.not_fully_constructed_worklist().Extract();
   for (HeapObjectHeader* object : objects) {
......
@@ -6,6 +6,7 @@
 #include <unordered_set>
 
 #include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/stats-collector.h"
 
 namespace cppgc {
@@ -22,7 +23,7 @@ void MutatorMarkingState::FlushNotFullyConstructedObjects() {
 void MutatorMarkingState::FlushDiscoveredEphemeronPairs() {
   StatsCollector::EnabledScope stats_scope(
-      heap_, StatsCollector::kMarkFlushEphemerons);
+      heap_.stats_collector(), StatsCollector::kMarkFlushEphemerons);
   discovered_ephemeron_pairs_worklist_.Publish();
   if (!discovered_ephemeron_pairs_worklist_.IsGlobalEmpty()) {
     ephemeron_pairs_for_processing_worklist_.Merge(
......
@@ -49,7 +49,7 @@ void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
 void PreFinalizerHandler::InvokePreFinalizers() {
   StatsCollector::DisabledScope stats_scope(
-      heap_, StatsCollector::kSweepInvokePreFinalizers);
+      heap_.stats_collector(), StatsCollector::kSweepInvokePreFinalizers);
   DCHECK(CurrentThreadIsCreationThread());
   LivenessBroker liveness_broker = LivenessBrokerFactory::Create();
......
@@ -17,8 +17,10 @@ namespace internal {
 constexpr size_t StatsCollector::kAllocationThresholdBytes;
 
 StatsCollector::StatsCollector(
-    std::unique_ptr<MetricRecorder> histogram_recorder)
-    : metric_recorder_(std::move(histogram_recorder)) {}
+    std::unique_ptr<MetricRecorder> histogram_recorder, Platform* platform)
+    : metric_recorder_(std::move(histogram_recorder)), platform_(platform) {
+  USE(platform_);
+}
 
 void StatsCollector::RegisterObserver(AllocationObserver* observer) {
   DCHECK_EQ(allocation_observers_.end(),
......
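The USE(platform_) in the new constructor presumably keeps builds warning-clean
while platform_ is only read by the tracing machinery; USE() is v8's
evaluate-and-discard helper from src/base/macros.h, which this header already
includes. A rough, runnable stand-in for the idiom (hypothetical names, not
the real macro):

#include <utility>

// Hypothetical stand-in for v8's USE(): evaluate a value and discard it, so
// a field or parameter that is only conditionally read does not warn.
template <typename T>
void Use(T&& value) {
  static_cast<void>(std::forward<T>(value));
}

struct Platform {};

class Collector {
 public:
  explicit Collector(Platform* platform) : platform_(platform) {
    Use(platform_);  // only the tracing code paths actually read platform_
  }

 private:
  Platform* platform_;
};

int main() { Collector collector(nullptr); }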
@@ -10,10 +10,10 @@
 #include <vector>
 
+#include "include/cppgc/platform.h"
 #include "src/base/macros.h"
 #include "src/base/platform/time.h"
 #include "src/heap/cppgc/garbage-collector.h"
-#include "src/heap/cppgc/heap-base.h"
 #include "src/heap/cppgc/metric-recorder.h"
 #include "src/heap/cppgc/trace-event.h"
......@@ -154,9 +154,9 @@ class V8_EXPORT_PRIVATE StatsCollector final {
public:
template <typename... Args>
InternalScope(HeapBase& heap, ScopeIdType scope_id, Args... args)
: heap_(heap),
stats_collector_(heap_.stats_collector()),
InternalScope(StatsCollector* stats_collector, ScopeIdType scope_id,
Args... args)
: stats_collector_(stats_collector),
start_time_(v8::base::TimeTicks::Now()),
scope_id_(scope_id) {
DCHECK_LE(0, scope_id_);
@@ -203,7 +203,6 @@ class V8_EXPORT_PRIVATE StatsCollector final {
     inline void IncreaseScopeTime();
 
-    HeapBase& heap_;
     StatsCollector* const stats_collector_;
     v8::base::TimeTicks start_time_;
     const ScopeIdType scope_id_;
@@ -240,7 +239,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
   // reasonably interesting sizes.
   static constexpr size_t kAllocationThresholdBytes = 1024;
 
-  explicit StatsCollector(std::unique_ptr<MetricRecorder>);
+  StatsCollector(std::unique_ptr<MetricRecorder>, Platform*);
   StatsCollector(const StatsCollector&) = delete;
   StatsCollector& operator=(const StatsCollector&) = delete;
@@ -325,6 +324,8 @@ class V8_EXPORT_PRIVATE StatsCollector final {
   Event previous_;
 
   std::unique_ptr<MetricRecorder> metric_recorder_;
+
+  Platform* platform_;
 };
 
 template <typename Callback>
......
@@ -427,7 +427,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
   void Run(cppgc::JobDelegate* delegate) final {
     StatsCollector::EnabledConcurrentScope stats_scope(
-        heap_, StatsCollector::kConcurrentSweep);
+        heap_.stats_collector(), StatsCollector::kConcurrentSweep);
     for (SpaceState& state : *states_) {
       while (auto page = state.unswept_pages.Pop()) {
@@ -531,7 +531,7 @@ class Sweeper::SweeperImpl final {
   ~SweeperImpl() { CancelSweepers(); }
 
   void Start(SweepingConfig config) {
-    StatsCollector::EnabledScope stats_scope(*heap_->heap(),
+    StatsCollector::EnabledScope stats_scope(heap_->heap()->stats_collector(),
                                              StatsCollector::kAtomicSweep);
     is_in_progress_ = true;
 #if DEBUG
@@ -558,10 +558,10 @@ class Sweeper::SweeperImpl final {
     // allocate new memory.
     if (is_sweeping_on_mutator_thread_) return false;
 
-    StatsCollector::EnabledScope stats_scope(*heap_->heap(),
+    StatsCollector::EnabledScope stats_scope(heap_->heap()->stats_collector(),
                                              StatsCollector::kIncrementalSweep);
     StatsCollector::EnabledScope inner_scope(
-        *heap_->heap(), StatsCollector::kSweepOnAllocation);
+        heap_->heap()->stats_collector(), StatsCollector::kSweepOnAllocation);
     MutatorThreadSweepingScope sweeping_in_progresss(*this);
 
     SpaceState& space_state = space_states_[space->index()];
@@ -597,8 +597,8 @@ class Sweeper::SweeperImpl final {
     {
       StatsCollector::EnabledScope stats_scope(
-          *heap_->heap(), StatsCollector::kIncrementalSweep);
-      StatsCollector::EnabledScope inner_scope(*heap_->heap(),
+          heap_->heap()->stats_collector(), StatsCollector::kIncrementalSweep);
+      StatsCollector::EnabledScope inner_scope(heap_->heap()->stats_collector(),
                                                StatsCollector::kSweepFinalize);
       if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
           concurrent_sweeper_handle_->UpdatePriorityEnabled()) {
@@ -698,14 +698,15 @@ class Sweeper::SweeperImpl final {
     bool sweep_complete;
     {
       StatsCollector::EnabledScope stats_scope(
-          *sweeper_->heap_->heap(), StatsCollector::kIncrementalSweep);
+          sweeper_->heap_->heap()->stats_collector(),
+          StatsCollector::kIncrementalSweep);
       MutatorThreadSweeper sweeper(&sweeper_->space_states_,
                                    sweeper_->platform_);
       {
         StatsCollector::EnabledScope stats_scope(
-            *sweeper_->heap_->heap(), StatsCollector::kSweepIdleStep,
-            "idleDeltaInSeconds",
+            sweeper_->heap_->heap()->stats_collector(),
+            StatsCollector::kSweepIdleStep, "idleDeltaInSeconds",
             (deadline_in_seconds -
              sweeper_->platform_->MonotonicallyIncreasingTime()));
......
@@ -116,7 +116,7 @@ enum CategoryGroupEnabledFlags {
 #define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...)   \
   DCHECK_NOT_NULL(name);                                                    \
   do {                                                                      \
-    cppgc::Platform* platform = heap_.platform();                           \
+    cppgc::Platform* platform = stats_collector_->platform_;                \
     INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                 \
     if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
       cppgc::internal::AddTraceEvent(                                       \
......
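The INTERNAL_TRACE_EVENT_ADD change above is what forces the new constructor
parameter: the macro expands inside StatsCollector's scope classes, where a
stats_collector_ member is in scope but the old heap_ member no longer exists.
A small runnable sketch of a macro with that kind of contract (hypothetical
names, not the real tracing macros):

#include <cstdio>

struct Platform {
  void AddTraceEvent(const char* name) { std::printf("trace: %s\n", name); }
};

struct StatsCollector {
  Platform* platform_;
};

// Like the macro in the diff, this assumes the expansion site has a
// `stats_collector_` member; the platform is reached through the collector,
// not through a heap reference.
#define EXAMPLE_TRACE_EVENT_ADD(name)                 \
  do {                                                \
    Platform* platform = stats_collector_->platform_; \
    platform->AddTraceEvent(name);                    \
  } while (false)

struct EnabledScope {
  StatsCollector* stats_collector_;
  void Enter() { EXAMPLE_TRACE_EVENT_ADD("scope-enter"); }
};

int main() {
  Platform platform;
  StatsCollector collector{&platform};
  EnabledScope scope{&collector};
  scope.Enter();
}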
@@ -60,7 +60,8 @@ void FakeAllocate(StatsCollector* stats_collector, size_t bytes) {
 }  // namespace
 
 TEST(HeapGrowingTest, ConservativeGCInvoked) {
-  StatsCollector stats_collector(nullptr /* metric_recorder */);
+  StatsCollector stats_collector(nullptr /* metric_recorder */,
+                                 nullptr /* platform */);
   MockGarbageCollector gc;
   cppgc::Heap::ResourceConstraints constraints;
   // Force GC at the first update.
@@ -73,7 +74,8 @@ TEST(HeapGrowingTest, ConservativeGCInvoked) {
 }
 
 TEST(HeapGrowingTest, InitialHeapSize) {
-  StatsCollector stats_collector(nullptr /* metric_recorder */);
+  StatsCollector stats_collector(nullptr /* metric_recorder */,
+                                 nullptr /* platform */);
   MockGarbageCollector gc;
   cppgc::Heap::ResourceConstraints constraints;
   // Use larger size to avoid running into small heap optimizations.
@@ -90,7 +92,8 @@ TEST(HeapGrowingTest, InitialHeapSize) {
 TEST(HeapGrowingTest, ConstantGrowingFactor) {
   // Use larger size to avoid running into small heap optimizations.
   constexpr size_t kObjectSize = 10 * HeapGrowing::kMinLimitIncrease;
-  StatsCollector stats_collector(nullptr /* metric_recorder */);
+  StatsCollector stats_collector(nullptr /* metric_recorder */,
+                                 nullptr /* platform */);
   FakeGarbageCollector gc(&stats_collector);
   cppgc::Heap::ResourceConstraints constraints;
   // Force GC at the first update.
@@ -108,7 +111,8 @@ TEST(HeapGrowingTest, ConstantGrowingFactor) {
 TEST(HeapGrowingTest, SmallHeapGrowing) {
   // Larger constant to avoid running into special handling for smaller heaps.
   constexpr size_t kLargeAllocation = 100 * kMB;
-  StatsCollector stats_collector(nullptr /* metric_recorder */);
+  StatsCollector stats_collector(nullptr /* metric_recorder */,
+                                 nullptr /* platform */);
   FakeGarbageCollector gc(&stats_collector);
   cppgc::Heap::ResourceConstraints constraints;
   // Force GC at the first update.
@@ -124,7 +128,8 @@ TEST(HeapGrowingTest, SmallHeapGrowing) {
 }
 
 TEST(HeapGrowingTest, IncrementalGCStarted) {
-  StatsCollector stats_collector(nullptr /* metric_recorder */);
+  StatsCollector stats_collector(nullptr /* metric_recorder */,
+                                 nullptr /* platform */);
   MockGarbageCollector gc;
   cppgc::Heap::ResourceConstraints constraints;
   HeapGrowing growing(&gc, &stats_collector, constraints,
@@ -137,7 +142,8 @@ TEST(HeapGrowingTest, IncrementalGCStarted) {
 }
 
 TEST(HeapGrowingTest, IncrementalGCFinalized) {
-  StatsCollector stats_collector(nullptr /* metric_recorder */);
+  StatsCollector stats_collector(nullptr /* metric_recorder */,
+                                 nullptr /* platform */);
   MockGarbageCollector gc;
   cppgc::Heap::ResourceConstraints constraints;
   HeapGrowing growing(&gc, &stats_collector, constraints,
......
@@ -72,8 +72,9 @@ TEST_F(MetricRecorderTest, IncrementalScopesReportedImmediately) {
   {
     EXPECT_EQ(0u, MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount);
     {
-      StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
-                                         StatsCollector::kIncrementalMark);
+      StatsCollector::EnabledScope scope(
+          Heap::From(GetHeap())->stats_collector(),
+          StatsCollector::kIncrementalMark);
       scope.DecreaseStartTimeForTesting(
           v8::base::TimeDelta::FromMilliseconds(1));
     }
@@ -86,8 +87,9 @@ TEST_F(MetricRecorderTest, IncrementalScopesReportedImmediately) {
     EXPECT_EQ(0u,
              MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount);
     {
-      StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
-                                         StatsCollector::kIncrementalSweep);
+      StatsCollector::EnabledScope scope(
+          Heap::From(GetHeap())->stats_collector(),
+          StatsCollector::kIncrementalSweep);
       scope.DecreaseStartTimeForTesting(
           v8::base::TimeDelta::FromMilliseconds(1));
     }
@@ -107,28 +109,30 @@ TEST_F(MetricRecorderTest, NonIncrementlaScopesNotReportedImmediately) {
   MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount = 0u;
   StartGC();
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kAtomicMark);
   }
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kAtomicWeak);
   }
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kAtomicCompact);
   }
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kAtomicSweep);
   }
   {
     StatsCollector::EnabledConcurrentScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kConcurrentMark);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kConcurrentMark);
   }
   {
     StatsCollector::EnabledConcurrentScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kConcurrentSweep);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kConcurrentSweep);
   }
   EXPECT_EQ(0u, MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount);
   EXPECT_EQ(0u, MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount);
@@ -152,50 +156,52 @@ TEST_F(MetricRecorderTest, CycleEndHistogramReportsCorrectValues) {
   EndGC(1000);
   StartGC();
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kIncrementalMark);
     scope.DecreaseStartTimeForTesting(
        v8::base::TimeDelta::FromMilliseconds(10));
   }
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kIncrementalSweep);
     scope.DecreaseStartTimeForTesting(
        v8::base::TimeDelta::FromMilliseconds(20));
   }
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kAtomicMark);
     scope.DecreaseStartTimeForTesting(
        v8::base::TimeDelta::FromMilliseconds(30));
   }
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kAtomicWeak);
     scope.DecreaseStartTimeForTesting(
        v8::base::TimeDelta::FromMilliseconds(50));
   }
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kAtomicCompact);
     scope.DecreaseStartTimeForTesting(
        v8::base::TimeDelta::FromMilliseconds(60));
   }
   {
-    StatsCollector::EnabledScope scope(*Heap::From(GetHeap()),
+    StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
                                        StatsCollector::kAtomicSweep);
     scope.DecreaseStartTimeForTesting(
        v8::base::TimeDelta::FromMilliseconds(70));
   }
   {
     StatsCollector::EnabledConcurrentScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kConcurrentMark);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kConcurrentMark);
     scope.DecreaseStartTimeForTesting(
        v8::base::TimeDelta::FromMilliseconds(80));
   }
   {
     StatsCollector::EnabledConcurrentScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kConcurrentSweep);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kConcurrentSweep);
     scope.DecreaseStartTimeForTesting(
        v8::base::TimeDelta::FromMilliseconds(100));
   }
......
@@ -109,7 +109,8 @@ TEST_F(CppgcTracingScopesTest, DisabledScope) {
   ResetDelegatingTracingController();
   {
     StatsCollector::DisabledScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kMarkProcessMarkingWorklist);
   }
   EXPECT_EQ(0u, DelegatingTracingControllerImpl::AddTraceEvent_callcount);
   EndGC();
@@ -121,7 +122,8 @@ TEST_F(CppgcTracingScopesTest, EnabledScope) {
   ResetDelegatingTracingController("CppGC.MarkProcessMarkingWorklist");
   {
     StatsCollector::EnabledScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kMarkProcessMarkingWorklist);
   }
   EXPECT_EQ(2u, DelegatingTracingControllerImpl::AddTraceEvent_callcount);
   EndGC();
@@ -131,7 +133,7 @@ TEST_F(CppgcTracingScopesTest, EnabledScope) {
   ResetDelegatingTracingController("CppGC.MarkProcessWriteBarrierWorklist");
   {
     StatsCollector::EnabledScope scope(
-        *Heap::From(GetHeap()),
+        Heap::From(GetHeap())->stats_collector(),
         StatsCollector::kMarkProcessWriteBarrierWorklist);
   }
   EXPECT_EQ(2u, DelegatingTracingControllerImpl::AddTraceEvent_callcount);
@@ -146,7 +148,8 @@ TEST_F(CppgcTracingScopesTest, EnabledScopeWithArgs) {
   ResetDelegatingTracingController();
   {
     StatsCollector::EnabledScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kMarkProcessMarkingWorklist);
   }
   EXPECT_EQ(2, DelegatingTracingControllerImpl::stored_num_args);
   EndGC();
@@ -156,8 +159,8 @@ TEST_F(CppgcTracingScopesTest, EnabledScopeWithArgs) {
   ResetDelegatingTracingController();
   {
     StatsCollector::EnabledScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
-        "arg1", 1);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kMarkProcessMarkingWorklist, "arg1", 1);
   }
   EXPECT_EQ(3, DelegatingTracingControllerImpl::stored_num_args);
   EndGC();
@@ -167,8 +170,8 @@ TEST_F(CppgcTracingScopesTest, EnabledScopeWithArgs) {
   ResetDelegatingTracingController();
   {
     StatsCollector::EnabledScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
-        "arg1", 1, "arg2", 2);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kMarkProcessMarkingWorklist, "arg1", 1, "arg2", 2);
   }
   EXPECT_EQ(4, DelegatingTracingControllerImpl::stored_num_args);
   EndGC();
@@ -181,8 +184,9 @@ TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
   ResetDelegatingTracingController();
   {
     StatsCollector::EnabledScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
-        "uint_arg", 13u, "bool_arg", false);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kMarkProcessMarkingWorklist, "uint_arg", 13u,
+        "bool_arg", false);
   }
   FindArgument("uint_arg", TRACE_VALUE_TYPE_UINT, 13);
   FindArgument("bool_arg", TRACE_VALUE_TYPE_BOOL, false);
@@ -193,8 +197,9 @@ TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
   ResetDelegatingTracingController();
   {
     StatsCollector::EnabledScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
-        "neg_int_arg", -5, "pos_int_arg", 7);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kMarkProcessMarkingWorklist, "neg_int_arg", -5,
+        "pos_int_arg", 7);
   }
   FindArgument("neg_int_arg", TRACE_VALUE_TYPE_INT, -5);
   FindArgument("pos_int_arg", TRACE_VALUE_TYPE_INT, 7);
@@ -207,8 +212,9 @@ TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
   const char* string_value = "test";
   {
     StatsCollector::EnabledScope scope(
-        *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
-        "string_arg", string_value, "double_arg", double_value);
+        Heap::From(GetHeap())->stats_collector(),
+        StatsCollector::kMarkProcessMarkingWorklist, "string_arg",
+        string_value, "double_arg", double_value);
   }
   FindArgument("string_arg", TRACE_VALUE_TYPE_STRING,
                reinterpret_cast<uint64_t>(string_value));
@@ -245,7 +251,7 @@ TEST_F(CppgcTracingScopesTest, TestIndividualScopes) {
     DelegatingTracingControllerImpl::check_expectations = false;
     {
      StatsCollector::EnabledScope scope(
-          *Heap::From(GetHeap()),
+          Heap::From(GetHeap())->stats_collector(),
          static_cast<StatsCollector::ScopeId>(scope_id));
      v8::base::TimeTicks time = v8::base::TimeTicks::Now();
      while (time == v8::base::TimeTicks::Now()) {
@@ -278,7 +284,7 @@ TEST_F(CppgcTracingScopesTest, TestIndividualConcurrentScopes) {
    DelegatingTracingControllerImpl::check_expectations = false;
    {
      StatsCollector::EnabledConcurrentScope scope(
-          *Heap::From(GetHeap()),
+          Heap::From(GetHeap())->stats_collector(),
          static_cast<StatsCollector::ConcurrentScopeId>(scope_id));
      v8::base::TimeTicks time = v8::base::TimeTicks::Now();
      while (time == v8::base::TimeTicks::Now()) {
......
@@ -18,7 +18,8 @@ constexpr size_t kMinReportedSize = StatsCollector::kAllocationThresholdBytes;
 class StatsCollectorTest : public ::testing::Test {
  public:
-  StatsCollectorTest() : stats(nullptr /* metric_recorder */) {}
+  StatsCollectorTest()
+      : stats(nullptr /* metric_recorder */, nullptr /* platform */) {}
 
   void FakeAllocate(size_t bytes) {
     stats.NotifyAllocation(bytes);
......