Commit 64bf4c53 authored by Clemens Backes, committed by Commit Bot

Revert "cppgc: Use tracing scopes"

This reverts commit 548fe208.

Reason for revert: Issues on Mac64: https://ci.chromium.org/p/v8/builders/ci/V8%20Mac64%20-%20debug/31710

Original change's description:
> cppgc: Use tracing scopes
>
> The scopes themselves mostly have the same coverage as current scopes in
> blink. A few exception due to encapsulation exist and are highlighted as
> comments on the CL.
>
> Bug: chromium:1056170
> Change-Id: I48af2cfdfd53a8caa1ab5d805d377f6f13a825bc
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2540552
> Commit-Queue: Omer Katz <omerkatz@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#71285}

TBR=ulan@chromium.org,mlippautz@chromium.org,omerkatz@chromium.org

Change-Id: I20dce9309dcaeff6ea61bdc51df3a2f62c2a103f
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:1056170
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2550782
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71289}
parent af7f20fb
......@@ -206,26 +206,16 @@ void CppHeap::TracePrologue(TraceFlags flags) {
}
bool CppHeap::AdvanceTracing(double deadline_in_ms) {
v8::base::TimeDelta deadline =
is_in_final_pause_
? v8::base::TimeDelta::Max()
: v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms);
cppgc::internal::StatsCollector::EnabledScope stats_scope(
AsBase(),
is_in_final_pause_
? cppgc::internal::StatsCollector::kAtomicPauseMarkTransitiveClosure
: cppgc::internal::StatsCollector::kUnifiedMarkingStep);
// TODO(chromium:1056170): Replace when unified heap transitions to
// bytes-based deadline.
marking_done_ = marker_->AdvanceMarkingWithMaxDuration(deadline);
DCHECK_IMPLIES(is_in_final_pause_, marking_done_);
// TODO(chromium:1056170): Replace std::numeric_limits<size_t>::max() with a
// proper deadline when unified heap transitions to bytes-based deadline.
marking_done_ = marker_->AdvanceMarkingWithMaxDuration(
v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms));
return marking_done_;
}
bool CppHeap::IsTracingDone() { return marking_done_; }
void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
is_in_final_pause_ = true;
marker_->EnterAtomicPause(stack_state);
if (compactor_.CancelIfShouldNotCompact(
UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic,
......@@ -235,40 +225,30 @@ void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
}
void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
CHECK(is_in_final_pause_);
CHECK(marking_done_);
{
// Weakness callbacks and pre-finalizers are forbidden from allocating
// objects.
cppgc::internal::ObjectAllocator::NoAllocationScope no_allocation_scope_(
object_allocator_);
marker_->LeaveAtomicPause();
is_in_final_pause_ = false;
prefinalizer_handler()->InvokePreFinalizers();
}
{
cppgc::internal::StatsCollector::EnabledScope stats(
AsBase(), cppgc::internal::StatsCollector::kAtomicPauseSweepAndCompact);
{
cppgc::internal::ObjectAllocator::NoAllocationScope no_allocation_scope_(
object_allocator_);
prefinalizer_handler()->InvokePreFinalizers();
}
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
UnifiedHeapMarkingVerifier verifier(*this);
verifier.Run(cppgc::Heap::StackState::kNoHeapPointers);
UnifiedHeapMarkingVerifier verifier(*this);
verifier.Run(cppgc::Heap::StackState::kNoHeapPointers);
#endif
{
NoGCScope no_gc(*this);
cppgc::internal::Sweeper::SweepingConfig::CompactableSpaceHandling
compactable_space_handling = compactor_.CompactSpacesIfEnabled();
const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
cppgc::internal::Sweeper::SweepingConfig::SweepingType::
kIncrementalAndConcurrent,
compactable_space_handling};
sweeper().Start(sweeping_config);
}
cppgc::internal::Sweeper::SweepingConfig::CompactableSpaceHandling
compactable_space_handling = compactor_.CompactSpacesIfEnabled();
{
NoGCScope no_gc(*this);
const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
cppgc::internal::Sweeper::SweepingConfig::SweepingType::
kIncrementalAndConcurrent,
compactable_space_handling};
sweeper().Start(sweeping_config);
}
sweeper().NotifyDoneIfNeeded();
}
......
......@@ -58,7 +58,6 @@ class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
Isolate& isolate_;
bool marking_done_ = false;
bool is_in_final_pause_ = false;
};
} // namespace internal
......
......@@ -16,7 +16,6 @@
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
namespace internal {
......@@ -484,9 +483,6 @@ bool Compactor::CancelIfShouldNotCompact(
Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
if (!is_enabled_) return CompactableSpaceHandling::kSweep;
StatsCollector::DisabledScope stats_scope(
*heap_.heap(), StatsCollector::kAtomicPauseCompaction);
MovableReferences movable_references(*heap_.heap());
CompactionWorklists::MovableReferencesWorklist::Local local(
......
......@@ -10,7 +10,6 @@
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
namespace internal {
......@@ -72,9 +71,6 @@ ConcurrentMarkingTask::ConcurrentMarkingTask(
: concurrent_marker_(concurrent_marker) {}
void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
StatsCollector::EnabledConcurrentScope stats_scope(
concurrent_marker_.heap(), StatsCollector::kConcurrentMarkingStep);
if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
return;
ConcurrentMarkingState concurrent_marking_state(
......@@ -148,22 +144,16 @@ void ConcurrentMarkingTask::ProcessWorklists(
return;
}
{
StatsCollector::DisabledConcurrentScope stats_scope(
concurrent_marker_.heap(),
StatsCollector::kConcurrentMarkInvokeEphemeronCallbacks);
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state
.ephemeron_pairs_for_processing_worklist(),
[&concurrent_marking_state](
const MarkingWorklists::EphemeronPairItem& item) {
concurrent_marking_state.ProcessEphemeron(item.key,
item.value_desc);
})) {
return;
}
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state.ephemeron_pairs_for_processing_worklist(),
[&concurrent_marking_state](
const MarkingWorklists::EphemeronPairItem& item) {
concurrent_marking_state.ProcessEphemeron(item.key,
item.value_desc);
})) {
return;
}
} while (
!concurrent_marking_state.marking_worklist().IsLocalAndGlobalEmpty());
......
......@@ -69,7 +69,7 @@ HeapBase::HeapBase(
stats_collector_(std::make_unique<StatsCollector>()),
stack_(std::make_unique<heap::base::Stack>(
v8::base::Stack::GetStackStart())),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>(*this)),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()),
compactor_(raw_heap_),
object_allocator_(&raw_heap_, page_backend_.get(),
stats_collector_.get()),
......
......@@ -12,7 +12,6 @@
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/marking-verifier.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
......@@ -156,28 +155,20 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
config_.stack_state = stack_state;
DCHECK(marker_);
{
// This guards atomic pause marking, meaning that no internal method or
// Pre finalizers are forbidden from allocating objects. Note that this also
// guard atomic pause marking below, meaning that no internal method or
// external callbacks are allowed to allocate new objects.
ObjectAllocator::NoAllocationScope no_allocation_scope_(object_allocator_);
marker_->FinishMarking(stack_state);
prefinalizer_handler_->InvokePreFinalizers();
}
{
StatsCollector::EnabledScope stats(
*this, StatsCollector::kAtomicPauseSweepAndCompact);
{
// Pre finalizers are forbidden from allocating objects.
ObjectAllocator::NoAllocationScope no_allocation_scope_(
object_allocator_);
prefinalizer_handler_->InvokePreFinalizers();
}
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
MarkingVerifier verifier(*this);
verifier.Run(stack_state);
MarkingVerifier verifier(*this);
verifier.Run(stack_state);
#endif
{
NoGCScope no_gc(*this);
const Sweeper::SweepingConfig sweeping_config{
config_.sweeping_type,
......@@ -191,13 +182,5 @@ void Heap::PostGarbageCollection() { gc_in_progress_ = false; }
void Heap::DisableHeapGrowingForTesting() { growing_.DisableForTesting(); }
void Heap::FinalizeIncrementalGarbageCollectionIfNeeded(
Config::StackState stack_state) {
StatsCollector::EnabledScope stats_scope(
*this, StatsCollector::kIncrementalMarkingFinalize);
FinalizeGarbageCollection(stack_state);
}
} // namespace internal
} // namespace cppgc
......@@ -44,7 +44,10 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
void StartGarbageCollection(Config);
void FinalizeGarbageCollection(Config::StackState);
void FinalizeIncrementalGarbageCollectionIfNeeded(Config::StackState) final;
void FinalizeIncrementalGarbageCollectionIfNeeded(
Config::StackState stack_state) final {
FinalizeGarbageCollection(stack_state);
}
void PostGarbageCollection() final;
......
......@@ -59,8 +59,6 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
void VisitRememberedSlots(HeapBase& heap,
MutatorMarkingState& mutator_marking_state) {
#if defined(CPPGC_YOUNG_GENERATION)
StatsCollector::EnabledScope stats_scope(
heap(), StatsCollector::kVisitRememberedSets);
for (void* slot : heap.remembered_slots()) {
auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
->ObjectHeaderFromInnerAddress(slot);
......@@ -202,9 +200,6 @@ void MarkerBase::StartMarking() {
is_marking_started_ = true;
if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
StatsCollector::EnabledScope stats_scope(
heap(), StatsCollector::kIncrementalMarkingStartMarking);
// Performing incremental or concurrent marking.
schedule_.NotifyIncrementalMarkingStart();
// Scanning the stack is expensive so we only do it at the atomic pause.
......@@ -219,9 +214,6 @@ void MarkerBase::StartMarking() {
}
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
StatsCollector::EnabledScope stats_scope(
heap(), StatsCollector::kAtomicPauseMarkPrologue);
if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
// Cancel remaining concurrent/incremental tasks.
concurrent_marker_->Cancel();
......@@ -236,102 +228,66 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
// is either cleared or the object is retained.
g_process_mutex.Pointer()->Lock();
{
StatsCollector::EnabledScope inner_stats_scope(
heap(), StatsCollector::kAtomicPauseMarkRoots);
// VisitRoots also resets the LABs.
VisitRoots(config_.stack_state);
if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
} else {
MarkNotFullyConstructedObjects();
}
// VisitRoots also resets the LABs.
VisitRoots(config_.stack_state);
if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
} else {
MarkNotFullyConstructedObjects();
}
}
void MarkerBase::LeaveAtomicPause() {
StatsCollector::EnabledScope stats_scope(
heap(), StatsCollector::kAtomicPauseMarkEpilogue);
DCHECK(!incremental_marking_handle_);
ResetRememberedSet(heap());
heap().stats_collector()->NotifyMarkingCompleted(
// GetOverallMarkedBytes also includes concurrently marked bytes.
schedule_.GetOverallMarkedBytes());
is_marking_started_ = false;
{
// Weakness callbacks are forbidden from allocating objects.
ObjectAllocator::NoAllocationScope no_allocation_scope_(
heap_.object_allocator());
ProcessWeakness();
}
ProcessWeakness();
g_process_mutex.Pointer()->Unlock();
}
void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
DCHECK(is_marking_started_);
EnterAtomicPause(stack_state);
{
StatsCollector::EnabledScope advance_tracing_scope(
heap(), StatsCollector::kAtomicPauseMarkTransitiveClosure);
CHECK(ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
v8::base::TimeTicks::Max()));
}
CHECK(ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
v8::base::TimeTicks::Max()));
mutator_marking_state_.Publish();
LeaveAtomicPause();
}
void MarkerBase::ProcessWeakness() {
DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
heap().GetWeakPersistentRegion().Trace(&visitor());
// Processing cross-thread handles requires taking the process lock.
g_process_mutex.Get().AssertHeld();
heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());
{
StatsCollector::DisabledScope stats_scope(
heap(), StatsCollector::kMarkWeakProcessing);
// Call weak callbacks on objects that may now be pointing to dead objects.
MarkingWorklists::WeakCallbackItem item;
LivenessBroker broker = LivenessBrokerFactory::Create();
MarkingWorklists::WeakCallbackWorklist::Local& local =
mutator_marking_state_.weak_callback_worklist();
while (local.Pop(&item)) {
item.callback(broker, item.parameter);
}
// Call weak callbacks on objects that may now be pointing to dead objects.
MarkingWorklists::WeakCallbackItem item;
LivenessBroker broker = LivenessBrokerFactory::Create();
MarkingWorklists::WeakCallbackWorklist::Local& local =
mutator_marking_state_.weak_callback_worklist();
while (local.Pop(&item)) {
item.callback(broker, item.parameter);
}
// Weak callbacks should not add any new objects for marking.
DCHECK(marking_worklists_.marking_worklist()->IsEmpty());
}
void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
StatsCollector::EnabledScope stats_scope(heap(), StatsCollector::kVisitRoots);
// Reset LABs before scanning roots. LABs are cleared to allow
// ObjectStartBitmap handling without considering LABs.
heap().object_allocator().ResetLinearAllocationBuffers();
{
StatsCollector::DisabledScope stats_scope(
heap(), StatsCollector::kVisitPersistentRoots);
{
StatsCollector::DisabledScope inner_stats_scope(
heap(), StatsCollector::kVisitPersistents);
heap().GetStrongPersistentRegion().Trace(&visitor());
}
if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
StatsCollector::DisabledScope inner_stats_scope(
heap(), StatsCollector::kVisitCrossThreadPersistents);
g_process_mutex.Get().AssertHeld();
heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
}
heap().GetStrongPersistentRegion().Trace(&visitor());
if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
g_process_mutex.Get().AssertHeld();
heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
}
if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
StatsCollector::DisabledScope stack_stats_scope(
heap(), StatsCollector::kVisitStackRoots);
heap().stack()->IteratePointers(&stack_visitor());
}
if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
......@@ -352,9 +308,6 @@ bool MarkerBase::IncrementalMarkingStepForTesting(
}
bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
StatsCollector::EnabledScope stats_scope(
heap(), StatsCollector::kIncrementalMarkingStep);
if (stack_state == MarkingConfig::StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
}
......@@ -380,9 +333,6 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
if (!incremental_marking_disabled_for_testing_) {
size_t step_size_in_bytes =
GetNextIncrementalStepDuration(schedule_, heap_);
StatsCollector::EnabledScope deadline_scope(
heap(), StatsCollector::kIncrementalMarkingWithDeadline, "deadline_ms",
max_duration.InMillisecondsF());
is_done = ProcessWorklistsWithDeadline(
mutator_marking_state_.marked_bytes() + step_size_in_bytes,
v8::base::TimeTicks::Now() + max_duration);
......@@ -410,97 +360,70 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
mutator_marking_state_.FlushDiscoveredEphemeronPairs();
}
StatsCollector::EnabledScope stats_scope(
heap(), StatsCollector::kMarkProcessWorklists);
// Bailout objects may be complicated to trace and thus might take longer
// than other objects. Therefore we reduce the interval between deadline
// checks to guarantee the deadline is not exceeded.
{
StatsCollector::EnabledScope inner_scope(
heap(), StatsCollector::kMarkBailOutObjects);
if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
5>(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.concurrent_marking_bailout_worklist(),
[this](
const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
item.callback(&visitor(), item.parameter);
})) {
return false;
}
if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
5>(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.concurrent_marking_bailout_worklist(),
[this](const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
item.callback(&visitor(), item.parameter);
})) {
return false;
}
{
StatsCollector::EnabledScope inner_scope(
heap(), StatsCollector::kMarkProcessNotFullyconstructeddWorklist);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_
.previously_not_fully_constructed_worklist(),
[this](HeapObjectHeader* header) {
mutator_marking_state_.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
*header);
})) {
return false;
}
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.previously_not_fully_constructed_worklist(),
[this](HeapObjectHeader* header) {
mutator_marking_state_.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
*header);
})) {
return false;
}
{
StatsCollector::EnabledScope inner_scope(
heap(), StatsCollector::kMarkProcessMarkingWorklist);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.marking_worklist(),
[this](const MarkingWorklists::MarkingItem& item) {
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
mutator_marking_state_.AccountMarkedBytes(header);
item.callback(&visitor(), item.base_object_payload);
})) {
return false;
}
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.marking_worklist(),
[this](const MarkingWorklists::MarkingItem& item) {
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
mutator_marking_state_.AccountMarkedBytes(header);
item.callback(&visitor(), item.base_object_payload);
})) {
return false;
}
{
StatsCollector::EnabledScope inner_scope(
heap(), StatsCollector::kMarkProcessWriteBarrierWorklist);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.write_barrier_worklist(),
[this](HeapObjectHeader* header) {
mutator_marking_state_.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
*header);
})) {
return false;
}
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.write_barrier_worklist(),
[this](HeapObjectHeader* header) {
mutator_marking_state_.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
*header);
})) {
return false;
}
{
StatsCollector::EnabledScope stats_scope(
heap(), StatsCollector::kMarkInvokeEphemeronCallbacks);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
[this](const MarkingWorklists::EphemeronPairItem& item) {
mutator_marking_state_.ProcessEphemeron(item.key,
item.value_desc);
})) {
return false;
}
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
[this](const MarkingWorklists::EphemeronPairItem& item) {
mutator_marking_state_.ProcessEphemeron(item.key,
item.value_desc);
})) {
return false;
}
} while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty());
return true;
}
void MarkerBase::MarkNotFullyConstructedObjects() {
StatsCollector::DisabledScope stats_scope(
heap(), StatsCollector::kMarkNotFullyConstructedObjects);
std::unordered_set<HeapObjectHeader*> objects =
mutator_marking_state_.not_fully_constructed_worklist().Extract();
for (HeapObjectHeader* object : objects) {
......
......@@ -6,8 +6,6 @@
#include <unordered_set>
#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
namespace internal {
......@@ -21,8 +19,6 @@ void MutatorMarkingState::FlushNotFullyConstructedObjects() {
}
void MutatorMarkingState::FlushDiscoveredEphemeronPairs() {
StatsCollector::EnabledScope stats_scope(
heap_, StatsCollector::kMarkFlushEphemeronPairs);
discovered_ephemeron_pairs_worklist_.Publish();
if (!discovered_ephemeron_pairs_worklist_.IsGlobalEmpty()) {
ephemeron_pairs_for_processing_worklist_.Merge(
......
......@@ -120,7 +120,9 @@ class MarkingStateBase {
return movable_slots_worklist_.get();
}
#ifdef DEBUG
HeapBase& heap_;
#endif // DEBUG
MarkingWorklists::MarkingWorklist::Local marking_worklist_;
MarkingWorklists::NotFullyConstructedWorklist&
......@@ -148,7 +150,9 @@ MarkingStateBase::MarkingStateBase(HeapBase& heap,
MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
:
#ifdef DEBUG
heap_(heap),
#endif // DEBUG
marking_worklist_(marking_worklists.marking_worklist()),
not_fully_constructed_worklist_(
*marking_worklists.not_fully_constructed_worklist()),
......
......@@ -134,11 +134,7 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
// 3. Lazily sweep pages of this heap until we find a freed area for
// this allocation or we finish sweeping all pages of this heap.
// {
// StatsCollector::EnabledScope stats_scope(
// *space->raw_heap()->heap(), StatsCollector::kLazySweepOnAllocation);
// // TODO(chromium:1056170): Add lazy sweep.
// }
// TODO(chromium:1056170): Add lazy sweep.
// 4. Complete sweeping.
raw_heap_->heap()->sweeper().FinishIfRunning();
......
......@@ -11,7 +11,6 @@
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
namespace internal {
......@@ -30,11 +29,9 @@ bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
return (object == other.object) && (callback == other.callback);
}
PreFinalizerHandler::PreFinalizerHandler(HeapBase& heap)
: heap_(heap)
PreFinalizerHandler::PreFinalizerHandler()
#ifdef DEBUG
,
creation_thread_id_(v8::base::OS::GetCurrentThreadId())
: creation_thread_id_(v8::base::OS::GetCurrentThreadId())
#endif
{
}
......@@ -48,9 +45,6 @@ void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
}
void PreFinalizerHandler::InvokePreFinalizers() {
StatsCollector::DisabledScope stats_scope(
heap_, StatsCollector::kInvokePreFinalizers);
DCHECK(CurrentThreadIsCreationThread());
LivenessBroker liveness_broker = LivenessBrokerFactory::Create();
ordered_pre_finalizers_.erase(
......
......@@ -12,14 +12,12 @@
namespace cppgc {
namespace internal {
class HeapBase;
class PreFinalizerHandler final {
public:
using PreFinalizer =
cppgc::internal::PreFinalizerRegistrationDispatcher::PreFinalizer;
explicit PreFinalizerHandler(HeapBase& heap);
PreFinalizerHandler();
void RegisterPrefinalizer(PreFinalizer pre_finalizer);
......@@ -35,7 +33,6 @@ class PreFinalizerHandler final {
// back-to-front.
std::vector<PreFinalizer> ordered_pre_finalizers_;
HeapBase& heap_;
#ifdef DEBUG
int creation_thread_id_;
#endif
......
......@@ -19,42 +19,11 @@
namespace cppgc {
namespace internal {
#define CPPGC_FOR_ALL_SCOPES(V) \
V(AtomicPauseCompaction) \
V(AtomicPauseMarkEpilogue) \
V(AtomicPauseMarkPrologue) \
V(AtomicPauseMarkRoots) \
V(AtomicPauseMarkTransitiveClosure) \
V(AtomicPauseSweepAndCompact) \
V(CompleteSweep) \
V(IncrementalMarkingFinalize) \
V(IncrementalMarkingStartMarking) \
V(IncrementalMarkingStep) \
V(IncrementalMarkingWithDeadline) \
V(InvokePreFinalizers) \
V(LazySweepInIdle) \
V(LazySweepOnAllocation) \
V(MarkBailOutObjects) \
V(MarkInvokeEphemeronCallbacks) \
V(MarkFlushEphemeronPairs) \
V(MarkProcessWorklists) \
V(MarkProcessMarkingWorklist) \
V(MarkProcessWriteBarrierWorklist) \
V(MarkProcessNotFullyconstructeddWorklist) \
V(MarkNotFullyConstructedObjects) \
V(MarkWeakProcessing) \
V(UnifiedMarkingStep) \
V(VisitCrossThreadPersistents) \
V(VisitPersistentRoots) \
V(VisitPersistents) \
V(VisitRoots) \
V(VisitStackRoots) \
V(VisitRememberedSets)
#define CPPGC_FOR_ALL_CONCURRENT_SCOPES(V) \
V(ConcurrentMarkInvokeEphemeronCallbacks) \
V(ConcurrentMarkingStep) \
V(ConcurrentSweepingStep)
#define CPPGC_FOR_ALL_SCOPES(V) \
V(MainThreadScopeForTests1) \
V(MainThreadScopeForTests2)
#define CPPGC_FOR_ALL_CONCURRENT_SCOPES(V) V(ConcurrentThreadScopeForTests)
// Sink for various time and memory statistics.
class V8_EXPORT_PRIVATE StatsCollector final {
......
......@@ -392,13 +392,9 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
friend class HeapVisitor<ConcurrentSweepTask>;
public:
explicit ConcurrentSweepTask(HeapBase& heap, SpaceStates* states)
: heap_(heap), states_(states) {}
explicit ConcurrentSweepTask(SpaceStates* states) : states_(states) {}
void Run(cppgc::JobDelegate* delegate) final {
StatsCollector::EnabledConcurrentScope stats_scope(
heap_, StatsCollector::kConcurrentSweepingStep);
for (SpaceState& state : *states_) {
while (auto page = state.unswept_pages.Pop()) {
Traverse(*page);
......@@ -442,7 +438,6 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
return true;
}
HeapBase& heap_;
SpaceStates* states_;
std::atomic_bool is_completed_{false};
};
......@@ -523,16 +518,12 @@ class Sweeper::SweeperImpl final {
void FinishIfRunning() {
if (!is_in_progress_) return;
{
StatsCollector::EnabledScope stats_scope(*heap_->heap(),
StatsCollector::kCompleteSweep);
if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
concurrent_sweeper_handle_->UpdatePriorityEnabled()) {
concurrent_sweeper_handle_->UpdatePriority(
cppgc::TaskPriority::kUserBlocking);
}
Finish();
if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
concurrent_sweeper_handle_->UpdatePriorityEnabled()) {
concurrent_sweeper_handle_->UpdatePriority(
cppgc::TaskPriority::kUserBlocking);
}
Finish();
NotifyDone();
}
......@@ -596,16 +587,9 @@ class Sweeper::SweeperImpl final {
MutatorThreadSweeper sweeper(&sweeper_->space_states_,
sweeper_->platform_);
bool sweep_complete;
{
StatsCollector::EnabledScope stats_scope(
*sweeper_->heap_->heap(), StatsCollector::kLazySweepInIdle,
"idleDeltaInSeconds",
(deadline_in_seconds -
sweeper_->platform_->MonotonicallyIncreasingTime()));
sweep_complete = sweeper.SweepWithDeadline(deadline_in_seconds);
}
const bool sweep_complete =
sweeper.SweepWithDeadline(deadline_in_seconds);
if (sweep_complete) {
sweeper_->FinalizeSweep();
sweeper_->NotifyDone();
......@@ -636,7 +620,7 @@ class Sweeper::SweeperImpl final {
concurrent_sweeper_handle_ = platform_->PostJob(
cppgc::TaskPriority::kUserVisible,
std::make_unique<ConcurrentSweepTask>(*heap_->heap(), &space_states_));
std::make_unique<ConcurrentSweepTask>(&space_states_));
}
void CancelSweepers() {
......
......@@ -7,7 +7,6 @@
#include "include/cppgc/allocation.h"
#include "include/cppgc/custom-space.h"
#include "include/cppgc/persistent.h"
#include "src/heap/cppgc/garbage-collector.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/marker.h"
......@@ -126,12 +125,7 @@ namespace internal {
TEST_F(CompactorTest, NothingToCompact) {
StartCompaction();
heap()->stats_collector()->NotifyMarkingStarted(
GarbageCollector::Config::CollectionType::kMajor,
GarbageCollector::Config::IsForcedGC::kNotForced);
heap()->stats_collector()->NotifyMarkingCompleted(0);
FinishCompaction();
heap()->stats_collector()->NotifySweepingCompleted();
}
TEST_F(CompactorTest, CancelledNothingToCompact) {
......
......@@ -76,7 +76,7 @@ class CppgcTracingScopesTest : public testing::TestWithHeap {
Heap::From(GetHeap())->stats_collector()->NotifySweepingCompleted();
}
void ResetDelegatingTracingController(const char* expected_name = nullptr) {
void ResetTestTracingController(const char* expected_name = nullptr) {
DelegatingTracingControllerImpl::AddTraceEvent_callcount = 0u;
DelegatingTracingControllerImpl::stored_num_args = 0;
DelegatingTracingControllerImpl::stored_arg_names.clear();
......@@ -102,10 +102,10 @@ class CppgcTracingScopesTest : public testing::TestWithHeap {
TEST_F(CppgcTracingScopesTest, DisabledScope) {
StartGC();
ResetDelegatingTracingController();
ResetTestTracingController();
{
StatsCollector::DisabledScope scope(
*Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist);
*Heap::From(GetHeap()), StatsCollector::kMainThreadScopeForTests1);
}
EXPECT_EQ(0u, DelegatingTracingControllerImpl::AddTraceEvent_callcount);
EndGC();
......@@ -114,21 +114,20 @@ TEST_F(CppgcTracingScopesTest, DisabledScope) {
TEST_F(CppgcTracingScopesTest, EnabledScope) {
{
StartGC();
ResetDelegatingTracingController("CppGC.MarkProcessMarkingWorklist");
ResetTestTracingController("CppGC.MainThreadScopeForTests1");
{
StatsCollector::EnabledScope scope(
*Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist);
*Heap::From(GetHeap()), StatsCollector::kMainThreadScopeForTests1);
}
EXPECT_EQ(2u, DelegatingTracingControllerImpl::AddTraceEvent_callcount);
EndGC();
}
{
StartGC();
ResetDelegatingTracingController("CppGC.MarkProcessWriteBarrierWorklist");
ResetTestTracingController("CppGC.MainThreadScopeForTests2");
{
StatsCollector::EnabledScope scope(
*Heap::From(GetHeap()),
StatsCollector::kMarkProcessWriteBarrierWorklist);
*Heap::From(GetHeap()), StatsCollector::kMainThreadScopeForTests2);
}
EXPECT_EQ(2u, DelegatingTracingControllerImpl::AddTraceEvent_callcount);
EndGC();
......@@ -139,20 +138,20 @@ TEST_F(CppgcTracingScopesTest, EnabledScopeWithArgs) {
// Scopes always add 2 arguments: epoch and is_forced_gc.
{
StartGC();
ResetDelegatingTracingController();
ResetTestTracingController();
{
StatsCollector::EnabledScope scope(
*Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist);
*Heap::From(GetHeap()), StatsCollector::kMainThreadScopeForTests1);
}
EXPECT_EQ(2, DelegatingTracingControllerImpl::stored_num_args);
EndGC();
}
{
StartGC();
ResetDelegatingTracingController();
ResetTestTracingController();
{
StatsCollector::EnabledScope scope(
*Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
*Heap::From(GetHeap()), StatsCollector::kMainThreadScopeForTests1,
"arg1", 1);
}
EXPECT_EQ(3, DelegatingTracingControllerImpl::stored_num_args);
......@@ -160,10 +159,10 @@ TEST_F(CppgcTracingScopesTest, EnabledScopeWithArgs) {
}
{
StartGC();
ResetDelegatingTracingController();
ResetTestTracingController();
{
StatsCollector::EnabledScope scope(
*Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
*Heap::From(GetHeap()), StatsCollector::kMainThreadScopeForTests1,
"arg1", 1, "arg2", 2);
}
EXPECT_EQ(4, DelegatingTracingControllerImpl::stored_num_args);
......@@ -174,10 +173,10 @@ TEST_F(CppgcTracingScopesTest, EnabledScopeWithArgs) {
TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
{
StartGC();
ResetDelegatingTracingController();
ResetTestTracingController();
{
StatsCollector::EnabledScope scope(
*Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
*Heap::From(GetHeap()), StatsCollector::kMainThreadScopeForTests1,
"uint_arg", 13u, "bool_arg", false);
}
FindArgument("uint_arg", TRACE_VALUE_TYPE_UINT, 13);
......@@ -186,10 +185,10 @@ TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
}
{
StartGC();
ResetDelegatingTracingController();
ResetTestTracingController();
{
StatsCollector::EnabledScope scope(
*Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
*Heap::From(GetHeap()), StatsCollector::kMainThreadScopeForTests1,
"neg_int_arg", -5, "pos_int_arg", 7);
}
FindArgument("neg_int_arg", TRACE_VALUE_TYPE_INT, -5);
......@@ -198,12 +197,12 @@ TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
}
{
StartGC();
ResetDelegatingTracingController();
ResetTestTracingController();
double double_value = 1.2;
const char* string_value = "test";
{
StatsCollector::EnabledScope scope(
*Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
*Heap::From(GetHeap()), StatsCollector::kMainThreadScopeForTests1,
"string_arg", string_value, "double_arg", double_value);
}
FindArgument("string_arg", TRACE_VALUE_TYPE_STRING,
......@@ -215,14 +214,10 @@ TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
}
TEST_F(CppgcTracingScopesTest, InitalScopesAreZero) {
StatsCollector* stats_collector = Heap::From(GetHeap())->stats_collector();
stats_collector->NotifyMarkingStarted(
GarbageCollector::Config::CollectionType::kMajor,
GarbageCollector::Config::IsForcedGC::kNotForced);
stats_collector->NotifyMarkingCompleted(0);
stats_collector->NotifySweepingCompleted();
StartGC();
EndGC();
const StatsCollector::Event& event =
stats_collector->GetPreviousEventForTesting();
Heap::From(GetHeap())->stats_collector()->GetPreviousEventForTesting();
for (int i = 0; i < StatsCollector::kNumScopeIds; ++i) {
EXPECT_TRUE(event.scope_data[i].IsZero());
}
......@@ -233,10 +228,7 @@ TEST_F(CppgcTracingScopesTest, InitalScopesAreZero) {
TEST_F(CppgcTracingScopesTest, TestIndividualScopes) {
for (int scope_id = 0; scope_id < StatsCollector::kNumScopeIds; ++scope_id) {
StatsCollector* stats_collector = Heap::From(GetHeap())->stats_collector();
stats_collector->NotifyMarkingStarted(
GarbageCollector::Config::CollectionType::kMajor,
GarbageCollector::Config::IsForcedGC::kNotForced);
StartGC();
DelegatingTracingControllerImpl::check_expectations = false;
{
StatsCollector::EnabledScope scope(
......@@ -247,10 +239,9 @@ TEST_F(CppgcTracingScopesTest, TestIndividualScopes) {
// Force time to progress before destroying scope.
}
}
stats_collector->NotifyMarkingCompleted(0);
stats_collector->NotifySweepingCompleted();
EndGC();
const StatsCollector::Event& event =
stats_collector->GetPreviousEventForTesting();
Heap::From(GetHeap())->stats_collector()->GetPreviousEventForTesting();
for (int i = 0; i < StatsCollector::kNumScopeIds; ++i) {
if (i == scope_id)
EXPECT_LT(v8::base::TimeDelta(), event.scope_data[i]);
......@@ -266,10 +257,7 @@ TEST_F(CppgcTracingScopesTest, TestIndividualScopes) {
TEST_F(CppgcTracingScopesTest, TestIndividualConcurrentScopes) {
for (int scope_id = 0; scope_id < StatsCollector::kNumConcurrentScopeIds;
++scope_id) {
StatsCollector* stats_collector = Heap::From(GetHeap())->stats_collector();
stats_collector->NotifyMarkingStarted(
GarbageCollector::Config::CollectionType::kMajor,
GarbageCollector::Config::IsForcedGC::kNotForced);
StartGC();
DelegatingTracingControllerImpl::check_expectations = false;
{
StatsCollector::EnabledConcurrentScope scope(
......@@ -280,10 +268,9 @@ TEST_F(CppgcTracingScopesTest, TestIndividualConcurrentScopes) {
// Force time to progress before destroying scope.
}
}
stats_collector->NotifyMarkingCompleted(0);
stats_collector->NotifySweepingCompleted();
EndGC();
const StatsCollector::Event& event =
stats_collector->GetPreviousEventForTesting();
Heap::From(GetHeap())->stats_collector()->GetPreviousEventForTesting();
for (int i = 0; i < StatsCollector::kNumScopeIds; ++i) {
EXPECT_TRUE(event.scope_data[i].IsZero());
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment