Commit c174643b authored by Omer Katz, committed by Commit Bot

cppgc: Implement process-global state accessors.

Process-global accessors for total physical size and used size are
needed for Blink. These are implemented via an allocation observer that
atomically updates static counters.
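
A minimal sketch of that mechanism, with illustrative names (the real observer added by this CL is ProcessHeapStatisticsUpdater::AllocationObserverImpl below): each heap's stats collector notifies an observer about size deltas, and the observer folds them into process-global atomic counters with relaxed ordering, since several heaps may report concurrently.

#include <atomic>
#include <cstddef>

// Illustrative only: one process-global counter updated from per-heap
// allocation observers. Relaxed ordering is enough because the counter is
// a statistic, not a synchronization point.
std::atomic_size_t g_total_allocated_object_size{0};

class CounterForwardingObserver {  // would derive from
                                   // StatsCollector::AllocationObserver
 public:
  void AllocatedObjectSizeIncreased(size_t bytes) {
    g_total_allocated_object_size.fetch_add(bytes, std::memory_order_relaxed);
  }
  void AllocatedObjectSizeDecreased(size_t bytes) {
    g_total_allocated_object_size.fetch_sub(bytes, std::memory_order_relaxed);
  }
};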

The public API only provides getters for the counters. An internal class
is in charge of updating the counters as needed. A similar split is also
applied to IsAnyIncrementalOrConcurrentMarking().
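
For example, embedder code only ever touches the read side. The dump function below is purely hypothetical; the two getters are the ones introduced by this CL in include/cppgc/process-heap-statistics.h.

#include <cstdio>

#include "cppgc/process-heap-statistics.h"

// Hypothetical embedder-side dump of the process-wide counters.
void DumpCppgcProcessStatistics() {
  std::printf("cppgc allocated object size: %zu\n",
              cppgc::ProcessHeapStatistics::TotalAllocatedObjectSize());
  std::printf("cppgc allocated space:       %zu\n",
              cppgc::ProcessHeapStatistics::TotalAllocatedSpace());
}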

Drive-by: ProcessHeap is merged into cppgc::internal::WriteBarrier.
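
Rough sketch of the enter/exit flag pattern that now backs WriteBarrier::IsAnyIncrementalOrConcurrentMarking() (the real class is AtomicEntryFlag from cppgc/internal/atomic-entry-flag.h; names below are illustrative): the marker toggles the flag through the internal updater, and the write-barrier fast path only asks whether any marker might be active.

#include <atomic>

// Sketch only: spurious "might be entered" answers are acceptable because
// they merely send the write barrier down its slow path, but the flag never
// reads as clear while a marking scope is active.
class EntryFlagSketch {
 public:
  void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
  void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }
  bool MightBeEntered() const {
    return entries_.load(std::memory_order_relaxed) != 0;
  }

 private:
  std::atomic_int entries_{0};
};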

Bug: chromium:1056170
Change-Id: Iaedebd1ac9d49238ce6bdd52ffa5d1ef4d28203d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2695394
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72832}
parent 81078e2b
@@ -4697,7 +4697,6 @@ v8_source_set("cppgc_base") {
     "include/cppgc/internal/persistent-node.h",
     "include/cppgc/internal/pointer-policies.h",
     "include/cppgc/internal/prefinalizer-handler.h",
-    "include/cppgc/internal/process-heap.h",
     "include/cppgc/internal/write-barrier.h",
     "include/cppgc/liveness-broker.h",
     "include/cppgc/macros.h",
@@ -4707,6 +4706,7 @@ v8_source_set("cppgc_base") {
     "include/cppgc/persistent.h",
     "include/cppgc/platform.h",
     "include/cppgc/prefinalizer.h",
+    "include/cppgc/process-heap-statistics.h",
     "include/cppgc/sentinel-pointer.h",
     "include/cppgc/source-location.h",
     "include/cppgc/trace-trait.h",
@@ -4773,6 +4773,8 @@ v8_source_set("cppgc_base") {
     "src/heap/cppgc/pointer-policies.cc",
     "src/heap/cppgc/prefinalizer-handler.cc",
     "src/heap/cppgc/prefinalizer-handler.h",
+    "src/heap/cppgc/process-heap-statistics.cc",
+    "src/heap/cppgc/process-heap-statistics.h",
     "src/heap/cppgc/process-heap.cc",
     "src/heap/cppgc/process-heap.h",
     "src/heap/cppgc/raw-heap.cc",
@@ -4790,6 +4792,7 @@ v8_source_set("cppgc_base") {
     "src/heap/cppgc/virtual-memory.h",
     "src/heap/cppgc/visitor.cc",
     "src/heap/cppgc/write-barrier.cc",
+    "src/heap/cppgc/write-barrier.h",
   ]
   if (cppgc_is_standalone) {
...
@@ -5,7 +5,6 @@ include_rules = [
   # Used by v8-cppgc.h to bridge to cppgc.
   "+cppgc/custom-space.h",
   "+cppgc/heap-statistics.h",
-  "+cppgc/internal/process-heap.h",
   "+cppgc/internal/write-barrier.h",
   "+cppgc/visitor.h",
 ]
@@ -7,7 +7,7 @@
 #include "cppgc/heap-state.h"
 #include "cppgc/internal/api-constants.h"
-#include "cppgc/internal/process-heap.h"
+#include "cppgc/internal/atomic-entry-flag.h"
 #include "cppgc/sentinel-pointer.h"
 #include "cppgc/trace-trait.h"
 #include "v8config.h"  // NOLINT(build/include_directory)
@@ -86,6 +86,13 @@ class V8_EXPORT WriteBarrier final {
   static void CheckParams(Type expected_type, const Params& params) {}
 #endif  // !V8_ENABLE_CHECKS
 
+  // The IncrementalOrConcurrentUpdater class allows cppgc internal to update
+  // |incremental_or_concurrent_marking_flag_|.
+  class IncrementalOrConcurrentMarkingFlagUpdater;
+  static bool IsAnyIncrementalOrConcurrentMarking() {
+    return incremental_or_concurrent_marking_flag_.MightBeEntered();
+  }
+
  private:
   WriteBarrier() = delete;
@@ -111,6 +118,8 @@ class V8_EXPORT WriteBarrier final {
                                         const AgeTable& ageTable,
                                         const void* slot, uintptr_t value_offset);
 #endif  // CPPGC_YOUNG_GENERATION
+
+  static AtomicEntryFlag incremental_or_concurrent_marking_flag_;
 };
 
 template <WriteBarrier::Type type>
@@ -218,7 +227,7 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
     return SetAndReturnType<WriteBarrier::Type::kGenerational>(params);
   }
 #else   // !CPPGC_YOUNG_GENERATION
-    if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) {
+    if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
       return SetAndReturnType<WriteBarrier::Type::kNone>(params);
     }
     HeapHandle& handle = callback();
@@ -288,7 +297,7 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
   static V8_INLINE WriteBarrier::Type Get(const void*, const void*,
                                           WriteBarrier::Params& params,
                                           HeapHandleCallback callback) {
-    if (V8_UNLIKELY(ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) {
+    if (V8_UNLIKELY(WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
       HeapHandle& handle = callback();
       if (IsMarking(handle)) {
         params.heap = &handle;
...
@@ -2,33 +2,35 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef INCLUDE_CPPGC_INTERNAL_PROCESS_HEAP_H_
-#define INCLUDE_CPPGC_INTERNAL_PROCESS_HEAP_H_
+#ifndef INCLUDE_CPPGC_PROCESS_HEAP_STATISTICS_H_
+#define INCLUDE_CPPGC_PROCESS_HEAP_STATISTICS_H_
 
-#include "cppgc/internal/atomic-entry-flag.h"
+#include <atomic>
+#include <cstddef>
+
 #include "v8config.h"  // NOLINT(build/include_directory)
 
 namespace cppgc {
 namespace internal {
+class ProcessHeapStatisticsUpdater;
+}  // namespace internal
 
-class V8_EXPORT ProcessHeap final {
+class V8_EXPORT ProcessHeapStatistics final {
  public:
-  static void EnterIncrementalOrConcurrentMarking() {
-    concurrent_marking_flag_.Enter();
-  }
-  static void ExitIncrementalOrConcurrentMarking() {
-    concurrent_marking_flag_.Exit();
-  }
-
-  static bool IsAnyIncrementalOrConcurrentMarking() {
-    return concurrent_marking_flag_.MightBeEntered();
-  }
+  static size_t TotalAllocatedObjectSize() {
+    return total_allocated_object_size_.load(std::memory_order_relaxed);
+  }
+  static size_t TotalAllocatedSpace() {
+    return total_allocated_space_.load(std::memory_order_relaxed);
+  }
 
  private:
-  static AtomicEntryFlag concurrent_marking_flag_;
+  static std::atomic_size_t total_allocated_space_;
+  static std::atomic_size_t total_allocated_object_size_;
+
+  friend class internal::ProcessHeapStatisticsUpdater;
 };
 
-}  // namespace internal
 }  // namespace cppgc
 
-#endif  // INCLUDE_CPPGC_INTERNAL_PROCESS_HEAP_H_
+#endif  // INCLUDE_CPPGC_PROCESS_HEAP_STATISTICS_H_
@@ -11,7 +11,6 @@
 #include "cppgc/custom-space.h"
 #include "cppgc/heap-statistics.h"
-#include "cppgc/internal/process-heap.h"
 #include "cppgc/internal/write-barrier.h"
 #include "cppgc/visitor.h"
 #include "v8-internal.h"  // NOLINT(build/include_directory)
@@ -168,7 +167,7 @@ class V8_EXPORT JSHeapConsistency final {
       WriteBarrierParams& params, HeapHandleCallback callback) {
     if (ref.IsEmpty()) return WriteBarrierType::kNone;
-    if (V8_LIKELY(!cppgc::internal::ProcessHeap::
+    if (V8_LIKELY(!cppgc::internal::WriteBarrier::
                       IsAnyIncrementalOrConcurrentMarking())) {
       return cppgc::internal::WriteBarrier::Type::kNone;
     }
...
@@ -78,6 +78,8 @@ HeapBase::HeapBase(
           stats_collector_.get()),
       sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()),
       stack_support_(stack_support) {
+  stats_collector_->RegisterObserver(
+      &allocation_observer_for_PROCESS_HEAP_STATISTICS_);
 }
 
 HeapBase::~HeapBase() = default;
...
@@ -17,6 +17,7 @@
 #include "src/heap/cppgc/marker.h"
 #include "src/heap/cppgc/metric-recorder.h"
 #include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/process-heap-statistics.h"
 #include "src/heap/cppgc/raw-heap.h"
 #include "src/heap/cppgc/sweeper.h"
 #include "v8config.h"  // NOLINT(build/include_directory)
@@ -189,6 +190,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
   PersistentRegion strong_cross_thread_persistent_region_;
   PersistentRegion weak_cross_thread_persistent_region_;
 
+  ProcessHeapStatisticsUpdater::AllocationObserverImpl
+      allocation_observer_for_PROCESS_HEAP_STATISTICS_;
 #if defined(CPPGC_YOUNG_GENERATION)
   std::set<void*> remembered_slots_;
 #endif
...
@@ -8,7 +8,6 @@
 #include <memory>
 
 #include "include/cppgc/heap-consistency.h"
-#include "include/cppgc/internal/process-heap.h"
 #include "include/cppgc/platform.h"
 #include "src/base/platform/time.h"
 #include "src/heap/cppgc/heap-object-header.h"
@@ -20,6 +19,7 @@
 #include "src/heap/cppgc/marking-visitor.h"
 #include "src/heap/cppgc/process-heap.h"
 #include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/write-barrier.h"
 
 #if defined(CPPGC_CAGED_HEAP)
 #include "include/cppgc/internal/caged-heap-local-data.h"
@@ -35,7 +35,7 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
   if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
       config.marking_type ==
           Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
-    ProcessHeap::EnterIncrementalOrConcurrentMarking();
+    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
 #if defined(CPPGC_CAGED_HEAP)
     heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
 #endif
@@ -49,7 +49,7 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
   if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
       config.marking_type ==
           Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
-    ProcessHeap::ExitIncrementalOrConcurrentMarking();
+    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
 #if defined(CPPGC_CAGED_HEAP)
     heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
 #endif
...
@@ -10,7 +10,7 @@
 #include <array>
 
-#include "include/cppgc/internal/process-heap.h"
+#include "include/cppgc/internal/write-barrier.h"
 #include "src/base/atomic-utils.h"
 #include "src/base/bits.h"
 #include "src/base/macros.h"
@@ -217,7 +217,7 @@ bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
 #if defined(V8_TARGET_ARCH_ARM)
   // Use non-atomic accesses on ARMv7 when marking is not active.
   if (mode == AccessMode::kAtomic) {
-    if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking()))
+    if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking()))
       return true;
   }
 #endif  // defined(V8_TARGET_ARCH_ARM)
...
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/process-heap-statistics.h"
namespace cppgc {
std::atomic_size_t ProcessHeapStatistics::total_allocated_space_{0};
std::atomic_size_t ProcessHeapStatistics::total_allocated_object_size_{0};
} // namespace cppgc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_PROCESS_HEAP_STATISTICS_H_
#define V8_HEAP_CPPGC_PROCESS_HEAP_STATISTICS_H_
#include "include/cppgc/process-heap-statistics.h"
#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
namespace internal {
class ProcessHeapStatisticsUpdater {
public:
// Allocation observer implementation that heaps should register to
// contribute to ProcessHeapStatistics. The heap is responsible for
// allocating and registering the observer impl with its stats collector.
class AllocationObserverImpl final
: public StatsCollector::AllocationObserver {
public:
void AllocatedObjectSizeIncreased(size_t bytes) final {
ProcessHeapStatisticsUpdater::IncreaseTotalAllocatedObjectSize(bytes);
object_size_changes_since_last_reset_ += bytes;
}
void AllocatedObjectSizeDecreased(size_t bytes) final {
ProcessHeapStatisticsUpdater::DecreaseTotalAllocatedObjectSize(bytes);
object_size_changes_since_last_reset_ -= bytes;
}
void ResetAllocatedObjectSize(size_t bytes) final {
ProcessHeapStatisticsUpdater::DecreaseTotalAllocatedObjectSize(
object_size_changes_since_last_reset_);
ProcessHeapStatisticsUpdater::IncreaseTotalAllocatedObjectSize(bytes);
object_size_changes_since_last_reset_ = bytes;
}
void AllocatedSizeIncreased(size_t bytes) final {
ProcessHeapStatisticsUpdater::IncreaseTotalAllocatedSpace(bytes);
}
void AllocatedSizeDecreased(size_t bytes) final {
ProcessHeapStatisticsUpdater::DecreaseTotalAllocatedSpace(bytes);
}
private:
size_t object_size_changes_since_last_reset_ = 0;
};
// For cppgc::ProcessHeapStatistics
static void IncreaseTotalAllocatedObjectSize(size_t delta) {
::cppgc::ProcessHeapStatistics::total_allocated_object_size_.fetch_add(
delta, std::memory_order_relaxed);
}
static void DecreaseTotalAllocatedObjectSize(size_t delta) {
::cppgc::ProcessHeapStatistics::total_allocated_object_size_.fetch_sub(
delta, std::memory_order_relaxed);
}
static void IncreaseTotalAllocatedSpace(size_t delta) {
::cppgc::ProcessHeapStatistics::total_allocated_space_.fetch_add(
delta, std::memory_order_relaxed);
}
static void DecreaseTotalAllocatedSpace(size_t delta) {
::cppgc::ProcessHeapStatistics::total_allocated_space_.fetch_sub(
delta, std::memory_order_relaxed);
}
};
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_PROCESS_HEAP_STATISTICS_H_
@@ -4,13 +4,9 @@
 #include "src/heap/cppgc/process-heap.h"
 
-#include "include/cppgc/internal/process-heap.h"
-
 namespace cppgc {
 namespace internal {
 
-AtomicEntryFlag ProcessHeap::concurrent_marking_flag_;
-
 v8::base::LazyMutex g_process_mutex = LAZY_MUTEX_INITIALIZER;
 
 }  // namespace internal
...
@@ -236,10 +236,16 @@ size_t StatsCollector::allocated_object_size() const {
 
 void StatsCollector::NotifyAllocatedMemory(int64_t size) {
   memory_allocated_bytes_ += size;
+  ForAllAllocationObservers([size](AllocationObserver* observer) {
+    observer->AllocatedSizeIncreased(static_cast<size_t>(size));
+  });
 }
 
 void StatsCollector::NotifyFreedMemory(int64_t size) {
   memory_freed_bytes_since_end_of_marking_ += size;
+  ForAllAllocationObservers([size](AllocationObserver* observer) {
+    observer->AllocatedSizeDecreased(static_cast<size_t>(size));
+  });
 }
 
 void StatsCollector::RecordHistogramSample(ScopeId scope_id_,
...
@@ -225,14 +225,21 @@ class V8_EXPORT_PRIVATE StatsCollector final {
     // the deltas is interesting.
     //
     // May trigger GC.
-    virtual void AllocatedObjectSizeIncreased(size_t) = 0;
-    virtual void AllocatedObjectSizeDecreased(size_t) = 0;
+    virtual void AllocatedObjectSizeIncreased(size_t) {}
+    virtual void AllocatedObjectSizeDecreased(size_t) {}
 
     // Called when the exact size of allocated object size is known. In
     // practice, this is after marking when marked bytes == allocated bytes.
     //
     // Must not trigger GC synchronously.
-    virtual void ResetAllocatedObjectSize(size_t) = 0;
+    virtual void ResetAllocatedObjectSize(size_t) {}
+
+    // Called upon allocating/releasing chunks of memory (e.g. pages) that can
+    // contain objects.
+    //
+    // Must not trigger GC.
+    virtual void AllocatedSizeIncreased(size_t) {}
+    virtual void AllocatedSizeDecreased(size_t) {}
   };
 
   // Observers are implemented using virtual calls. Avoid notifications below
...
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "include/cppgc/internal/write-barrier.h"
+#include "src/heap/cppgc/write-barrier.h"
 
 #include "include/cppgc/heap-consistency.h"
 #include "include/cppgc/internal/pointer-policies.h"
@@ -20,6 +20,9 @@
 namespace cppgc {
 namespace internal {
 
+// static
+AtomicEntryFlag WriteBarrier::incremental_or_concurrent_marking_flag_;
+
 namespace {
 
 void ProcessMarkValue(HeapObjectHeader& header, MarkerBase* marker,
...
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_WRITE_BARRIER_H_
#define V8_HEAP_CPPGC_WRITE_BARRIER_H_
#include "include/cppgc/internal/write-barrier.h"
namespace cppgc {
namespace internal {
class WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater {
public:
static void Enter() { incremental_or_concurrent_marking_flag_.Enter(); }
static void Exit() { incremental_or_concurrent_marking_flag_.Exit(); }
};
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_WRITE_BARRIER_H_
@@ -117,6 +117,8 @@ class MockAllocationObserver : public StatsCollector::AllocationObserver {
   MOCK_METHOD(void, AllocatedObjectSizeIncreased, (size_t), (override));
   MOCK_METHOD(void, AllocatedObjectSizeDecreased, (size_t), (override));
   MOCK_METHOD(void, ResetAllocatedObjectSize, (size_t), (override));
+  MOCK_METHOD(void, AllocatedSizeIncreased, (size_t), (override));
+  MOCK_METHOD(void, AllocatedSizeDecreased, (size_t), (override));
 };
 
 TEST_F(StatsCollectorTest, RegisterUnregisterObserver) {
@@ -156,6 +158,18 @@ TEST_F(StatsCollectorTest, ObserveResetAllocatedObjectSize) {
   stats.UnregisterObserver(&observer);
 }
 
+TEST_F(StatsCollectorTest, ObserveAllocatedMemoryIncreaseAndDecrease) {
+  MockAllocationObserver observer;
+  stats.RegisterObserver(&observer);
+  static constexpr size_t kAllocatedMemorySize = 4096;
+  EXPECT_CALL(observer, AllocatedSizeIncreased(kAllocatedMemorySize));
+  stats.NotifyAllocatedMemory(kAllocatedMemorySize);
+  static constexpr size_t kFreedMemorySize = 2048;
+  EXPECT_CALL(observer, AllocatedSizeDecreased(kFreedMemorySize));
+  stats.NotifyFreedMemory(kFreedMemorySize);
+  stats.UnregisterObserver(&observer);
+}
+
 namespace {
 
 class AllocationObserverTriggeringGC final
...
@@ -175,7 +175,7 @@ class NoWriteBarrierTest : public testing::TestWithHeap {};
 
 TEST_F(WriteBarrierTest, EnableDisableIncrementalMarking) {
   {
     IncrementalMarkingScope scope(marker());
-    EXPECT_TRUE(ProcessHeap::IsAnyIncrementalOrConcurrentMarking());
+    EXPECT_TRUE(WriteBarrier::IsAnyIncrementalOrConcurrentMarking());
   }
 }
...