Commit 0665568d authored by Michael Lippautz, committed by V8 LUCI CQ

cppgc: Discard memory on memory reducing GCs

Add discarding of memory on memory-reducing garbage collections. In
addition, track discarded memory and properly adjust the resident
memory reported in heap dumps.

- Memory is discarded during sweeping and the counter is persistent
  across garbage collection cycles (a hedged sketch of the mechanism
  follows the commit metadata below).
- Subsequent sweep calls are not supposed to touch the memory anymore.
- As a simplification, discarded memory is tracked on page granularity
  and assumed to be fully paged in as soon as a page's free list entries
  are reused for allocation.

Change-Id: Icfd58f49f3400c4df0d482e20326a0c43c1ca9f5
Bug: chromium:1056170
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3015563
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75677}
parent a73ce1d0
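
The sweeper changes that perform the actual discarding are collapsed further
down this page, so the following is only a hedged sketch of the mechanism the
commit message describes, not the commit's code. A POSIX madvise(MADV_DONTNEED)
stands in for whatever page-allocator hook the sweeper actually uses, and all
names are illustrative:

// Sketch only: discard the OS pages fully contained in a free range and
// report how many bytes were returned to the OS. A caller would add the
// result to the owning page's discarded-memory counter.
#include <sys/mman.h>

#include <cstddef>
#include <cstdint>

constexpr uintptr_t kOSPageSize = 4096;  // Assumed; query sysconf() in practice.

size_t DiscardFreeRange(uintptr_t begin, uintptr_t end) {
  // Only whole OS pages can be discarded, so shrink the range inward.
  const uintptr_t aligned_begin =
      (begin + kOSPageSize - 1) & ~(kOSPageSize - 1);
  const uintptr_t aligned_end = end & ~(kOSPageSize - 1);
  if (aligned_begin >= aligned_end) return 0;  // Range spans no full OS page.
  madvise(reinterpret_cast<void*>(aligned_begin), aligned_end - aligned_begin,
          MADV_DONTNEED);
  return aligned_end - aligned_begin;
}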
src/heap/cppgc-js/cpp-heap.cc
@@ -460,7 +460,10 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
           ? cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic
           : cppgc::internal::Sweeper::SweepingConfig::SweepingType::
                 kIncrementalAndConcurrent,
-      compactable_space_handling};
+      compactable_space_handling,
+      current_flags_ & TraceFlags::kReduceMemory
+          ? cppgc::internal::FreeMemoryHandling::kDiscardWherePossible
+          : cppgc::internal::FreeMemoryHandling::kDoNotDiscard};
   DCHECK_IMPLIES(
       !isolate_,
       cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic ==
src/heap/cppgc/free-list.cc
@@ -60,7 +60,7 @@ FreeList& FreeList::operator=(FreeList&& other) V8_NOEXCEPT {
   return *this;
 }

-void FreeList::Add(FreeList::Block block) {
+Address FreeList::Add(FreeList::Block block) {
   const size_t size = block.size;
   DCHECK_GT(kPageSize, size);
   DCHECK_LE(sizeof(HeapObjectHeader), size);
@@ -73,7 +73,7 @@ void FreeList::Add(FreeList::Block block) {
     // zeroing it out.
     ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(HeapObjectHeader));
     new (block.address) HeapObjectHeader(size, kFreeListGCInfoIndex);
-    return;
+    return reinterpret_cast<Address>(block.address) + block.size;
   }

   // Make sure the freelist header is writable. SET_MEMORY_ACCESSIBLE is not
@@ -86,6 +86,7 @@ void FreeList::Add(FreeList::Block block) {
   if (!entry->Next()) {
     free_list_tails_[index] = entry;
   }
+  return reinterpret_cast<Address>(block.address) + sizeof(Entry);
 }

 void FreeList::Append(FreeList&& other) {
src/heap/cppgc/free-list.h
@@ -34,7 +34,9 @@ class V8_EXPORT_PRIVATE FreeList {
   Block Allocate(size_t);

   // Adds block to the freelist. The minimal block size is two words.
-  void Add(Block);
+  // Returns the start of the free list payload that will not be accessed by
+  // the free list itself.
+  Address Add(Block);

   // Append other freelist into this.
   void Append(FreeList&&);
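The returned address marks where the free-list bookkeeping inside the block
ends: for blocks too small to hold an Entry the whole block is bookkeeping
(the returned address is the block's end), otherwise it points just past the
Entry. Everything from there to the end of the block is memory the free list
promises not to touch, which is exactly the span a memory-reducing sweep can
discard. A hypothetical call site, presumably resembling the collapsed sweeper
code (names are illustrative and build on the DiscardFreeRange sketch above):

// Hypothetical: return a swept-free block to the free list, then discard the
// payload the free list will not touch and account for it on page and heap.
void AddFreeBlockAndDiscard(FreeList& free_list, BasePage& page,
                            StatsCollector& stats, void* address, size_t size) {
  const Address unused_begin = free_list.Add({address, size});
  const Address unused_end = static_cast<Address>(address) + size;
  const size_t discarded =
      DiscardFreeRange(reinterpret_cast<uintptr_t>(unused_begin),
                       reinterpret_cast<uintptr_t>(unused_end));
  page.IncrementDiscardedMemory(discarded);
  stats.IncrementDiscardedMemory(discarded);
}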
src/heap/cppgc/heap-page.cc
@@ -45,6 +45,13 @@ const BasePage* BasePage::FromInnerAddress(const HeapBase* heap,

 // static
 void BasePage::Destroy(BasePage* page) {
+  if (page->discarded_memory()) {
+    page->space()
+        .raw_heap()
+        ->heap()
+        ->stats_collector()
+        ->DecrementDiscardedMemory(page->discarded_memory());
+  }
   if (page->is_large()) {
     LargePage::Destroy(LargePage::From(page));
   } else {
src/heap/cppgc/heap-page.h
@@ -78,6 +78,13 @@ class V8_EXPORT_PRIVATE BasePage {
 #endif
   }

+  void IncrementDiscardedMemory(size_t value) {
+    DCHECK_GE(discarded_memory_ + value, discarded_memory_);
+    discarded_memory_ += value;
+  }
+  void ResetDiscardedMemory() { discarded_memory_ = 0; }
+  size_t discarded_memory() const { return discarded_memory_; }
+
  protected:
   enum class PageType : uint8_t { kNormal, kLarge };
   BasePage(HeapBase&, BaseSpace&, PageType);
@@ -86,6 +93,7 @@ class V8_EXPORT_PRIVATE BasePage {
   HeapBase& heap_;
   BaseSpace& space_;
   PageType type_;
+  size_t discarded_memory_ = 0;
 };

 class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
src/heap/cppgc/object-allocator.cc
@@ -179,6 +179,13 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
   const FreeList::Block entry = space.free_list().Allocate(size);
   if (!entry.address) return nullptr;

+  // Assume discarded memory on that page is now zero.
+  auto& page = *NormalPage::From(BasePage::FromPayload(entry.address));
+  if (page.discarded_memory()) {
+    stats_collector_->DecrementDiscardedMemory(page.discarded_memory());
+    page.ResetDiscardedMemory();
+  }
+
   ReplaceLinearAllocationBuffer(space, *stats_collector_,
                                 static_cast<Address>(entry.address),
                                 entry.size);
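Note that the page-granularity simplification errs in one direction: taking any
free-list entry from a page zeroes that page's whole counter, even though the
allocation may fault in only a few OS pages, so the global counter can only
under-report discarded memory and the adjusted resident size in heap dumps is a
conservative overestimate.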
src/heap/cppgc/stats-collector.cc
@@ -5,6 +5,7 @@
 #include "src/heap/cppgc/stats-collector.h"

 #include <algorithm>
+#include <atomic>
 #include <cmath>

 #include "src/base/atomicops.h"
@@ -309,6 +310,28 @@ void StatsCollector::NotifyFreedMemory(int64_t size) {
 #endif  // DEBUG
 }

+void StatsCollector::IncrementDiscardedMemory(size_t value) {
+  const size_t old =
+      discarded_bytes_.fetch_add(value, std::memory_order_relaxed);
+  DCHECK_GE(old + value, old);
+  USE(old);
+}
+
+void StatsCollector::DecrementDiscardedMemory(size_t value) {
+  const size_t old =
+      discarded_bytes_.fetch_sub(value, std::memory_order_relaxed);
+  DCHECK_GE(old, old - value);
+  USE(old);
+}
+
+void StatsCollector::ResetDiscardedMemory() {
+  discarded_bytes_.store(0, std::memory_order_relaxed);
+}
+
+size_t StatsCollector::discarded_memory() const {
+  return discarded_bytes_.load(std::memory_order_relaxed);
+}
+
 void StatsCollector::RecordHistogramSample(ScopeId scope_id_,
                                            v8::base::TimeDelta time) {
   switch (scope_id_) {
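The commit message says heap dumps subtract this counter from resident memory;
that reporting code is not among the hunks shown on this page, so the following
is only an assumed illustration of how the new accessor could be consumed
(EstimatedResidentBytes and committed_bytes are hypothetical names):

// Assumed illustration, not part of this commit's visible hunks: fold the
// discarded-byte counter into a resident-memory estimate for a heap dump.
size_t EstimatedResidentBytes(const StatsCollector& stats,
                              size_t committed_bytes) {
  const size_t discarded = stats.discarded_memory();
  // Discarded pages remain committed virtual memory but are no longer resident.
  return committed_bytes > discarded ? committed_bytes - discarded : 0;
}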
src/heap/cppgc/stats-collector.h
@@ -8,6 +8,7 @@
 #include <stddef.h>
 #include <stdint.h>

+#include <atomic>
 #include <vector>

 #include "include/cppgc/platform.h"
@@ -293,6 +294,11 @@ class V8_EXPORT_PRIVATE StatsCollector final {
   void NotifyAllocatedMemory(int64_t);
   void NotifyFreedMemory(int64_t);

+  void IncrementDiscardedMemory(size_t);
+  void DecrementDiscardedMemory(size_t);
+  void ResetDiscardedMemory();
+  size_t discarded_memory() const;
+
   void SetMetricRecorder(std::unique_ptr<MetricRecorder> histogram_recorder) {
     metric_recorder_ = std::move(histogram_recorder);
   }
@@ -331,6 +337,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
   int64_t memory_allocated_bytes_ = 0;
   int64_t memory_freed_bytes_since_end_of_marking_ = 0;
+  std::atomic<size_t> discarded_bytes_{0};

   // vector to allow fast iteration of observers. Register/Unregisters only
   // happens on startup/teardown.
(Diff collapsed: the sweeper implementation changes are not shown on this page.)
src/heap/cppgc/sweeper.h
@@ -21,6 +21,11 @@ class HeapBase;
 class ConcurrentSweeperTest;
 class NormalPageSpace;

+enum class FreeMemoryHandling : uint8_t {
+  kDoNotDiscard,
+  kDiscardWherePossible
+};
+
 class V8_EXPORT_PRIVATE Sweeper final {
  public:
   struct SweepingConfig {
@@ -30,6 +35,7 @@ class V8_EXPORT_PRIVATE Sweeper final {
     SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
     CompactableSpaceHandling compactable_space_handling =
         CompactableSpaceHandling::kSweep;
+    FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
   };

   explicit Sweeper(HeapBase&);
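Putting the pieces together, a caller opts in through the new SweepingConfig
field, exactly as the cpp-heap.cc hunk above does for kReduceMemory GCs. A
minimal usage sketch, assuming CompactableSpaceHandling is nested in
SweepingConfig the same way SweepingType is:

// Minimal sketch: request a memory-reducing sweep. Field order follows the
// SweepingConfig definition above.
using SweepingConfig = cppgc::internal::Sweeper::SweepingConfig;

const SweepingConfig config{
    SweepingConfig::SweepingType::kAtomic,
    SweepingConfig::CompactableSpaceHandling::kSweep,
    cppgc::internal::FreeMemoryHandling::kDiscardWherePossible};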