Commit 25981026 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Improve accounting of PagedSpace::CommittedPhysicalMemory()

Instead of using the high water mark for determining this metric, we use
a bitset for all active/used system pages on a V8 heap page. Each time
when allocating a LAB on a page, we add the pages of that memory range
to that bitset. During sweeping we rebuild that bitset from scratch and
replace it with the old one in case free pages are discarded by the GC.
We DCHECK here that the sweeper only ever removes pages. This has the
nice benefit of ensuring that we don't miss any allocations (like we
do now for concurrent allocations).

CommittedPhysicalMemory for a page is then calculated by counting the
set bits in the bitset and multiplying it with the system page size.
This should be simpler to verify and track the "real" effective size
more precisely.

One case where we are partially less precise than the current
implementation is for LABs. In order to reduce complexity we now treat
all pages of a LAB allocation as active immediately. In the current
implementation we tried to only account the actual used part of the LAB
when changing the LAB later. This is more complex to track correctly
but also doesn't account the currently used LAB in effective size.

Change-Id: Ia83df9ad5fbb852f0717c4c396b5074604bd21e9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3497363
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79428}
parent 35703d9c
......@@ -1295,6 +1295,8 @@ filegroup(
"src/handles/maybe-handles.h",
"src/handles/persistent-handles.cc",
"src/handles/persistent-handles.h",
"src/heap/base/active-system-pages.cc",
"src/heap/base/active-system-pages.h",
"src/heap/allocation-observer.cc",
"src/heap/allocation-observer.h",
"src/heap/allocation-result.h",
......@@ -2935,6 +2937,8 @@ filegroup(
filegroup(
name = "v8_heap_base_files",
srcs = [
"src/heap/base/active-system-pages.cc",
"src/heap/base/active-system-pages.h",
"src/heap/base/stack.cc",
"src/heap/base/stack.h",
"src/heap/base/worklist.cc",
......
......@@ -3852,6 +3852,7 @@ v8_header_set("v8_internal_headers") {
":cppgc_headers",
":generate_bytecode_builtins_list",
":run_torque",
":v8_heap_base_headers",
":v8_libbase",
]
}
......@@ -5454,12 +5455,23 @@ v8_source_set("v8_bigint") {
configs = [ ":internal_config" ]
}
v8_source_set("v8_heap_base_headers") {
sources = [
"src/heap/base/active-system-pages.h",
"src/heap/base/stack.h",
"src/heap/base/worklist.h",
]
configs = [ ":internal_config" ]
public_deps = [ ":v8_libbase" ]
}
v8_source_set("v8_heap_base") {
sources = [
"src/heap/base/active-system-pages.cc",
"src/heap/base/stack.cc",
"src/heap/base/stack.h",
"src/heap/base/worklist.cc",
"src/heap/base/worklist.h",
]
if (is_clang || !is_win) {
......@@ -5496,7 +5508,10 @@ v8_source_set("v8_heap_base") {
configs = [ ":internal_config" ]
public_deps = [ ":v8_libbase" ]
public_deps = [
":v8_heap_base_headers",
":v8_libbase",
]
}
# This is split out to be a non-code containing target that the Chromium browser
......
......@@ -918,6 +918,8 @@ enum class CompactionSpaceKind {
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum PageSize { kRegular, kLarge };
enum class CodeFlushMode {
kFlushBytecode,
kFlushBaselineCode,
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/base/active-system-pages.h"
#include "src/base/bits.h"
#include "src/base/macros.h"
namespace heap {
namespace base {
// Resets the set and marks the system pages covering [0, header_size) as
// active. Returns the number of pages that became active.
// |user_page_size| is only used to check that the whole heap page fits into
// the fixed-size bitset.
size_t ActiveSystemPages::Init(size_t header_size, size_t page_size_bits,
                               size_t user_page_size) {
#if DEBUG
  // Shift size_t{1}, not a plain int literal: shifting an int by 31+ bits is
  // undefined behavior on platforms with 32-bit int.
  const size_t page_size = size_t{1} << page_size_bits;
  DCHECK_LE(RoundUp(user_page_size, page_size) >> page_size_bits,
            ActiveSystemPages::kMaxPages);
#endif  // DEBUG
  Clear();
  return Add(0, header_size, page_size_bits);
}
// Marks the system pages overlapping the byte range [start, end) (offsets
// relative to the heap page) as active. Returns the number of pages that were
// not active before this call.
size_t ActiveSystemPages::Add(uintptr_t start, uintptr_t end,
                              size_t page_size_bits) {
  // Shift size_t{1}, not a plain int literal, to avoid undefined behavior for
  // page_size_bits >= 31 on platforms with 32-bit int.
  const size_t page_size = size_t{1} << page_size_bits;

  DCHECK_LE(start, end);
  DCHECK_LE(end, kMaxPages * page_size);

  // Make sure we actually get the bitcount as argument.
  DCHECK_LT(page_size_bits, sizeof(uintptr_t) * CHAR_BIT);

  const uintptr_t start_page_bit =
      RoundDown(start, page_size) >> page_size_bits;
  const uintptr_t end_page_bit = RoundUp(end, page_size) >> page_size_bits;
  DCHECK_LE(start_page_bit, end_page_bit);

  const uintptr_t bits = end_page_bit - start_page_bit;
  DCHECK_LE(bits, kMaxPages);
  // A shift by kMaxPages (== 64) would be undefined behavior, so the all-ones
  // mask is special-cased. ~bitset_t{} avoids the signed/unsigned mixing of
  // the previous int64_t{-1} conversion.
  const bitset_t mask =
      bits == kMaxPages
          ? ~bitset_t{}
          : bitset_t{((uint64_t{1} << bits) - 1) << start_page_bit};

  const bitset_t added_pages = ~value_ & mask;
  value_ |= mask;
  return added_pages.count();
}
// Replaces the tracked set with |updated_value| and returns how many pages
// were dropped. The sweeper may only ever remove pages, never add them.
size_t ActiveSystemPages::Reduce(ActiveSystemPages updated_value) {
  // The new bitset must be a subset of the current one.
  DCHECK_EQ(~value_ & updated_value.value_, 0);
  const bitset_t dropped(value_ & ~updated_value.value_);
  value_ = updated_value.value_;
  return dropped.count();
}
// Deactivates every page and returns how many were active beforehand.
size_t ActiveSystemPages::Clear() {
  const size_t active_count = value_.count();
  value_.reset();
  return active_count;
}
// Returns the committed memory represented by this set: the number of active
// system pages times the system page size.
size_t ActiveSystemPages::Size(size_t page_size_bits) const {
  // Make sure we don't get the full page size as argument.
  DCHECK_LT(page_size_bits, sizeof(uintptr_t) * CHAR_BIT);
  const size_t page_size = size_t{1} << page_size_bits;
  return value_.count() * page_size;
}
} // namespace base
} // namespace heap
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_BASE_ACTIVE_SYSTEM_PAGES_H_
#define V8_HEAP_BASE_ACTIVE_SYSTEM_PAGES_H_
#include <bitset>
#include <cstdint>
#include "src/base/macros.h"
namespace heap {
namespace base {
// Class implements a bitset of system pages on a heap page. Bit i is set when
// system page i of the heap page is considered active (committed and in use).
class ActiveSystemPages final {
 public:
  // Defines the maximum number of system pages that can be tracked in one
  // instance.
  static constexpr size_t kMaxPages = 64;

  // Initializes the set of active pages to the system pages for the header.
  // Returns the number of pages that became active.
  V8_EXPORT_PRIVATE size_t Init(size_t header_size, size_t page_size_bits,
                                size_t user_page_size);

  // Adds the pages for this memory range. Returns the number of freshly added
  // pages. Parameter types use uintptr_t to match the out-of-line definition;
  // previously the declaration said size_t, which fails to link on platforms
  // where size_t and uintptr_t are distinct types.
  V8_EXPORT_PRIVATE size_t Add(uintptr_t start, uintptr_t end,
                               size_t page_size_bits);

  // Replaces the current bitset with the given argument. The new bitset needs
  // to be a proper subset of the current pages, which means this operation
  // can't add pages. Returns the number of removed pages.
  V8_EXPORT_PRIVATE size_t Reduce(ActiveSystemPages updated_value);

  // Removes all pages. Returns the number of removed pages.
  V8_EXPORT_PRIVATE size_t Clear();

  // Returns the memory used with the given page size.
  V8_EXPORT_PRIVATE size_t Size(size_t page_size_bits) const;

 private:
  using bitset_t = std::bitset<kMaxPages>;

  bitset_t value_;
};
} // namespace base
} // namespace heap
#endif // V8_HEAP_BASE_ACTIVE_SYSTEM_PAGES_H_
......@@ -4569,6 +4569,10 @@ void Heap::Verify() {
code_lo_space_->Verify(isolate());
if (new_lo_space_) new_lo_space_->Verify(isolate());
isolate()->string_table()->VerifyIfOwnedBy(isolate());
#if DEBUG
VerifyCommittedPhysicalMemory();
#endif // DEBUG
}
void Heap::VerifyReadOnlyHeap() {
......@@ -4760,7 +4764,15 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
space->VerifyCountersBeforeConcurrentSweeping();
}
}
#endif
// Debug-only: asks every paged space to verify that its cached
// committed-physical-memory counter matches the sum over its pages.
void Heap::VerifyCommittedPhysicalMemory() {
  PagedSpaceIterator spaces(this);
  for (PagedSpace* space = spaces.Next(); space != nullptr;
       space = spaces.Next()) {
    space->VerifyCommittedPhysicalMemory();
  }
}
#endif // DEBUG
void Heap::ZapFromSpace() {
if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
......
......@@ -1583,13 +1583,14 @@ class Heap {
#ifdef DEBUG
void VerifyCountersAfterSweeping();
void VerifyCountersBeforeConcurrentSweeping();
void VerifyCommittedPhysicalMemory();
void Print();
void PrintHandles();
// Report code statistics.
void ReportCodeStatistics(const char* title);
#endif
#endif // DEBUG
void* GetRandomMmapAddr() {
void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
......
......@@ -402,14 +402,15 @@ V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
PageSize page_size,
BaseSpace* owner) {
BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
reserve_area_size, commit_area_size, executable, owner);
if (basic_chunk == nullptr) return nullptr;
MemoryChunk* chunk =
MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);
MemoryChunk* chunk = MemoryChunk::Initialize(basic_chunk, isolate_->heap(),
executable, page_size);
#ifdef DEBUG
if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
......@@ -562,7 +563,7 @@ Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
chunk = AllocatePagePooled(owner);
}
if (chunk == nullptr) {
chunk = AllocateChunk(size, size, executable, owner);
chunk = AllocateChunk(size, size, executable, PageSize::kRegular, owner);
}
if (chunk == nullptr) return nullptr;
return owner->InitializePage(chunk);
......@@ -585,7 +586,8 @@ MemoryAllocator::RemapSharedPage(
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
MemoryChunk* chunk =
AllocateChunk(size, size, executable, PageSize::kLarge, owner);
if (chunk == nullptr) return nullptr;
return LargePage::Initialize(isolate_->heap(), chunk, executable);
}
......@@ -609,7 +611,8 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(Space* owner) {
BasicMemoryChunk* basic_chunk =
BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
area_end, owner, std::move(reservation));
MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE);
MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE,
PageSize::kRegular);
size_ += size;
return chunk;
}
......
......@@ -222,6 +222,7 @@ class MemoryAllocator {
V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
PageSize page_size,
BaseSpace* space);
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
......
......@@ -5,6 +5,7 @@
#ifndef V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
#define V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
#include "src/heap/base/active-system-pages.h"
#include "src/heap/heap.h"
#include "src/heap/list.h"
#include "src/heap/progress-bar.h"
......@@ -32,6 +33,8 @@ enum RememberedSetType {
NUMBER_OF_REMEMBERED_SET_TYPES
};
using ActiveSystemPages = ::heap::base::ActiveSystemPages;
class V8_EXPORT_PRIVATE MemoryChunkLayout {
public:
static const int kNumSets = NUMBER_OF_REMEMBERED_SET_TYPES;
......@@ -68,6 +71,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(Bitmap*, YoungGenerationBitmap),
FIELD(CodeObjectRegistry*, CodeObjectRegistry),
FIELD(PossiblyEmptyBuckets, PossiblyEmptyBuckets),
FIELD(ActiveSystemPages, ActiveSystemPages),
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
FIELD(ObjectStartBitmap, ObjectStartBitmap),
#endif
......
......@@ -6,6 +6,7 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/common/globals.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
......@@ -118,7 +119,8 @@ PageAllocator::Permission DefaultWritableCodePermissions() {
} // namespace
MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
Executability executable) {
Executability executable,
PageSize page_size) {
MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
......@@ -181,6 +183,15 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
chunk->possibly_empty_buckets_.Initialize();
if (page_size == PageSize::kRegular) {
chunk->active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
MemoryAllocator::GetCommitPageSizeBits(),
chunk->size());
} else {
// We do not track active system pages for large pages.
chunk->active_system_pages_.Clear();
}
// All pages of a shared heap need to be marked with this flag.
if (heap->IsShared()) chunk->SetFlag(IN_SHARED_HEAP);
......@@ -196,9 +207,8 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
}
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
return size();
return high_water_mark_;
if (!base::OS::HasLazyCommits() || IsLargePage()) return size();
return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits());
}
void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
......
......@@ -10,6 +10,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
......@@ -219,7 +220,7 @@ class MemoryChunk : public BasicMemoryChunk {
protected:
static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
Executability executable);
Executability executable, PageSize page_size);
// Release all memory allocated by the chunk. Should be called when memory
// chunk is about to be freed.
......@@ -291,6 +292,8 @@ class MemoryChunk : public BasicMemoryChunk {
PossiblyEmptyBuckets possibly_empty_buckets_;
ActiveSystemPages active_system_pages_;
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
ObjectStartBitmap object_start_bitmap_;
#endif
......
......@@ -491,6 +491,10 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
original_limit_.store(limit(), std::memory_order_relaxed);
original_top_.store(top(), std::memory_order_release);
}
Page* page = to_space_.current_page();
page->active_system_pages()->Add(top() - page->address(),
limit() - page->address(),
MemoryAllocator::GetCommitPageSizeBits());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
UpdateInlineAllocationLimit(0);
......
......@@ -4,6 +4,8 @@
#include "src/heap/paged-spaces.h"
#include <atomic>
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
......@@ -13,8 +15,10 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/safepoint.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/string.h"
#include "src/utils/utils.h"
......@@ -211,15 +215,42 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
}
size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
if (!base::OS::HasLazyCommits()) {
DCHECK_EQ(0, committed_physical_memory());
return CommittedMemory();
}
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
base::MutexGuard guard(mutex());
return committed_physical_memory();
}
// Atomically grows the space's committed-physical-memory counter. No-op when
// the OS commits eagerly (the metric then equals committed memory) or when
// there is nothing to add.
void PagedSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
  if (!base::OS::HasLazyCommits() || increment_value == 0) return;
  const size_t previous = committed_physical_memory_.fetch_add(
      increment_value, std::memory_order_relaxed);
  USE(previous);
  // The counter must not wrap around on increment.
  DCHECK_LT(previous, previous + increment_value);
}
// Atomically shrinks the space's committed-physical-memory counter. No-op when
// the OS commits eagerly or when there is nothing to subtract.
void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
  if (!base::OS::HasLazyCommits() || decrement_value == 0) return;
  const size_t previous = committed_physical_memory_.fetch_sub(
      decrement_value, std::memory_order_relaxed);
  USE(previous);
  // The counter must not underflow below zero.
  DCHECK_GT(previous, previous - decrement_value);
}
#if DEBUG
// Debug-only: recomputes committed physical memory from scratch by summing
// the per-page counters and checks it against the space's cached counter.
// Requires a safepoint so pages cannot change concurrently.
void PagedSpace::VerifyCommittedPhysicalMemory() {
  heap()->safepoint()->AssertActive();
  size_t size = 0;
  for (Page* page : *this) {
    // Sweeping rebuilds the per-page bitsets; they are only reliable once done.
    DCHECK(page->SweepingDone());
    size += page->CommittedPhysicalMemory();
  }
  // Removed stray `return size;` — this function returns void; returning a
  // value here does not compile.
  // Ensure that the space's counter matches the sum of all page counters.
  DCHECK_EQ(size, CommittedPhysicalMemory());
}
#endif  // DEBUG
bool PagedSpace::ContainsSlow(Address addr) const {
Page* p = Page::FromAddress(addr);
......@@ -264,6 +295,7 @@ size_t PagedSpace::AddPage(Page* page) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
return RelinkFreeListCategories(page);
}
......@@ -278,6 +310,7 @@ void PagedSpace::RemovePage(Page* page) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
}
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
......@@ -346,6 +379,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
CHECK_LE(size_in_bytes, page->area_size());
Free(page->area_start() + size_in_bytes, page->area_size() - size_in_bytes,
SpaceAccountingMode::kSpaceAccounted);
AddRangeToActiveSystemPages(page, object_start, object_start + size_in_bytes);
return std::make_pair(object_start, size_in_bytes);
}
......@@ -492,6 +526,7 @@ void PagedSpace::ReleasePage(Page* page) {
}
AccountUncommitted(page->size());
DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
accounting_stats_.DecreaseCapacity(page->area_size());
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
}
......@@ -573,6 +608,7 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
AddRangeToActiveSystemPages(page, start, limit);
return true;
}
......@@ -693,6 +729,7 @@ PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
}
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
AddRangeToActiveSystemPages(page, start, limit);
return std::make_pair(start, used_size_in_bytes);
}
......@@ -1006,6 +1043,28 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
return result;
}
// Marks the system pages backing [start, end) on |page| as active and bumps
// the space's committed-physical-memory counter by the newly activated amount.
void PagedSpace::AddRangeToActiveSystemPages(Page* page, Address start,
                                             Address end) {
  DCHECK_LE(page->address(), start);
  DCHECK_LT(start, end);
  DCHECK_LE(end, page->address() + Page::kPageSize);

  // The bitset works on offsets relative to the page start.
  const Address page_start = page->address();
  const size_t newly_active_pages = page->active_system_pages()->Add(
      start - page_start, end - page_start,
      MemoryAllocator::GetCommitPageSizeBits());

  IncrementCommittedPhysicalMemory(newly_active_pages *
                                   MemoryAllocator::GetCommitPageSize());
}
// Replaces |page|'s active-page bitset with the (subset) bitset rebuilt by the
// sweeper and decrements the space counter by the memory that was dropped.
void PagedSpace::ReduceActiveSystemPages(
    Page* page, ActiveSystemPages active_system_pages) {
  const size_t dropped_pages =
      page->active_system_pages()->Reduce(active_system_pages);
  DecrementCommittedPhysicalMemory(dropped_pages *
                                   MemoryAllocator::GetCommitPageSize());
}
// -----------------------------------------------------------------------------
// MapSpace implementation
......
......@@ -5,6 +5,7 @@
#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_
#include <atomic>
#include <memory>
#include <utility>
......@@ -15,6 +16,7 @@
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
......@@ -108,6 +110,13 @@ class V8_EXPORT_PRIVATE PagedSpace
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
#if DEBUG
void VerifyCommittedPhysicalMemory();
#endif // DEBUG
void IncrementCommittedPhysicalMemory(size_t increment_value);
void DecrementCommittedPhysicalMemory(size_t decrement_value);
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
......@@ -327,6 +336,10 @@ class V8_EXPORT_PRIVATE PagedSpace
return &pending_allocation_mutex_;
}
void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
void ReduceActiveSystemPages(Page* page,
ActiveSystemPages active_system_pages);
private:
class ConcurrentAllocationMutex {
public:
......@@ -423,6 +436,10 @@ class V8_EXPORT_PRIVATE PagedSpace
V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
AllocationOrigin origin);
size_t committed_physical_memory() const {
return committed_physical_memory_.load(std::memory_order_relaxed);
}
Executability executable_;
CompactionSpaceKind compaction_space_kind_;
......@@ -443,6 +460,8 @@ class V8_EXPORT_PRIVATE PagedSpace
// Protects original_top_ and original_limit_.
base::SharedMutex pending_allocation_mutex_;
std::atomic<size_t> committed_physical_memory_{0};
friend class IncrementalMarking;
friend class MarkCompactCollector;
......
......@@ -13,6 +13,7 @@
#include "src/base/macros.h"
#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
......@@ -22,6 +23,7 @@
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
......
......@@ -13,11 +13,13 @@
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/base-space.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
#include "src/heap/heap.h"
#include "src/heap/linear-allocation-area.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/objects.h"
#include "src/utils/allocation.h"
......@@ -307,6 +309,8 @@ class Page : public MemoryChunk {
void MoveOldToNewRememberedSetForSweeping();
void MergeOldToNewRememberedSets();
ActiveSystemPages* active_system_pages() { return &active_system_pages_; }
private:
friend class MemoryAllocator;
};
......
......@@ -6,6 +6,7 @@
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/free-list-inl.h"
#include "src/heap/gc-tracer.h"
......@@ -335,6 +336,15 @@ int Sweeper::RawSweep(
CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
if (code_object_registry) code_object_registry->Clear();
base::Optional<ActiveSystemPages> active_system_pages_after_sweeping;
if (should_reduce_memory_) {
// Only decrement counter when we discard unused system pages.
active_system_pages_after_sweeping = ActiveSystemPages();
active_system_pages_after_sweeping->Init(
MemoryChunkLayout::kMemoryChunkHeaderSize,
MemoryAllocator::GetCommitPageSizeBits(), Page::kPageSize);
}
// Phase 2: Free the non-live memory and clean-up the regular remembered set
// entires.
......@@ -389,6 +399,12 @@ int Sweeper::RawSweep(
live_bytes += size;
free_start = free_end + size;
if (active_system_pages_after_sweeping) {
active_system_pages_after_sweeping->Add(
free_end - p->address(), free_start - p->address(),
MemoryAllocator::GetCommitPageSizeBits());
}
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
p->object_start_bitmap()->SetBit(object.address());
#endif
......@@ -411,6 +427,13 @@ int Sweeper::RawSweep(
CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map);
ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);
if (active_system_pages_after_sweeping) {
// Decrement accounted memory for discarded memory.
PagedSpace* paged_space = static_cast<PagedSpace*>(p->owner());
paged_space->ReduceActiveSystemPages(p,
*active_system_pages_after_sweeping);
}
if (code_object_registry) code_object_registry->Finalize();
p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
......
......@@ -114,7 +114,8 @@ class V8_NODISCARD TestCodePageAllocatorScope {
static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
v8::PageAllocator* code_page_allocator,
size_t reserve_area_size, size_t commit_area_size,
Executability executable, Space* space) {
Executability executable, PageSize page_size,
Space* space) {
TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
TestCodePageAllocatorScope test_code_page_allocator_scope(
......@@ -129,7 +130,7 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
(executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
reserve_area_size, commit_area_size, executable, space);
reserve_area_size, commit_area_size, executable, page_size, space);
size_t reserved_size =
((executable == EXECUTABLE))
? allocatable_memory_area_offset +
......@@ -179,11 +180,12 @@ TEST(MemoryChunk) {
base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, EXECUTABLE, heap->code_space());
initial_commit_area_size, EXECUTABLE, PageSize::kLarge,
heap->code_space());
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, NOT_EXECUTABLE,
heap->old_space());
PageSize::kLarge, heap->old_space());
}
}
......
......@@ -42,7 +42,10 @@ v8_executable("v8_heap_base_unittests") {
v8_source_set("v8_heap_base_unittests_sources") {
testonly = true
sources = [ "heap/base/worklist-unittest.cc" ]
sources = [
"heap/base/active-system-pages-unittest.cc",
"heap/base/worklist-unittest.cc",
]
configs = [
"../..:external_config",
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/base/active-system-pages.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace heap {
namespace base {
// Adding disjoint single pages activates one bit each; re-adding an already
// active range reports zero freshly added pages.
TEST(ActiveSystemPagesTest, Add) {
  ActiveSystemPages pages;
  // With 0 page-size bits each "system page" is one byte-offset unit.
  const size_t kPageSizeBits = 0;

  EXPECT_EQ(pages.Add(0, 1, kPageSizeBits), size_t{1});
  EXPECT_EQ(pages.Add(1, 2, kPageSizeBits), size_t{1});
  EXPECT_EQ(pages.Add(63, 64, kPageSizeBits), size_t{1});
  EXPECT_EQ(pages.Size(kPageSizeBits), size_t{3});

  // Try to add page a second time.
  EXPECT_EQ(pages.Add(0, 2, kPageSizeBits), size_t{0});
}
// Ranges that are not page-aligned are rounded outward: any partially covered
// system page counts as active.
TEST(ActiveSystemPagesTest, AddUnalignedRange) {
  ActiveSystemPages pages;
  const size_t kPageSizeBits = 12;
  const size_t kPageSize = size_t{1} << kPageSizeBits;
  const size_t kWordSize = 8;
  // [0, page+word) touches pages 0 and 1.
  EXPECT_EQ(pages.Add(0, kPageSize + kWordSize, kPageSizeBits), size_t{2});
  // [3*page-word, 3*page) touches only page 2.
  EXPECT_EQ(pages.Add(3 * kPageSize - kWordSize, 3 * kPageSize, kPageSizeBits),
            size_t{1});
  // The middle range only overlaps pages that are already active.
  EXPECT_EQ(pages.Add(kPageSize + kWordSize, 3 * kPageSize - kWordSize,
                      kPageSizeBits),
            size_t{0});
  EXPECT_EQ(pages.Size(kPageSizeBits), size_t{3} * kPageSize);
}
// Adding the full 64-page range exercises the all-ones-mask special case
// (a plain shift by 64 would be undefined behavior).
TEST(ActiveSystemPagesTest, AddFullBitset) {
  ActiveSystemPages pages;
  const size_t kPageSizeBits = 0;

  EXPECT_EQ(pages.Add(0, 64, kPageSizeBits), size_t{64});
  EXPECT_EQ(pages.Add(0, 64, kPageSizeBits), size_t{0});
  EXPECT_EQ(pages.Size(kPageSizeBits), size_t{64});
}
// Reduce replaces the set with a subset and reports how many pages dropped out.
TEST(ActiveSystemPagesTest, Reduce) {
  ActiveSystemPages original;
  const size_t kPageSizeBits = 0;
  EXPECT_EQ(original.Add(0, 3, kPageSizeBits), size_t{3});

  ActiveSystemPages updated;
  EXPECT_EQ(updated.Add(1, 3, kPageSizeBits), size_t{2});

  // {0,1,2} reduced to {1,2} drops exactly one page.
  EXPECT_EQ(original.Reduce(updated), size_t{1});
}
// Reducing from a completely full bitset down to a single page drops 63 pages.
TEST(ActiveSystemPagesTest, ReduceFullBitset) {
  ActiveSystemPages original;
  const size_t kPageSizeBits = 0;
  EXPECT_EQ(original.Add(0, 64, kPageSizeBits), size_t{64});

  ActiveSystemPages updated;
  EXPECT_EQ(updated.Add(63, 64, kPageSizeBits), size_t{1});

  EXPECT_EQ(original.Reduce(updated), size_t{63});
}
// Clear removes every active page, returns the previous count, and leaves the
// instance reusable for subsequent Add calls.
TEST(ActiveSystemPagesTest, Clear) {
  ActiveSystemPages pages;
  const size_t kPageSizeBits = 0;

  EXPECT_EQ(pages.Add(0, 64, kPageSizeBits), size_t{64});
  EXPECT_EQ(pages.Clear(), size_t{64});
  EXPECT_EQ(pages.Size(kPageSizeBits), size_t{0});

  EXPECT_EQ(pages.Add(0, 2, kPageSizeBits), size_t{2});
  EXPECT_EQ(pages.Clear(), size_t{2});
  EXPECT_EQ(pages.Size(kPageSizeBits), size_t{0});
}
} // namespace base
} // namespace heap
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment