Commit 431d2bf6 authored by Anton Bikineev, committed by V8 LUCI CQ

cppgc: Store the list of to-be-finalized objects inlined in HoH

The list of to-be-finalized objects can grow significantly. While
running Speedometer2, the metadata that stores to-be-finalized objects
can be the second largest contributor to heap consumption (overall
taking up 2.6MB, checked with heaptrack).

The CL changes the list to be stored inline in HoH, if the caged heap is
enabled, rather than in a separate vector.

Bug: chromium:1249550
Change-Id: I04a0c84d118655fa6ff8e2440423e802cd722842
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3295448
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78038}
parent c0bc99e0
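
The idea in a minimal standalone sketch (illustrative only; the types and
names below are hypothetical, not the actual V8 sources): with a 4GB cage
reservation aligned to its own size, any address inside the cage can be
encoded as a 32-bit offset from the cage base, so the "next unfinalized
object" link fits into the 4-byte header slot that previously held padding
on 64-bit targets.

#include <cassert>
#include <cstdint>

// Hypothetical 4GB cage, aligned to its size (stands in for
// kCagedHeapReservationSize/Alignment). Assumes a 64-bit platform.
constexpr uint64_t kCageSize = uint64_t{1} << 32;

struct Header {
  uint32_t next_unfinalized = 0;  // Offset from cage base; 0 means "none".
  uint16_t encoded_high = 0;
  uint16_t encoded_low = 0;

  // Encode a pointer as its offset within the cage.
  void SetNextUnfinalized(Header* next) {
    next_unfinalized = static_cast<uint32_t>(
        reinterpret_cast<uint64_t>(next) & (kCageSize - 1));
  }

  // Decode by re-adding the cage base; offset 0 decodes to nullptr.
  Header* GetNextUnfinalized(uint64_t cage_base) const {
    assert((cage_base & (kCageSize - 1)) == 0);  // Base is cage-aligned.
    return next_unfinalized
               ? reinterpret_cast<Header*>(cage_base + next_unfinalized)
               : nullptr;
  }
};

This threads the to-be-finalized list through the headers themselves (an
intrusive singly-linked list), so the sweeper needs no side vector that
grows with the number of finalizable objects; offset 0 doubles as the
end-of-list sentinel, which works as long as no object header sits exactly
at the cage base.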
src/heap/cppgc/caged-heap.h
@@ -5,6 +5,7 @@
 #ifndef V8_HEAP_CPPGC_CAGED_HEAP_H_
 #define V8_HEAP_CPPGC_CAGED_HEAP_H_

+#include <limits>
 #include <memory>

 #include "include/cppgc/platform.h"
@@ -22,7 +23,11 @@ class CagedHeap final {
  public:
   using AllocatorType = v8::base::BoundedPageAllocator;

-  static uintptr_t OffsetFromAddress(const void* address) {
+  template <typename RetType = uintptr_t>
+  static RetType OffsetFromAddress(const void* address) {
+    static_assert(
+        std::numeric_limits<RetType>::max() >= (kCagedHeapReservationSize - 1),
+        "The return type should be large enough");
     return reinterpret_cast<uintptr_t>(address) &
            (kCagedHeapReservationAlignment - 1);
   }
@@ -52,6 +57,8 @@
            reserved_area_.address();
   }

+  void* base() const { return reserved_area_.address(); }
+
  private:
   const VirtualMemory reserved_area_;
   std::unique_ptr<AllocatorType> bounded_allocator_;

src/heap/cppgc/heap-object-header.h
@@ -19,6 +19,10 @@
 #include "src/heap/cppgc/gc-info-table.h"
 #include "src/heap/cppgc/globals.h"

+#if defined(CPPGC_CAGED_HEAP)
+#include "src/heap/cppgc/caged-heap.h"
+#endif  // defined(CPPGC_CAGED_HEAP)
+
 namespace cppgc {

 class Visitor;
@@ -102,6 +106,11 @@ class HeapObjectHeader {
   inline bool IsFinalizable() const;
   void Finalize();

+#if defined(CPPGC_CAGED_HEAP)
+  inline void SetNextUnfinalized(HeapObjectHeader* next);
+  inline HeapObjectHeader* GetNextUnfinalized(uintptr_t cage_base) const;
+#endif  // defined(CPPGC_CAGED_HEAP)
+
   V8_EXPORT_PRIVATE HeapObjectName GetName() const;

   template <AccessMode = AccessMode::kNonAtomic>
@@ -140,7 +149,13 @@
   inline void StoreEncoded(uint16_t bits, uint16_t mask);

 #if defined(V8_TARGET_ARCH_64_BIT)
+  // If cage is enabled, to save on space required by sweeper metadata, we store
+  // the list of to-be-finalized objects inlined in HeapObjectHeader.
+#if defined(CPPGC_CAGED_HEAP)
+  uint32_t next_unfinalized_ = 0;
+#else  // !defined(CPPGC_CAGED_HEAP)
   uint32_t padding_ = 0;
+#endif  // !defined(CPPGC_CAGED_HEAP)
 #endif  // defined(V8_TARGET_ARCH_64_BIT)
   uint16_t encoded_high_;
   uint16_t encoded_low_;
@@ -163,9 +178,9 @@ const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) {
 }

 HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
-#if defined(V8_TARGET_ARCH_64_BIT)
+#if defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
   USE(padding_);
-#endif  // defined(V8_TARGET_ARCH_64_BIT)
+#endif  // defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
   DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
   DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
   DCHECK_GE(kMaxSize, size);
@@ -288,6 +303,22 @@ bool HeapObjectHeader::IsFinalizable() const {
   return gc_info.finalize;
 }

+#if defined(CPPGC_CAGED_HEAP)
+void HeapObjectHeader::SetNextUnfinalized(HeapObjectHeader* next) {
+  next_unfinalized_ = CagedHeap::OffsetFromAddress<uint32_t>(next);
+}
+
+HeapObjectHeader* HeapObjectHeader::GetNextUnfinalized(
+    uintptr_t cage_base) const {
+  DCHECK(cage_base);
+  DCHECK_EQ(0u,
+            CagedHeap::OffsetFromAddress(reinterpret_cast<void*>(cage_base)));
+  return next_unfinalized_ ? reinterpret_cast<HeapObjectHeader*>(
+                                 cage_base + next_unfinalized_)
+                           : nullptr;
+}
+#endif  // defined(CPPGC_CAGED_HEAP)
+
 template <AccessMode mode>
 void HeapObjectHeader::Trace(Visitor* visitor) const {
   const GCInfo& gc_info =

src/heap/cppgc/sweeper.cc
@@ -166,7 +166,14 @@ class ThreadSafeStack {
 struct SpaceState {
   struct SweptPageState {
     BasePage* page = nullptr;
+#if defined(CPPGC_CAGED_HEAP)
+    // The list of unfinalized objects may be extremely big. To save on space,
+    // if cage is enabled, the list of unfinalized objects is stored inlined in
+    // HeapObjectHeader.
+    HeapObjectHeader* unfinalized_objects_head = nullptr;
+#else  // !defined(CPPGC_CAGED_HEAP)
     std::vector<HeapObjectHeader*> unfinalized_objects;
+#endif  // !defined(CPPGC_CAGED_HEAP)
     FreeList cached_free_list;
     std::vector<FreeList::Block> unfinalized_free_list;
     bool is_empty = false;
@@ -230,7 +237,18 @@ class DeferredFinalizationBuilder final : public FreeHandler {
   void AddFinalizer(HeapObjectHeader* header, size_t size) {
     if (header->IsFinalizable()) {
+#if defined(CPPGC_CAGED_HEAP)
+      if (!current_unfinalized_) {
+        DCHECK_NULL(result_.unfinalized_objects_head);
+        current_unfinalized_ = header;
+        result_.unfinalized_objects_head = header;
+      } else {
+        current_unfinalized_->SetNextUnfinalized(header);
+        current_unfinalized_ = header;
+      }
+#else  // !defined(CPPGC_CAGED_HEAP)
       result_.unfinalized_objects.push_back({header});
+#endif  // !defined(CPPGC_CAGED_HEAP)
       found_finalizer_ = true;
     } else {
       SetMemoryInaccessible(header, size);
@@ -254,6 +272,7 @@
  private:
   ResultType result_;
+  HeapObjectHeader* current_unfinalized_ = 0;
   bool found_finalizer_ = false;
 };
@@ -369,11 +388,27 @@ class SweepFinalizer final {
     BasePage* page = page_state->page;

     // Call finalizers.
-    for (HeapObjectHeader* object : page_state->unfinalized_objects) {
-      const size_t size = object->AllocatedSize();
-      object->Finalize();
-      SetMemoryInaccessible(object, size);
-    }
+    const auto finalize_header = [](HeapObjectHeader* header) {
+      const size_t size = header->AllocatedSize();
+      header->Finalize();
+      SetMemoryInaccessible(header, size);
+    };
+#if defined(CPPGC_CAGED_HEAP)
+    const uint64_t cage_base =
+        reinterpret_cast<uint64_t>(page->heap().caged_heap().base());
+    HeapObjectHeader* next_unfinalized = 0;
+
+    for (auto* unfinalized_header = page_state->unfinalized_objects_head;
+         unfinalized_header; unfinalized_header = next_unfinalized) {
+      next_unfinalized = unfinalized_header->GetNextUnfinalized(cage_base);
+      finalize_header(unfinalized_header);
+    }
+#else  // !defined(CPPGC_CAGED_HEAP)
+    for (HeapObjectHeader* unfinalized_header :
+         page_state->unfinalized_objects) {
+      finalize_header(unfinalized_header);
+    }
+#endif  // !defined(CPPGC_CAGED_HEAP)

     // Unmap page if empty.
     if (page_state->is_empty) {
@@ -576,10 +611,15 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
       page.space().AddPage(&page);
       return true;
     }
+#if defined(CPPGC_CAGED_HEAP)
+    HeapObjectHeader* const unfinalized_objects =
+        header->IsFinalizable() ? page.ObjectHeader() : nullptr;
+#else  // !defined(CPPGC_CAGED_HEAP)
     std::vector<HeapObjectHeader*> unfinalized_objects;
     if (header->IsFinalizable()) {
       unfinalized_objects.push_back(page.ObjectHeader());
     }
+#endif  // !defined(CPPGC_CAGED_HEAP)
     const size_t space_index = page.space().index();
     DCHECK_GT(states_->size(), space_index);
     SpaceState& state = (*states_)[space_index];