Commit 431d2bf6 authored by Anton Bikineev, committed by V8 LUCI CQ

cppgc: Store the list of to-be-finalized objects inline in HoH

The list of to-be-finalized objects can grow significantly. While
running Speedometer2, the metadata that stores to-be-finalized objects
can be the second largest contributor to heap consumption (overall
taking up 2.6MB, checked with heaptrack).

This CL changes the list to be stored inline in the HeapObjectHeader (HoH)
when the caged heap is enabled, rather than in a separate vector.

Bug: chromium:1249550
Change-Id: I04a0c84d118655fa6ff8e2440423e802cd722842
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3295448
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78038}
parent c0bc99e0
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#ifndef V8_HEAP_CPPGC_CAGED_HEAP_H_ #ifndef V8_HEAP_CPPGC_CAGED_HEAP_H_
#define V8_HEAP_CPPGC_CAGED_HEAP_H_ #define V8_HEAP_CPPGC_CAGED_HEAP_H_
#include <limits>
#include <memory> #include <memory>
#include "include/cppgc/platform.h" #include "include/cppgc/platform.h"
...@@ -22,7 +23,11 @@ class CagedHeap final { ...@@ -22,7 +23,11 @@ class CagedHeap final {
public: public:
using AllocatorType = v8::base::BoundedPageAllocator; using AllocatorType = v8::base::BoundedPageAllocator;
static uintptr_t OffsetFromAddress(const void* address) { template <typename RetType = uintptr_t>
static RetType OffsetFromAddress(const void* address) {
static_assert(
std::numeric_limits<RetType>::max() >= (kCagedHeapReservationSize - 1),
"The return type should be large enough");
return reinterpret_cast<uintptr_t>(address) & return reinterpret_cast<uintptr_t>(address) &
(kCagedHeapReservationAlignment - 1); (kCagedHeapReservationAlignment - 1);
} }
...@@ -52,6 +57,8 @@ class CagedHeap final { ...@@ -52,6 +57,8 @@ class CagedHeap final {
reserved_area_.address(); reserved_area_.address();
} }
void* base() const { return reserved_area_.address(); }
private: private:
const VirtualMemory reserved_area_; const VirtualMemory reserved_area_;
std::unique_ptr<AllocatorType> bounded_allocator_; std::unique_ptr<AllocatorType> bounded_allocator_;
......
...@@ -19,6 +19,10 @@ ...@@ -19,6 +19,10 @@
#include "src/heap/cppgc/gc-info-table.h" #include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/globals.h" #include "src/heap/cppgc/globals.h"
#if defined(CPPGC_CAGED_HEAP)
#include "src/heap/cppgc/caged-heap.h"
#endif // defined(CPPGC_CAGED_HEAP)
namespace cppgc { namespace cppgc {
class Visitor; class Visitor;
...@@ -102,6 +106,11 @@ class HeapObjectHeader { ...@@ -102,6 +106,11 @@ class HeapObjectHeader {
inline bool IsFinalizable() const; inline bool IsFinalizable() const;
void Finalize(); void Finalize();
#if defined(CPPGC_CAGED_HEAP)
inline void SetNextUnfinalized(HeapObjectHeader* next);
inline HeapObjectHeader* GetNextUnfinalized(uintptr_t cage_base) const;
#endif // defined(CPPGC_CAGED_HEAP)
V8_EXPORT_PRIVATE HeapObjectName GetName() const; V8_EXPORT_PRIVATE HeapObjectName GetName() const;
template <AccessMode = AccessMode::kNonAtomic> template <AccessMode = AccessMode::kNonAtomic>
...@@ -140,7 +149,13 @@ class HeapObjectHeader { ...@@ -140,7 +149,13 @@ class HeapObjectHeader {
inline void StoreEncoded(uint16_t bits, uint16_t mask); inline void StoreEncoded(uint16_t bits, uint16_t mask);
#if defined(V8_TARGET_ARCH_64_BIT) #if defined(V8_TARGET_ARCH_64_BIT)
// If cage is enabled, to save on space required by sweeper metadata, we store
// the list of to-be-finalized objects inlined in HeapObjectHeader.
#if defined(CPPGC_CAGED_HEAP)
uint32_t next_unfinalized_ = 0;
#else // !defined(CPPGC_CAGED_HEAP)
uint32_t padding_ = 0; uint32_t padding_ = 0;
#endif // !defined(CPPGC_CAGED_HEAP)
#endif // defined(V8_TARGET_ARCH_64_BIT) #endif // defined(V8_TARGET_ARCH_64_BIT)
uint16_t encoded_high_; uint16_t encoded_high_;
uint16_t encoded_low_; uint16_t encoded_low_;
...@@ -163,9 +178,9 @@ const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) { ...@@ -163,9 +178,9 @@ const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) {
} }
HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) { HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#if defined(V8_TARGET_ARCH_64_BIT) #if defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
USE(padding_); USE(padding_);
#endif // defined(V8_TARGET_ARCH_64_BIT) #endif // defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex); DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1)); DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
DCHECK_GE(kMaxSize, size); DCHECK_GE(kMaxSize, size);
...@@ -288,6 +303,22 @@ bool HeapObjectHeader::IsFinalizable() const { ...@@ -288,6 +303,22 @@ bool HeapObjectHeader::IsFinalizable() const {
return gc_info.finalize; return gc_info.finalize;
} }
#if defined(CPPGC_CAGED_HEAP)
// Links `next` into the intrusive to-be-finalized list by storing its 32-bit
// offset within the heap cage. An offset of 0 encodes "no next element"
// (OffsetFromAddress(nullptr) yields 0).
void HeapObjectHeader::SetNextUnfinalized(HeapObjectHeader* next) {
  const uint32_t next_offset = CagedHeap::OffsetFromAddress<uint32_t>(next);
  next_unfinalized_ = next_offset;
}

// Decodes the stored cage offset back into a pointer. `cage_base` must be
// non-null and cage-aligned (i.e. itself at offset 0 within the cage).
HeapObjectHeader* HeapObjectHeader::GetNextUnfinalized(
    uintptr_t cage_base) const {
  DCHECK(cage_base);
  DCHECK_EQ(0u,
            CagedHeap::OffsetFromAddress(reinterpret_cast<void*>(cage_base)));
  if (!next_unfinalized_) return nullptr;
  return reinterpret_cast<HeapObjectHeader*>(cage_base + next_unfinalized_);
}
#endif  // defined(CPPGC_CAGED_HEAP)
template <AccessMode mode> template <AccessMode mode>
void HeapObjectHeader::Trace(Visitor* visitor) const { void HeapObjectHeader::Trace(Visitor* visitor) const {
const GCInfo& gc_info = const GCInfo& gc_info =
......
...@@ -166,7 +166,14 @@ class ThreadSafeStack { ...@@ -166,7 +166,14 @@ class ThreadSafeStack {
struct SpaceState { struct SpaceState {
struct SweptPageState { struct SweptPageState {
BasePage* page = nullptr; BasePage* page = nullptr;
#if defined(CPPGC_CAGED_HEAP)
// The list of unfinalized objects may be extremely big. To save on space,
// if cage is enabled, the list of unfinalized objects is stored inlined in
// HeapObjectHeader.
HeapObjectHeader* unfinalized_objects_head = nullptr;
#else // !defined(CPPGC_CAGED_HEAP)
std::vector<HeapObjectHeader*> unfinalized_objects; std::vector<HeapObjectHeader*> unfinalized_objects;
#endif // !defined(CPPGC_CAGED_HEAP)
FreeList cached_free_list; FreeList cached_free_list;
std::vector<FreeList::Block> unfinalized_free_list; std::vector<FreeList::Block> unfinalized_free_list;
bool is_empty = false; bool is_empty = false;
...@@ -230,7 +237,18 @@ class DeferredFinalizationBuilder final : public FreeHandler { ...@@ -230,7 +237,18 @@ class DeferredFinalizationBuilder final : public FreeHandler {
void AddFinalizer(HeapObjectHeader* header, size_t size) { void AddFinalizer(HeapObjectHeader* header, size_t size) {
if (header->IsFinalizable()) { if (header->IsFinalizable()) {
#if defined(CPPGC_CAGED_HEAP)
if (!current_unfinalized_) {
DCHECK_NULL(result_.unfinalized_objects_head);
current_unfinalized_ = header;
result_.unfinalized_objects_head = header;
} else {
current_unfinalized_->SetNextUnfinalized(header);
current_unfinalized_ = header;
}
#else // !defined(CPPGC_CAGED_HEAP)
result_.unfinalized_objects.push_back({header}); result_.unfinalized_objects.push_back({header});
#endif // !defined(CPPGC_CAGED_HEAP)
found_finalizer_ = true; found_finalizer_ = true;
} else { } else {
SetMemoryInaccessible(header, size); SetMemoryInaccessible(header, size);
...@@ -254,6 +272,7 @@ class DeferredFinalizationBuilder final : public FreeHandler { ...@@ -254,6 +272,7 @@ class DeferredFinalizationBuilder final : public FreeHandler {
private: private:
ResultType result_; ResultType result_;
HeapObjectHeader* current_unfinalized_ = 0;
bool found_finalizer_ = false; bool found_finalizer_ = false;
}; };
...@@ -369,11 +388,27 @@ class SweepFinalizer final { ...@@ -369,11 +388,27 @@ class SweepFinalizer final {
BasePage* page = page_state->page; BasePage* page = page_state->page;
// Call finalizers. // Call finalizers.
for (HeapObjectHeader* object : page_state->unfinalized_objects) { const auto finalize_header = [](HeapObjectHeader* header) {
const size_t size = object->AllocatedSize(); const size_t size = header->AllocatedSize();
object->Finalize(); header->Finalize();
SetMemoryInaccessible(object, size); SetMemoryInaccessible(header, size);
};
#if defined(CPPGC_CAGED_HEAP)
const uint64_t cage_base =
reinterpret_cast<uint64_t>(page->heap().caged_heap().base());
HeapObjectHeader* next_unfinalized = 0;
for (auto* unfinalized_header = page_state->unfinalized_objects_head;
unfinalized_header; unfinalized_header = next_unfinalized) {
next_unfinalized = unfinalized_header->GetNextUnfinalized(cage_base);
finalize_header(unfinalized_header);
}
#else // !defined(CPPGC_CAGED_HEAP)
for (HeapObjectHeader* unfinalized_header :
page_state->unfinalized_objects) {
finalize_header(unfinalized_header);
} }
#endif // !defined(CPPGC_CAGED_HEAP)
// Unmap page if empty. // Unmap page if empty.
if (page_state->is_empty) { if (page_state->is_empty) {
...@@ -576,10 +611,15 @@ class ConcurrentSweepTask final : public cppgc::JobTask, ...@@ -576,10 +611,15 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
page.space().AddPage(&page); page.space().AddPage(&page);
return true; return true;
} }
#if defined(CPPGC_CAGED_HEAP)
HeapObjectHeader* const unfinalized_objects =
header->IsFinalizable() ? page.ObjectHeader() : nullptr;
#else // !defined(CPPGC_CAGED_HEAP)
std::vector<HeapObjectHeader*> unfinalized_objects; std::vector<HeapObjectHeader*> unfinalized_objects;
if (header->IsFinalizable()) { if (header->IsFinalizable()) {
unfinalized_objects.push_back(page.ObjectHeader()); unfinalized_objects.push_back(page.ObjectHeader());
} }
#endif // !defined(CPPGC_CAGED_HEAP)
const size_t space_index = page.space().index(); const size_t space_index = page.space().index();
DCHECK_GT(states_->size(), space_index); DCHECK_GT(states_->size(), space_index);
SpaceState& state = (*states_)[space_index]; SpaceState& state = (*states_)[space_index];
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment