Commit e786faaf authored by Anton Bikineev's avatar Anton Bikineev Committed by V8 LUCI CQ

cppgc: Reset age table only for pages containing young objects

We don't need to reset the entire age table, but merely the pages that
are known to contain young objects. This should improve memory usage with
generational GC enabled.

The CL is a prerequisite for another CL that'll increase the size of
the age-table.

Bug: chromium:1029379
Change-Id: Ibb5b607af20380c3936b7396b3d9767f6f17c44b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3695268
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81106}
parent 6f9df3e7
...@@ -51,13 +51,16 @@ class V8_EXPORT AgeTable final { ...@@ -51,13 +51,16 @@ class V8_EXPORT AgeTable final {
table_[card(cage_offset)] = age; table_[card(cage_offset)] = age;
} }
void SetAgeForRange(uintptr_t cage_offset_begin, uintptr_t cage_offset_end,
Age age, AdjacentCardsPolicy adjacent_cards_policy);
V8_INLINE Age GetAge(uintptr_t cage_offset) const { V8_INLINE Age GetAge(uintptr_t cage_offset) const {
return table_[card(cage_offset)]; return table_[card(cage_offset)];
} }
void SetAgeForRange(uintptr_t cage_offset_begin, uintptr_t cage_offset_end,
Age age, AdjacentCardsPolicy adjacent_cards_policy);
Age GetAgeForRange(uintptr_t cage_offset_begin,
uintptr_t cage_offset_end) const;
void Reset(PageAllocator* allocator); void Reset(PageAllocator* allocator);
private: private:
......
...@@ -53,6 +53,16 @@ void AgeTable::SetAgeForRange(uintptr_t offset_begin, uintptr_t offset_end, ...@@ -53,6 +53,16 @@ void AgeTable::SetAgeForRange(uintptr_t offset_begin, uintptr_t offset_end,
set_age_for_outer_card(offset_end); set_age_for_outer_card(offset_end);
} }
// Returns the age shared by every card in [offset_begin, offset_end), or
// Age::kMixed if the cards in the range disagree.
AgeTable::Age AgeTable::GetAgeForRange(uintptr_t offset_begin,
                                       uintptr_t offset_end) const {
  const Age result = GetAge(offset_begin);
  for (uintptr_t offset = offset_begin + kCardSizeInBytes;
       offset < offset_end; offset += kCardSizeInBytes) {
    // Once two cards differ the range can only ever be mixed, so there is no
    // need to scan the remaining cards.
    if (GetAge(offset) != result) return Age::kMixed;
  }
  return result;
}
void AgeTable::Reset(PageAllocator* allocator) { void AgeTable::Reset(PageAllocator* allocator) {
// TODO(chromium:1029379): Consider MADV_DONTNEED instead of MADV_FREE on // TODO(chromium:1029379): Consider MADV_DONTNEED instead of MADV_FREE on
// POSIX platforms. // POSIX platforms.
......
...@@ -50,6 +50,46 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> { ...@@ -50,6 +50,46 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
size_t accumulated_size_ = 0; size_t accumulated_size_ = 0;
}; };
#if defined(CPPGC_YOUNG_GENERATION)
// Heap visitor that promotes all pages known to contain young objects by
// resetting their cards in the age table to old. Pages without young objects
// are left untouched (their cards are already old).
class AgeTableResetter final : protected HeapVisitor<AgeTableResetter> {
  friend class HeapVisitor<AgeTableResetter>;

 public:
  AgeTableResetter() : age_table_(CagedHeapLocalData::Get().age_table) {}

  // Visits every page of |raw_heap|.
  void Run(RawHeap& raw_heap) { Traverse(raw_heap); }

 protected:
  bool VisitPage(BasePage& page) {
    if (page.contains_young_objects()) {
      // Flip every card spanned by the page's payload to old.
      // TODO(chromium:1029379): Consider decommitting pages once in a while.
      age_table_.SetAgeForRange(
          CagedHeap::OffsetFromAddress(page.PayloadStart()),
          CagedHeap::OffsetFromAddress(page.PayloadEnd()),
          AgeTable::Age::kOld, AgeTable::AdjacentCardsPolicy::kIgnore);
      // The page no longer holds any young objects after promotion.
      page.set_as_containing_young_objects(false);
      return true;
    }
#if defined(DEBUG)
    // Pages without young objects must already be fully old in the age table.
    DCHECK_EQ(AgeTable::Age::kOld,
              age_table_.GetAgeForRange(
                  CagedHeap::OffsetFromAddress(page.PayloadStart()),
                  CagedHeap::OffsetFromAddress(page.PayloadEnd())));
#endif  // defined(DEBUG)
    return true;
  }

  bool VisitNormalPage(NormalPage& page) { return VisitPage(page); }
  bool VisitLargePage(LargePage& page) { return VisitPage(page); }

 private:
  AgeTable& age_table_;
};
#endif // defined(CPPGC_YOUNG_GENERATION)
} // namespace } // namespace
HeapBase::HeapBase( HeapBase::HeapBase(
...@@ -163,9 +203,10 @@ void HeapBase::ResetRememberedSet() { ...@@ -163,9 +203,10 @@ void HeapBase::ResetRememberedSet() {
return; return;
} }
CagedHeapLocalData::Get().age_table.Reset(page_allocator()); AgeTableResetter age_table_resetter;
age_table_resetter.Run(raw_heap());
remembered_set_.Reset(); remembered_set_.Reset();
return;
} }
#endif // defined(CPPGC_YOUNG_GENERATION) #endif // defined(CPPGC_YOUNG_GENERATION)
......
...@@ -87,6 +87,11 @@ class V8_EXPORT_PRIVATE BasePage : public BasePageHandle { ...@@ -87,6 +87,11 @@ class V8_EXPORT_PRIVATE BasePage : public BasePageHandle {
void ResetDiscardedMemory() { discarded_memory_ = 0; } void ResetDiscardedMemory() { discarded_memory_ = 0; }
size_t discarded_memory() const { return discarded_memory_; } size_t discarded_memory() const { return discarded_memory_; }
bool contains_young_objects() const { return contains_young_objects_; }
void set_as_containing_young_objects(bool value) {
contains_young_objects_ = value;
}
protected: protected:
enum class PageType : uint8_t { kNormal, kLarge }; enum class PageType : uint8_t { kNormal, kLarge };
BasePage(HeapBase&, BaseSpace&, PageType); BasePage(HeapBase&, BaseSpace&, PageType);
...@@ -94,6 +99,7 @@ class V8_EXPORT_PRIVATE BasePage : public BasePageHandle { ...@@ -94,6 +99,7 @@ class V8_EXPORT_PRIVATE BasePage : public BasePageHandle {
private: private:
BaseSpace& space_; BaseSpace& space_;
PageType type_; PageType type_;
bool contains_young_objects_ = false;
size_t discarded_memory_ = 0; size_t discarded_memory_ = 0;
}; };
......
...@@ -26,16 +26,16 @@ namespace internal { ...@@ -26,16 +26,16 @@ namespace internal {
namespace { namespace {
void MarkRangeAsYoung(BasePage* page, Address begin, Address end) { void MarkRangeAsYoung(BasePage& page, Address begin, Address end) {
#if defined(CPPGC_YOUNG_GENERATION) #if defined(CPPGC_YOUNG_GENERATION)
DCHECK_LT(begin, end); DCHECK_LT(begin, end);
if (!page->heap().generational_gc_supported()) return; if (!page.heap().generational_gc_supported()) return;
// Then, if the page is newly allocated, force the first and last cards to be // Then, if the page is newly allocated, force the first and last cards to be
// marked as young. // marked as young.
const bool new_page = const bool new_page =
(begin == page->PayloadStart()) && (end == page->PayloadEnd()); (begin == page.PayloadStart()) && (end == page.PayloadEnd());
auto& age_table = CagedHeapLocalData::Get().age_table; auto& age_table = CagedHeapLocalData::Get().age_table;
age_table.SetAgeForRange(CagedHeap::OffsetFromAddress(begin), age_table.SetAgeForRange(CagedHeap::OffsetFromAddress(begin),
...@@ -43,6 +43,7 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) { ...@@ -43,6 +43,7 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
AgeTable::Age::kYoung, AgeTable::Age::kYoung,
new_page ? AgeTable::AdjacentCardsPolicy::kIgnore new_page ? AgeTable::AdjacentCardsPolicy::kIgnore
: AgeTable::AdjacentCardsPolicy::kConsider); : AgeTable::AdjacentCardsPolicy::kConsider);
page.set_as_containing_young_objects(true);
#endif // defined(CPPGC_YOUNG_GENERATION) #endif // defined(CPPGC_YOUNG_GENERATION)
} }
...@@ -74,7 +75,7 @@ void ReplaceLinearAllocationBuffer(NormalPageSpace& space, ...@@ -74,7 +75,7 @@ void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
// Concurrent marking may be running while the LAB is set up next to a live // Concurrent marking may be running while the LAB is set up next to a live
// object sharing the same cell in the bitmap. // object sharing the same cell in the bitmap.
page->object_start_bitmap().ClearBit<AccessMode::kAtomic>(new_buffer); page->object_start_bitmap().ClearBit<AccessMode::kAtomic>(new_buffer);
MarkRangeAsYoung(page, new_buffer, new_buffer + new_size); MarkRangeAsYoung(*page, new_buffer, new_buffer + new_size);
} }
} }
...@@ -88,7 +89,7 @@ void* AllocateLargeObject(PageBackend& page_backend, LargePageSpace& space, ...@@ -88,7 +89,7 @@ void* AllocateLargeObject(PageBackend& page_backend, LargePageSpace& space,
HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo); HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);
stats_collector.NotifyAllocation(size); stats_collector.NotifyAllocation(size);
MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd()); MarkRangeAsYoung(*page, page->PayloadStart(), page->PayloadEnd());
return header->ObjectStart(); return header->ObjectStart();
} }
......
...@@ -60,6 +60,33 @@ struct OtherType<Large> { ...@@ -60,6 +60,33 @@ struct OtherType<Large> {
using Type = Small; using Type = Small;
}; };
void ExpectPageYoung(BasePage& page) {
EXPECT_TRUE(page.contains_young_objects());
auto& age_table = CagedHeapLocalData::Get().age_table;
EXPECT_EQ(AgeTable::Age::kYoung,
age_table.GetAgeForRange(
CagedHeap::OffsetFromAddress(page.PayloadStart()),
CagedHeap::OffsetFromAddress(page.PayloadEnd())));
}
void ExpectPageMixed(BasePage& page) {
EXPECT_TRUE(page.contains_young_objects());
auto& age_table = CagedHeapLocalData::Get().age_table;
EXPECT_EQ(AgeTable::Age::kMixed,
age_table.GetAgeForRange(
CagedHeap::OffsetFromAddress(page.PayloadStart()),
CagedHeap::OffsetFromAddress(page.PayloadEnd())));
}
void ExpectPageOld(BasePage& page) {
EXPECT_FALSE(page.contains_young_objects());
auto& age_table = CagedHeapLocalData::Get().age_table;
EXPECT_EQ(AgeTable::Age::kOld,
age_table.GetAgeForRange(
CagedHeap::OffsetFromAddress(page.PayloadStart()),
CagedHeap::OffsetFromAddress(page.PayloadEnd())));
}
} // namespace } // namespace
class MinorGCTest : public testing::TestWithHeap { class MinorGCTest : public testing::TestWithHeap {
...@@ -569,6 +596,59 @@ TEST_F(MinorGCTest, ReexecuteCustomCallback) { ...@@ -569,6 +596,59 @@ TEST_F(MinorGCTest, ReexecuteCustomCallback) {
// The callback must be called only once. // The callback must be called only once.
EXPECT_EQ(4u, GCedWithCustomWeakCallback::custom_callback_called); EXPECT_EQ(4u, GCedWithCustomWeakCallback::custom_callback_called);
} }
// Verifies that minor GC promotes exactly the pages that held young objects,
// that new allocations on promoted pages turn them mixed, and that major GC
// makes everything old again.
TEST_F(MinorGCTest, AgeTableIsReset) {
  using SmallType = SimpleGCed<16>;
  using MediumType = SimpleGCed<64>;
  using LargeType = SimpleGCed<kLargeObjectSizeThreshold * 2>;

  Persistent<SmallType> small =
      MakeGarbageCollected<SmallType>(GetAllocationHandle());
  Persistent<MediumType> medium =
      MakeGarbageCollected<MediumType>(GetAllocationHandle());
  Persistent<LargeType> large =
      MakeGarbageCollected<LargeType>(GetAllocationHandle());

  auto* small_page = BasePage::FromPayload(small.Get());
  auto* medium_page = BasePage::FromPayload(medium.Get());
  auto* large_page = BasePage::FromPayload(large.Get());

  // Sanity-check the page layout: two distinct normal pages and one large one.
  ASSERT_FALSE(small_page->is_large());
  ASSERT_FALSE(medium_page->is_large());
  ASSERT_TRUE(large_page->is_large());
  ASSERT_NE(small_page, medium_page);
  ASSERT_NE(small_page, large_page);
  ASSERT_NE(medium_page, large_page);

  // Freshly allocated objects make all three pages young.
  ExpectPageYoung(*small_page);
  ExpectPageYoung(*medium_page);
  ExpectPageYoung(*large_page);

  // A minor GC promotes every page that contained young objects.
  CollectMinor();
  ExpectPageOld(*small_page);
  ExpectPageOld(*medium_page);
  ExpectPageOld(*large_page);

  // Allocate more objects on the normal pages and a new large page.
  small = MakeGarbageCollected<SmallType>(GetAllocationHandle());
  medium = MakeGarbageCollected<MediumType>(GetAllocationHandle());
  large = MakeGarbageCollected<LargeType>(GetAllocationHandle());

  // The normal pages now hold both old and young objects; the original large
  // page stays old since large objects always get a fresh page.
  ExpectPageMixed(*small_page);
  ExpectPageMixed(*medium_page);
  ExpectPageOld(*large_page);

  // A major GC promotes everything, including the new large page.
  CollectMajor();
  ExpectPageOld(*small_page);
  ExpectPageOld(*medium_page);
  ExpectPageOld(*BasePage::FromPayload(large.Get()));
}
} // namespace internal } // namespace internal
} // namespace cppgc } // namespace cppgc
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment