Commit 0d57754c authored by Anton Bikineev's avatar Anton Bikineev Committed by V8 LUCI CQ

cppgc: young-gen: Pack AgeTable knowing allocation granularity

This reduces card granularity from 4096 to 512 bytes with the goal to
improve write barrier filtering.

Bug: chromium:1029379
Change-Id: I22e2a9c61ef4c36c3db65404370213d0a8048e08
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3582393
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79960}
parent 682ba0ef
......@@ -44,6 +44,9 @@ static constexpr size_t kDefaultAlignment = sizeof(void*);
// Maximum support alignment for a type as in `alignof(T)`.
static constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment;
// Granularity of heap allocations.
constexpr size_t kAllocationGranularity = sizeof(void*);
} // namespace api_constants
} // namespace internal
......
......@@ -6,6 +6,8 @@
#define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
#include <array>
#include <cstddef>
#include <cstdint>
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/logging.h"
......@@ -19,32 +21,41 @@ class HeapBase;
#if defined(CPPGC_YOUNG_GENERATION)
// AgeTable is the bytemap needed for the fast generation check in the write
// barrier. AgeTable contains entries that correspond to 512 bytes memory
// regions (cards). Each entry in the table represents generation of the objects
// that reside on the corresponding card (young, old or mixed).
class AgeTable final {
  // Number of entries the table must have: one byte per card covering the
  // whole caged-heap reservation.
  static constexpr size_t kRequiredSize = 1 * api_constants::kMB;
  static constexpr size_t kAllocationGranularity =
      api_constants::kAllocationGranularity;

 public:
  // Generation of all objects residing on a single card.
  enum class Age : uint8_t { kOld, kYoung, kMixed };

  // Size of a single card; derived so that a table of kRequiredSize entries
  // spans the entire caged-heap reservation.
  static constexpr size_t kCardSizeInBytes =
      (api_constants::kCagedHeapReservationSize / kAllocationGranularity) /
      kRequiredSize;

  // Sets the age of the card containing `cage_offset`.
  void SetAge(uintptr_t cage_offset, Age age) {
    table_[card(cage_offset)] = age;
  }
  // Returns the age of the card containing `cage_offset`.
  V8_INLINE Age GetAge(uintptr_t cage_offset) const {
    return table_[card(cage_offset)];
  }

  void Reset(PageAllocator* allocator);

 private:
  // Maps an offset within the cage to the index of its card in `table_`.
  V8_INLINE size_t card(uintptr_t offset) const {
    // kCardSizeInBytes is a power of two, so dividing by it is a shift by its
    // trailing-zero count.
    constexpr size_t kGranularityBits =
        __builtin_ctz(static_cast<uint32_t>(kCardSizeInBytes));
    const size_t entry = offset >> kGranularityBits;
    CPPGC_DCHECK(table_.size() > entry);
    return entry;
  }

  std::array<Age, kRequiredSize> table_;
};
static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
......
......@@ -405,7 +405,8 @@ void WriteBarrier::GenerationalBarrier(const Params& params, const void* slot) {
const AgeTable& age_table = local_data.age_table;
// Bail out if the slot is in young generation.
if (V8_LIKELY(age_table[params.slot_offset] == AgeTable::Age::kYoung)) return;
if (V8_LIKELY(age_table.GetAge(params.slot_offset) == AgeTable::Age::kYoung))
return;
GenerationalBarrierSlow(local_data, age_table, slot, params.value_offset);
}
......@@ -420,7 +421,8 @@ void WriteBarrier::GenerationalBarrierForSourceObject(
// Assume that if the first element is in young generation, the whole range is
// in young generation.
if (V8_LIKELY(age_table[params.slot_offset] == AgeTable::Age::kYoung)) return;
if (V8_LIKELY(age_table.GetAge(params.slot_offset) == AgeTable::Age::kYoung))
return;
GenerationalBarrierForSourceObjectSlow(local_data, inner_pointer);
}
......
......@@ -106,14 +106,12 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader& header) {
#if defined(CPPGC_YOUNG_GENERATION)
if (collection_type_ == Heap::Config::CollectionType::kMinor) {
const auto age = heap_.caged_heap()
.local_data()
.age_table[heap_.caged_heap().OffsetFromAddress(
header.ObjectStart())];
const auto age = heap_.caged_heap().local_data().age_table.GetAge(
heap_.caged_heap().OffsetFromAddress(header.ObjectStart()));
if (age == AgeTable::Age::kOld) {
// Do not verify old objects.
return true;
} else if (age == AgeTable::Age::kUnknown) {
} else if (age == AgeTable::Age::kMixed) {
// If the age is not known, the marked bytes may not be exact as possibly
// old objects are verified as well.
verifier_found_marked_bytes_are_exact_ = false;
......
......@@ -30,7 +30,7 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
#if defined(CPPGC_YOUNG_GENERATION)
DCHECK_LT(begin, end);
static constexpr auto kEntrySize = AgeTable::kEntrySizeInBytes;
static constexpr auto kEntrySize = AgeTable::kCardSizeInBytes;
const uintptr_t offset_begin = CagedHeap::OffsetFromAddress(begin);
const uintptr_t offset_end = CagedHeap::OffsetFromAddress(end);
......@@ -44,16 +44,16 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
auto& age_table = page->heap().caged_heap().local_data().age_table;
for (auto offset = young_offset_begin; offset < young_offset_end;
offset += AgeTable::kEntrySizeInBytes) {
age_table[offset] = AgeTable::Age::kYoung;
offset += AgeTable::kCardSizeInBytes) {
age_table.SetAge(offset, AgeTable::Age::kYoung);
}
// Set to kMixed the first and the last regions of the newly allocated
// linear buffer.
if (begin != page->PayloadStart() && !IsAligned(offset_begin, kEntrySize))
age_table[offset_begin] = AgeTable::Age::kUnknown;
age_table.SetAge(offset_begin, AgeTable::Age::kMixed);
if (end != page->PayloadEnd() && !IsAligned(offset_end, kEntrySize))
age_table[offset_end] = AgeTable::Age::kUnknown;
age_table.SetAge(offset_end, AgeTable::Age::kMixed);
#endif
}
......
......@@ -146,6 +146,8 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
STATIC_ASSERT(2 * kAllocationGranularity ==
api_constants::kMaxSupportedAlignment);
STATIC_ASSERT(kAllocationGranularity == sizeof(HeapObjectHeader));
STATIC_ASSERT(kAllocationGranularity ==
api_constants::kAllocationGranularity);
DCHECK_EQ(2 * sizeof(HeapObjectHeader), static_cast<size_t>(alignment));
constexpr size_t kAlignment = 2 * kAllocationGranularity;
constexpr size_t kAlignmentMask = kAlignment - 1;
......
......@@ -135,7 +135,7 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
// results in applying the generational barrier.
if (local_data.heap_base.in_atomic_pause()) return;
if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
if (value_offset > 0 && age_table.GetAge(value_offset) == AgeTable::Age::kOld)
return;
// Record slot.
......
......@@ -262,8 +262,9 @@ void InterGenerationalPointerTest(MinorGCTest* test, cppgc::Heap* heap) {
const uintptr_t offset =
internal_heap->caged_heap().OffsetFromAddress(young);
// Age may be young or mixed.
EXPECT_NE(AgeTable::Age::kOld,
Heap::From(heap)->caged_heap().local_data().age_table[offset]);
EXPECT_NE(
AgeTable::Age::kOld,
Heap::From(heap)->caged_heap().local_data().age_table.GetAge(offset));
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment