Commit 64c80878 authored by Michael Lippautz, committed by V8 LUCI CQ

cppgc: Fix compilation of young generation

Drive-by: Pointer-to-reference conversions and other smaller cleanups.

Change-Id: I83ed114e4b27d5986a389a9753333716b0e20524
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3133146
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76615}
parent 28d2b323
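For context on the drive-by cleanup: parameters that must never be null (HeapBase, PageAllocator) are changed from pointers to references, so the non-null contract lives in the signature and the DCHECK_NOT_NULL-style runtime checks can be dropped. A minimal, self-contained sketch of the pattern follows; the PageAllocator stand-in and the CommitSize* helpers are illustrative only, not the actual cppgc classes:

#include <cassert>
#include <cstddef>

// Illustrative stand-in for a page allocator interface (not cppgc's real one).
class PageAllocator {
 public:
  std::size_t CommitPageSize() const { return 4096; }
};

// Before: pointer parameter; the non-null expectation is only a runtime check.
std::size_t CommitSizeViaPointer(const PageAllocator* allocator) {
  assert(allocator != nullptr);
  return allocator->CommitPageSize();
}

// After: reference parameter; a valid object is guaranteed by the type, so the
// assertion disappears and callers take the address (or dereference) at the
// call site instead.
std::size_t CommitSizeViaReference(const PageAllocator& allocator) {
  return allocator.CommitPageSize();
}

int main() {
  PageAllocator allocator;
  return CommitSizeViaPointer(&allocator) == CommitSizeViaReference(allocator)
             ? 0
             : 1;
}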
@@ -53,10 +53,10 @@ static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
 #endif // CPPGC_YOUNG_GENERATION
 struct CagedHeapLocalData final {
-  explicit CagedHeapLocalData(HeapBase* heap_base) : heap_base(heap_base) {}
+  CagedHeapLocalData(HeapBase&, PageAllocator&);
   bool is_incremental_marking_in_progress = false;
-  HeapBase* heap_base = nullptr;
+  HeapBase& heap_base;
 #if defined(CPPGC_YOUNG_GENERATION)
   AgeTable age_table;
 #endif
@@ -13,6 +13,14 @@
 namespace cppgc {
 namespace internal {
+CagedHeapLocalData::CagedHeapLocalData(HeapBase& heap_base,
+                                       PageAllocator& allocator)
+    : heap_base(heap_base) {
+#if defined(CPPGC_YOUNG_GENERATION)
+  age_table.Reset(&allocator);
+#endif // defined(CPPGC_YOUNG_GENERATION)
+}
 #if defined(CPPGC_YOUNG_GENERATION)
 static_assert(
@@ -30,7 +38,7 @@ void AgeTable::Reset(PageAllocator* allocator) {
   allocator->DiscardSystemPages(reinterpret_cast<void*>(begin), end - begin);
 }
-#endif
+#endif // defined(CPPGC_YOUNG_GENERATION)
 } // namespace internal
 } // namespace cppgc
@@ -27,18 +27,17 @@ STATIC_ASSERT(api_constants::kCagedHeapReservationAlignment ==
 namespace {
-VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
-  DCHECK_NOT_NULL(platform_allocator);
+VirtualMemory ReserveCagedHeap(PageAllocator& platform_allocator) {
   DCHECK_EQ(0u,
-            kCagedHeapReservationSize % platform_allocator->AllocatePageSize());
+            kCagedHeapReservationSize % platform_allocator.AllocatePageSize());
   static constexpr size_t kAllocationTries = 4;
   for (size_t i = 0; i < kAllocationTries; ++i) {
     void* hint = reinterpret_cast<void*>(RoundDown(
-        reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
+        reinterpret_cast<uintptr_t>(platform_allocator.GetRandomMmapAddr()),
         kCagedHeapReservationAlignment));
-    VirtualMemory memory(platform_allocator, kCagedHeapReservationSize,
+    VirtualMemory memory(&platform_allocator, kCagedHeapReservationSize,
                          kCagedHeapReservationAlignment, hint);
     if (memory.IsReserved()) return memory;
   }
@@ -70,23 +69,19 @@ class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
 } // namespace
-CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
+CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
     : reserved_area_(ReserveCagedHeap(platform_allocator)) {
   using CagedAddress = CagedHeap::AllocatorType::Address;
-  DCHECK_NOT_NULL(heap_base);
-  CHECK(platform_allocator->SetPermissions(
+  const bool is_not_oom = platform_allocator.SetPermissions(
       reserved_area_.address(),
-      RoundUp(sizeof(CagedHeapLocalData), platform_allocator->CommitPageSize()),
-      PageAllocator::kReadWrite));
+      RoundUp(sizeof(CagedHeapLocalData), platform_allocator.CommitPageSize()),
+      PageAllocator::kReadWrite);
+  // Failing to commit the reservation means that we are out of memory.
+  CHECK(is_not_oom);
-  auto* local_data =
-      new (reserved_area_.address()) CagedHeapLocalData(heap_base);
-#if defined(CPPGC_YOUNG_GENERATION)
-  local_data->age_table.Reset(platform_allocator);
-#endif
-  USE(local_data);
+  new (reserved_area_.address())
+      CagedHeapLocalData(heap_base, platform_allocator);
   const CagedAddress caged_heap_start =
       RoundUp(reinterpret_cast<CagedAddress>(reserved_area_.address()) +
@@ -97,7 +92,7 @@ CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
       reinterpret_cast<CagedAddress>(reserved_area_.address());
   bounded_allocator_ = std::make_unique<CppgcBoundedPageAllocator>(
-      platform_allocator, caged_heap_start,
+      &platform_allocator, caged_heap_start,
       reserved_area_.size() - local_data_size_with_padding, kPageSize);
 }
@@ -22,7 +22,17 @@ class CagedHeap final {
  public:
   using AllocatorType = v8::base::BoundedPageAllocator;
-  CagedHeap(HeapBase* heap, PageAllocator* platform_allocator);
+  static uintptr_t OffsetFromAddress(const void* address) {
+    return reinterpret_cast<uintptr_t>(address) &
+           (kCagedHeapReservationAlignment - 1);
+  }
+  static uintptr_t BaseFromAddress(const void* address) {
+    return reinterpret_cast<uintptr_t>(address) &
+           ~(kCagedHeapReservationAlignment - 1);
+  }
+  CagedHeap(HeapBase& heap, PageAllocator& platform_allocator);
   CagedHeap(const CagedHeap&) = delete;
   CagedHeap& operator=(const CagedHeap&) = delete;
@@ -37,23 +47,13 @@ class CagedHeap final {
     return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
   }
-  static uintptr_t OffsetFromAddress(const void* address) {
-    return reinterpret_cast<uintptr_t>(address) &
-           (kCagedHeapReservationAlignment - 1);
-  }
-  static uintptr_t BaseFromAddress(const void* address) {
-    return reinterpret_cast<uintptr_t>(address) &
-           ~(kCagedHeapReservationAlignment - 1);
-  }
   bool IsOnHeap(const void* address) const {
     return reinterpret_cast<void*>(BaseFromAddress(address)) ==
            reserved_area_.address();
   }
  private:
-  VirtualMemory reserved_area_;
+  const VirtualMemory reserved_area_;
   std::unique_ptr<AllocatorType> bounded_allocator_;
 };
@@ -63,7 +63,7 @@ HeapBase::HeapBase(
           platform_->GetPageAllocator())),
 #endif // LEAK_SANITIZER
 #if defined(CPPGC_CAGED_HEAP)
-      caged_heap_(this, page_allocator()),
+      caged_heap_(*this, *page_allocator()),
       page_backend_(std::make_unique<PageBackend>(caged_heap_.allocator(),
                                                   *oom_handler_.get())),
 #else // !CPPGC_CAGED_HEAP
@@ -38,7 +38,7 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
     WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
 #if defined(CPPGC_CAGED_HEAP)
     heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
-#endif
+#endif // defined(CPPGC_CAGED_HEAP)
     return true;
   }
   return false;
@@ -52,7 +52,7 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
     WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
 #if defined(CPPGC_CAGED_HEAP)
     heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
-#endif
+#endif // defined(CPPGC_CAGED_HEAP)
     return true;
   }
   return false;
@@ -40,7 +40,7 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
           ? RoundUp(offset_end, kEntrySize)
           : RoundDown(offset_end, kEntrySize);
-  auto& age_table = page->heap()->caged_heap().local_data().age_table;
+  auto& age_table = page->heap().caged_heap().local_data().age_table;
   for (auto offset = young_offset_begin; offset < young_offset_end;
        offset += AgeTable::kEntrySizeInBytes) {
     age_table[offset] = AgeTable::Age::kYoung;
@@ -132,12 +132,12 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
   // A write during atomic pause (e.g. pre-finalizer) may trigger the slow path
   // of the barrier. This is a result of the order of bailouts where not marking
   // results in applying the generational barrier.
-  if (local_data.heap_base->in_atomic_pause()) return;
+  if (local_data.heap_base.in_atomic_pause()) return;
   if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
     return;
   // Record slot.
-  local_data.heap_base->remembered_slots().insert(const_cast<void*>(slot));
+  local_data.heap_base.remembered_slots().insert(const_cast<void*>(slot));
 }
 #endif // CPPGC_YOUNG_GENERATION