Commit ae41f7df authored by Anton Bikineev, committed by V8 LUCI CQ

cppgc: Fix caged-heap reservation when pointer compression is enabled

Currently, PageAllocator assumes that FreePages(start, size) will always
be called on the same region that was passed to AllocatePages(start,
size). This assumption matters in two places:
1) the leak sanitizer (LsanPageAllocator) checks it explicitly;
2) on Windows, FreePages() calls VirtualFree() with zero size and
   MEM_RELEASE, which releases the entire original reservation.

This CL temporarily fixes the bot failures by simply holding on to the
unneeded half of the reservation and adds a TODO to return that part
back to the OS.
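To make the Windows case (item 2 above) concrete, here is a minimal, self-contained sketch (illustrative only, not V8 code): VirtualFree() with MEM_RELEASE accepts only the exact base address returned by VirtualAlloc() together with a size of zero, so a reservation cannot be returned to the OS piecewise.

```cpp
// Illustrative Windows-only sketch; not part of V8 or PageAllocator.
#include <windows.h>

#include <cassert>

int main() {
  const size_t kSize = 2 * 1024 * 1024;
  // Reserve (but do not commit) a region, analogous to AllocatePages().
  void* base = VirtualAlloc(nullptr, kSize, MEM_RESERVE, PAGE_NOACCESS);
  assert(base != nullptr);

  // Releasing only the second half fails: MEM_RELEASE requires the exact
  // base address of the original reservation and a size of zero.
  void* second_half = static_cast<char*>(base) + kSize / 2;
  BOOL ok = VirtualFree(second_half, 0, MEM_RELEASE);
  assert(!ok);

  // The only supported form releases the whole reservation at once, which is
  // why FreePages() must be called on exactly the region that was allocated.
  ok = VirtualFree(base, 0, MEM_RELEASE);
  assert(ok);
  return 0;
}
```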

Bug: chromium:1325007
Change-Id: I2bd878876d43d693cf2138020f410ffe1615b4e9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3695363
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81059}
parent b61ee5f3
@@ -54,6 +54,8 @@ VirtualMemory ReserveCagedHeap(PageAllocator& platform_allocator) {
   // pointers have the most significant bit set to 1, so that on decompression
   // the bit will be sign-extended. This saves us a branch and 'or' operation
   // during compression.
+  // TODO(chromium:1325007): Provide API in PageAllocator to left trim
+  // allocations and return the half of the reservation back to the OS.
   static constexpr size_t kTryReserveSize = 2 * kCagedHeapReservationSize;
   static constexpr size_t kTryReserveAlignment =
       2 * kCagedHeapReservationAlignment;
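The TODO added here asks for an API to left-trim the reservation. As a rough sketch of what the underlying OS allows on Linux (this is not the PageAllocator interface, and the mmap flags are assumptions made for the example), munmap() may unmap just part of an anonymous mapping, so the unused lower half of an oversized reservation could in principle be handed back while the upper half stays mapped:

```cpp
// Linux-only sketch of "left trimming" a reservation; not V8 code.
#include <sys/mman.h>

#include <cassert>
#include <cstddef>

int main() {
  const size_t kCageSize = size_t{1} << 22;  // scaled-down stand-in
  const size_t kReserveSize = 2 * kCageSize;

  void* reservation = mmap(nullptr, kReserveSize, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  assert(reservation != MAP_FAILED);

  // Release only the lower half; the upper half remains reserved and usable.
  int rc = munmap(reservation, kCageSize);
  assert(rc == 0);

  // Cleanup: release the retained upper half as well.
  void* upper_half = static_cast<char*>(reservation) + kCageSize;
  rc = munmap(upper_half, kCageSize);
  assert(rc == 0);
  return 0;
}
```

Until PageAllocator grows such a call, this CL simply keeps the whole doubled reservation alive.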
@@ -68,14 +70,7 @@ VirtualMemory ReserveCagedHeap(PageAllocator& platform_allocator) {
     VirtualMemory memory(&platform_allocator, kTryReserveSize,
                          kTryReserveAlignment, hint);
-    if (memory.IsReserved()) {
-#if defined(CPPGC_POINTER_COMPRESSION)
-      VirtualMemory second_half = memory.Split(kCagedHeapReservationSize);
-      return second_half;
-#else   // !defined(CPPGC_POINTER_COMPRESSION)
-      return memory;
-#endif  // !defined(CPPGC_POINTER_COMPRESSION)
-    }
+    if (memory.IsReserved()) return memory;
   }
   FATAL("Fatal process out of memory: Failed to reserve memory for caged heap");
@@ -100,8 +95,16 @@ CagedHeap::CagedHeap(PageAllocator& platform_allocator)
     : reserved_area_(ReserveCagedHeap(platform_allocator)) {
   using CagedAddress = CagedHeap::AllocatorType::Address;
-  CagedHeapBase::g_heap_base_ =
-      reinterpret_cast<uintptr_t>(reserved_area_.address());
+#if defined(CPPGC_POINTER_COMPRESSION)
+  static constexpr size_t kBaseOffset = kCagedHeapReservationSize;
+#else   // !defined(CPPGC_POINTER_COMPRESSION)
+  static constexpr size_t kBaseOffset = 0;
+#endif  // !defined(CPPGC_POINTER_COMPRESSION)
+  void* const cage_start =
+      static_cast<uint8_t*>(reserved_area_.address()) + kBaseOffset;
+  CagedHeapBase::g_heap_base_ = reinterpret_cast<uintptr_t>(cage_start);
 #if defined(CPPGC_POINTER_COMPRESSION)
   // With pointer compression only single heap per thread is allowed.
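A small arithmetic sketch (illustrative only; the real cage is far larger, so the size here is scaled down to run anywhere) of why kBaseOffset places the cage base in the upper half of the doubled reservation: because the reservation is aligned to twice the cage size, the upper half is itself cage-aligned, and every address inside it has the cage-size bit set, which is the property the comment earlier in this file leans on when it says compressed pointers keep their most significant bit set.

```cpp
// Scaled-down illustration of the base-offset math; not V8 code.
#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kCageSize = uint64_t{1} << 20;  // stand-in for the real cage size
  constexpr uint64_t kReservationSize = 2 * kCageSize;

  // Assume the allocator handed back a reservation aligned to 2 * kCageSize.
  const uint64_t reservation = 16 * kReservationSize;
  assert(reservation % kReservationSize == 0);

  // Placing the cage base at the upper half keeps it cage-aligned...
  const uint64_t cage_base = reservation + kCageSize;
  assert(cage_base % kCageSize == 0);

  // ...and guarantees the kCageSize bit is set for every in-cage address.
  for (uint64_t offset = 0; offset < kCageSize; offset += kCageSize / 4) {
    assert(((cage_base + offset) & kCageSize) != 0);
  }
  return 0;
}
```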
@@ -110,21 +113,19 @@ CagedHeap::CagedHeap(PageAllocator& platform_allocator)
 #endif  // defined(CPPGC_POINTER_COMPRESSION)
   const bool is_not_oom = platform_allocator.SetPermissions(
-      reserved_area_.address(),
+      cage_start,
       RoundUp(sizeof(CagedHeapLocalData), platform_allocator.CommitPageSize()),
       PageAllocator::kReadWrite);
   // Failing to commit the reservation means that we are out of memory.
   CHECK(is_not_oom);
-  new (reserved_area_.address()) CagedHeapLocalData(platform_allocator);
+  new (cage_start) CagedHeapLocalData(platform_allocator);
-  const CagedAddress caged_heap_start =
-      RoundUp(reinterpret_cast<CagedAddress>(reserved_area_.address()) +
-                  sizeof(CagedHeapLocalData),
-              kPageSize);
+  const CagedAddress caged_heap_start = RoundUp(
+      reinterpret_cast<CagedAddress>(cage_start) + sizeof(CagedHeapLocalData),
+      kPageSize);
   const size_t local_data_size_with_padding =
-      caged_heap_start -
-      reinterpret_cast<CagedAddress>(reserved_area_.address());
+      caged_heap_start - reinterpret_cast<CagedAddress>(cage_start);
   normal_page_bounded_allocator_ = std::make_unique<
       v8::base::BoundedPageAllocator>(
@@ -137,7 +138,7 @@ CagedHeap::CagedHeap(PageAllocator& platform_allocator)
   large_page_bounded_allocator_ = std::make_unique<
      v8::base::BoundedPageAllocator>(
       &platform_allocator,
-      reinterpret_cast<uintptr_t>(reserved_area_.address()) +
+      reinterpret_cast<uintptr_t>(cage_start) +
           kCagedHeapNormalPageReservationSize,
       kCagedHeapNormalPageReservationSize, kPageSize,
       v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized,
@@ -74,13 +74,6 @@ class V8_EXPORT_PRIVATE CagedHeap final {
   BasePage& LookupPageFromInnerPointer(void* ptr) const;
   LargePage& LookupLargePageFromInnerPointer(void* ptr) const;
-  CagedHeapLocalData& local_data() {
-    return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
-  }
-  const CagedHeapLocalData& local_data() const {
-    return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
-  }
   bool IsOnHeap(const void* address) const {
     DCHECK_EQ(reserved_area_.address(),
               reinterpret_cast<void*>(CagedHeapBase::GetBase()));
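The per-instance local_data() accessors removed above are replaced at the call sites in the following hunks by a static CagedHeapLocalData::Get(). Its declaration is not part of this diff; the sketch below (hypothetical stand-ins, not the real cppgc headers) shows one plausible shape, assuming, as the constructor change above suggests, that the local data is placement-new'ed at the cage start published through CagedHeapBase::g_heap_base_:

```cpp
// Hypothetical stand-ins for cppgc internals; the real declarations differ.
#include <cstdint>

struct AgeTable { /* ... */ };

struct CagedHeapBase {
  static uintptr_t GetBase() { return g_heap_base_; }
  static uintptr_t g_heap_base_;
};
uintptr_t CagedHeapBase::g_heap_base_ = 0;

struct CagedHeapLocalData {
  // With the local data constructed at cage_start and the cage base stored
  // globally, the accessor no longer needs a CagedHeap instance.
  static CagedHeapLocalData& Get() {
    return *reinterpret_cast<CagedHeapLocalData*>(CagedHeapBase::GetBase());
  }

  AgeTable age_table;
};
```

Call sites then read, e.g., CagedHeapLocalData::Get().age_table, matching the hunks below.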
@@ -163,7 +163,7 @@ void HeapBase::ResetRememberedSet() {
     return;
   }
-  CagedHeap::Instance().local_data().age_table.Reset(page_allocator());
+  CagedHeapLocalData::Get().age_table.Reset(page_allocator());
   remembered_set_.Reset();
   return;
 }
@@ -110,7 +110,7 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader& header) {
 #if defined(CPPGC_YOUNG_GENERATION)
   if (collection_type_ == Heap::Config::CollectionType::kMinor) {
     auto& caged_heap = CagedHeap::Instance();
-    const auto age = caged_heap.local_data().age_table.GetAge(
+    const auto age = CagedHeapLocalData::Get().age_table.GetAge(
         caged_heap.OffsetFromAddress(header.ObjectStart()));
     if (age == AgeTable::Age::kOld) {
       // Do not verify old objects.
@@ -37,7 +37,7 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
   const bool new_page =
       (begin == page->PayloadStart()) && (end == page->PayloadEnd());
-  auto& age_table = CagedHeap::Instance().local_data().age_table;
+  auto& age_table = CagedHeapLocalData::Get().age_table;
   age_table.SetAgeForRange(CagedHeap::OffsetFromAddress(begin),
                            CagedHeap::OffsetFromAddress(end),
                            AgeTable::Age::kYoung,
@@ -47,21 +47,6 @@ VirtualMemory& VirtualMemory::operator=(VirtualMemory&& other) V8_NOEXCEPT {
   return *this;
 }
-VirtualMemory VirtualMemory::Split(size_t size) {
-  DCHECK_GT(size, 0u);
-  DCHECK_LT(size, size_);
-  DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
-  const size_t old_size = std::exchange(size_, size);
-  VirtualMemory new_memory;
-  new_memory.page_allocator_ = page_allocator_;
-  new_memory.start_ = reinterpret_cast<uint8_t*>(start_) + size_;
-  new_memory.size_ = old_size - size;
-  return new_memory;
-}
 void VirtualMemory::Reset() {
   start_ = nullptr;
   size_ = 0;
@@ -32,9 +32,6 @@ class V8_EXPORT_PRIVATE VirtualMemory {
   VirtualMemory(VirtualMemory&&) V8_NOEXCEPT;
   VirtualMemory& operator=(VirtualMemory&&) V8_NOEXCEPT;
-  // Splits the current reservation and returns the residual one.
-  VirtualMemory Split(size_t size);
   // Returns whether the memory has been reserved.
   bool IsReserved() const { return start_ != nullptr; }
@@ -267,7 +267,7 @@ void InterGenerationalPointerTest(MinorGCTest* test, cppgc::Heap* heap) {
     const uintptr_t offset = CagedHeap::OffsetFromAddress(young);
     // Age may be young or unknown.
     EXPECT_NE(AgeTable::Age::kOld,
-              CagedHeap::Instance().local_data().age_table.GetAge(offset));
+              CagedHeapLocalData::Get().age_table.GetAge(offset));
   }
 }