Commit 1fc24069 authored by Omer Katz, committed by V8 LUCI CQ

cppgc: Clear large pages on free.

Destroyed large pages can be reallocated before the OS gets a chance to
reclaim and clear them. In such cases, the newly allocated page will contain
non-zero memory.
Normal pages are not affected since they are kept in page pools instead
of being freed.
Fix by explicitly clearing the payload when destroying a large page.

Bug: chromium:1056170, chromium:1206274
Change-Id: I6436302f50b8f0b4ef41288425bf464b0eb52d5f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2874404
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74418}
parent 95f72de8
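
For illustration only, a minimal sketch of the idea in the commit message (zeroing a large page's payload when the page is destroyed) follows. It is not code from this CL's hunks, which route the clearing through the page allocator used by the caged heap; the LargePage stand-in and its PayloadStart()/PayloadSize() accessors are assumptions.

#include <cstddef>
#include <cstring>

// Hypothetical stand-in for cppgc's large page; the real type and its
// accessors are assumptions for this sketch.
struct LargePage {
  void* PayloadStart();
  std::size_t PayloadSize();
};

// Sketch: zero the payload when the page is destroyed, so that a later
// reallocation of the same memory observes zeroed contents even if the OS
// has not reclaimed and cleared the page yet.
void ClearLargePagePayloadOnDestroy(LargePage* page) {
  std::memset(page->PayloadStart(), 0, page->PayloadSize());
}
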
@@ -45,6 +45,27 @@ VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
UNREACHABLE();
}
class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
public:
CppgcBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t allocate_page_size)
: BoundedPageAllocator(page_allocator, start, size, allocate_page_size) {}
bool FreePages(void* address, size_t size) final {
// BoundedPageAllocator is not guaranteed to allocate zeroed pages.
// Specifically, it is possible that BPA frees a page and then tries to
// reallocate the same page before the OS has had a chance to asynchronously
// reclaim that page. In such cases, the contents of the page would not have
// been cleared by the OS and the reallocated page will keep its previous
// contents. To mitigate this problem, CppgcBoundedPageAllocator clears all
// pages before they are freed. This also includes protected guard pages, so
// CppgcBoundedPageAllocator needs to update permissions before clearing.
SetPermissions(address, size, Permission::kReadWrite);
memset(address, 0, size);
return v8::base::BoundedPageAllocator::FreePages(address, size);
}
};
} // namespace
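
A hedged usage sketch of the wrapper above: the AllocatePages/FreePages calls follow the v8::PageAllocator interface, while the example function and its setup values are assumptions and not part of this CL.

// Usage sketch (the function and its parameters are assumptions): a page
// freed through the wrapper is cleared, so if the same address is handed
// out again it reads back as zero.
void ExampleRoundTrip(v8::PageAllocator* platform_allocator, Address start,
                      size_t size) {
  CppgcBoundedPageAllocator allocator(platform_allocator, start, size,
                                      kPageSize);
  void* page = allocator.AllocatePages(nullptr, kPageSize, kPageSize,
                                       v8::PageAllocator::kReadWrite);
  memset(page, 0xff, kPageSize);         // Dirty the page.
  allocator.FreePages(page, kPageSize);  // Cleared here before being freed.
  // A later AllocatePages() call that returns the same address now observes
  // zeroed memory.
}
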
CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
@@ -73,7 +94,7 @@ CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
caged_heap_start -
reinterpret_cast<CagedAddress>(reserved_area_.address());
bounded_allocator_ = std::make_unique<CagedHeap::AllocatorType>(
bounded_allocator_ = std::make_unique<CppgcBoundedPageAllocator>(
platform_allocator, caged_heap_start,
reserved_area_.size() - local_data_size_with_padding, kPageSize);
}
@@ -90,5 +90,45 @@ TEST_F(CppgcAllocationTest,
EXPECT_FALSE(HeapObjectHeader::FromPayload(obj).IsFree());
}
namespace {
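// LargeObject exceeds the large-object threshold. When constructed with
// check=true, it verifies that the underlying page memory reads back as zero.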
class LargeObject : public GarbageCollected<LargeObject> {
public:
static constexpr size_t kDataSize = kLargeObjectSizeThreshold + 1;
static size_t destructor_calls;
explicit LargeObject(bool check) {
if (!check) return;
for (size_t i = 0; i < LargeObject::kDataSize; ++i) {
EXPECT_EQ(0, data[i]);
}
}
~LargeObject() { ++destructor_calls; }
void Trace(Visitor*) const {}
char data[kDataSize];
};
size_t LargeObject::destructor_calls = 0u;
} // namespace
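// Allocates large objects, poisons their payloads, collects them, and
// allocates again; any reused large page must have been cleared in between.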
TEST_F(CppgcAllocationTest, LargePagesAreZeroedOut) {
static constexpr size_t kNumObjects = 1u;
LargeObject::destructor_calls = 0u;
std::vector<void*> pages;
for (size_t i = 0; i < kNumObjects; ++i) {
auto* obj = MakeGarbageCollected<LargeObject>(GetAllocationHandle(), false);
pages.push_back(obj);
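// Poison the payload so stale contents would be detected if the page were
// reused without being cleared.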
memset(obj->data, 0xff, LargeObject::kDataSize);
}
PreciseGC();
EXPECT_EQ(kNumObjects, LargeObject::destructor_calls);
bool reused_page = false;
for (size_t i = 0; i < kNumObjects; ++i) {
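// check=true: the constructor verifies that the payload reads back as zero.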
auto* obj = MakeGarbageCollected<LargeObject>(GetAllocationHandle(), true);
if (std::find(pages.begin(), pages.end(), obj) != pages.end())
reused_page = true;
}
EXPECT_TRUE(reused_page);
}
} // namespace internal
} // namespace cppgc