Commit 18c37d32 authored by Samuel Groß, committed by V8 LUCI CQ

Add PageInitializationMode enum for the BoundedPageAllocator

Currently, when compiling with V8_VIRTUAL_MEMORY_CAGE enabled, the
behavior of the BoundedPageAllocator changes from simply making freed
pages inaccessible to decommitting them, which guarantees that they will
be zero-initialized after the next allocation. As this seems to cause
some performance regressions on Mac, this CL introduces a new enum that
specifies how the allocator should behave:
kAllocatedPagesMustBeZeroInitialized causes the pages to be decommitted
during FreePages() and ReleasePages() and thus guarantees
zero-initialization during AllocPages().
kAllocatedPagesCanBeUninitialized only causes the pages to be made
inaccessible, and so does not generally guarantee zero-initialization
for AllocPages().

Finally, this CL also removes some dead code in allocation.cc.

Bug: chromium:1257089
Change-Id: I53fa52c8913df869bee2b536efe252780d1ad893
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3208812
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77285}
parent 6fbb8bc8
......@@ -7,13 +7,14 @@
namespace v8 {
namespace base {
BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator,
Address start, size_t size,
size_t allocate_page_size)
BoundedPageAllocator::BoundedPageAllocator(
v8::PageAllocator* page_allocator, Address start, size_t size,
size_t allocate_page_size, PageInitializationMode page_initialization_mode)
: allocate_page_size_(allocate_page_size),
commit_page_size_(page_allocator->CommitPageSize()),
page_allocator_(page_allocator),
region_allocator_(start, size, allocate_page_size_) {
region_allocator_(start, size, allocate_page_size_),
page_initialization_mode_(page_initialization_mode) {
DCHECK_NOT_NULL(page_allocator);
DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
DCHECK(IsAligned(allocate_page_size_, commit_page_size_));
......@@ -110,16 +111,17 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
Address address = reinterpret_cast<Address>(raw_address);
size_t freed_size = region_allocator_.FreeRegion(address);
if (freed_size != size) return false;
#ifdef V8_VIRTUAL_MEMORY_CAGE
// When the virtual memory cage is enabled, the pages returned by the
// BoundedPageAllocator must be zero-initialized, as some of the additional
// clients expect them to. Decommitting them during FreePages ensures that
// while also changing the access permissions to kNoAccess.
CHECK(page_allocator_->DecommitPages(raw_address, size));
#else
CHECK(page_allocator_->SetPermissions(raw_address, size,
PageAllocator::kNoAccess));
#endif
if (page_initialization_mode_ ==
PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
// When we are required to return zero-initialized pages, we decommit the
// pages here, which will cause any wired pages to be removed by the OS.
CHECK(page_allocator_->DecommitPages(raw_address, size));
} else {
DCHECK_EQ(page_initialization_mode_,
PageInitializationMode::kAllocatedPagesCanBeUninitialized);
CHECK(page_allocator_->SetPermissions(raw_address, size,
PageAllocator::kNoAccess));
}
return true;
}
......@@ -152,14 +154,18 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
// Keep the region in "used" state just uncommit some pages.
Address free_address = address + new_size;
size_t free_size = size - new_size;
#ifdef V8_VIRTUAL_MEMORY_CAGE
// See comment in FreePages().
return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
free_size);
#else
return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
free_size, PageAllocator::kNoAccess);
#endif
if (page_initialization_mode_ ==
PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
// See comment in FreePages().
return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
free_size);
} else {
DCHECK_EQ(page_initialization_mode_,
PageInitializationMode::kAllocatedPagesCanBeUninitialized);
return page_allocator_->SetPermissions(
reinterpret_cast<void*>(free_address), free_size,
PageAllocator::kNoAccess);
}
}
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
......
......@@ -12,10 +12,23 @@
namespace v8 {
namespace base {
// Defines the page initialization mode of a BoundedPageAllocator.
enum class PageInitializationMode {
// The contents of allocated pages must be zero initialized. This causes any
// committed pages to be decommitted during FreePages and ReleasePages. This
// requires the embedder to provide the PageAllocator::DecommitPages API.
kAllocatedPagesMustBeZeroInitialized,
// Allocated pages do not have to be zero initialized and can contain old
// data. This is slightly faster as committed pages are not decommitted
// during FreePages and ReleasePages, but only made inaccessible.
kAllocatedPagesCanBeUninitialized,
};
// This is a v8::PageAllocator implementation that allocates pages within the
// pre-reserved region of virtual space. This class requires the virtual space
// to be kept reserved during the lifetime of this object.
// The main application of bounded page allocator are
// - the V8 virtual memory cage
// - V8 heap pointer compression which requires the whole V8 heap to be
// allocated within a contiguous range of virtual address space,
// - executable page allocation, which allows to use PC-relative 32-bit code
......@@ -28,7 +41,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
using Address = uintptr_t;
BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t allocate_page_size);
size_t size, size_t allocate_page_size,
PageInitializationMode page_initialization_mode);
BoundedPageAllocator(const BoundedPageAllocator&) = delete;
BoundedPageAllocator& operator=(const BoundedPageAllocator&) = delete;
~BoundedPageAllocator() override = default;
......@@ -79,6 +93,7 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
const size_t commit_page_size_;
v8::PageAllocator* const page_allocator_;
v8::base::RegionAllocator region_allocator_;
const PageInitializationMode page_initialization_mode_;
};
} // namespace base
......
......@@ -50,7 +50,9 @@ class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
public:
CppgcBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t allocate_page_size)
: BoundedPageAllocator(page_allocator, start, size, allocate_page_size) {}
: BoundedPageAllocator(page_allocator, start, size, allocate_page_size,
v8::base::PageInitializationMode::
kAllocatedPagesCanBeUninitialized) {}
bool FreePages(void* address, size_t size) final {
// BoundedPageAllocator is not guaranteed to allocate zeroed page.
......
......@@ -60,7 +60,8 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
size_ = size;
cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator_, base_, size_, page_allocator_->AllocatePageSize());
page_allocator_, base_, size_, page_allocator_->AllocatePageSize(),
base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
initialized_ = true;
......
......@@ -63,21 +63,12 @@ class PageAllocatorInitializer {
PageAllocator* page_allocator() const { return page_allocator_; }
#ifdef V8_VIRTUAL_MEMORY_CAGE
PageAllocator* data_cage_page_allocator() const {
return data_cage_page_allocator_;
}
#endif
void SetPageAllocatorForTesting(PageAllocator* allocator) {
page_allocator_ = allocator;
}
private:
PageAllocator* page_allocator_;
#ifdef V8_VIRTUAL_MEMORY_CAGE
PageAllocator* data_cage_page_allocator_;
#endif
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
......@@ -454,7 +445,8 @@ bool VirtualMemoryCage::InitReservation(
params.page_size);
page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
params.page_allocator, allocatable_base, allocatable_size,
params.page_size);
params.page_size,
base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
return true;
}
......
......@@ -54,7 +54,8 @@ std::unique_ptr<v8::base::BoundedPageAllocator> CreateBoundedAllocator(
auto allocator = std::make_unique<v8::base::BoundedPageAllocator>(
platform_allocator, reservation_start, ZoneCompression::kReservationSize,
kZonePageSize);
kZonePageSize,
base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
// Exclude first page from allocation to ensure that accesses through
// decompressed null pointer will seg-fault.
......
......@@ -175,7 +175,8 @@ TEST(MemoryChunk) {
base::BoundedPageAllocator code_page_allocator(
page_allocator, code_range_reservation.address(),
code_range_reservation.size(), MemoryChunk::kAlignment);
code_range_reservation.size(), MemoryChunk::kAlignment,
base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, EXECUTABLE, heap->code_space());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment