Commit 8f3c3419 authored by Benoît Lizé, committed by V8 LUCI CQ

[heap/cppgc] Disable guard pages on ARM64 macOS

Guard pages are 4 kiB areas at the beginning and end of each Oilpan
page (128 kiB) which are meant to be inaccessible. However, on ARM64
macOS the OS page size is 16 kiB, so these areas are not actually made
inaccessible. Yet we still pay for them, as they are part of the first
and last OS page of each Oilpan page: we effectively waste
2 * 4 kiB = 8 kiB, i.e. 6.25% of each Oilpan page.

Since these guard pages are not serving their purpose, disable them on
this platform. Another fix could be to make each guard page 16 kiB, but
given that the entire Oilpan page is 128 kiB, this may have adverse
effects on e.g. fragmentation.

Note that this doesn't regress security, as the regions were never
protected to begin with on this platform.
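
To make the numbers above concrete, here is a minimal standalone C++ sketch
(not part of this CL; the constant names kOilpanPageSize and kGuardSize are
illustrative and simply mirror the values quoted above, and POSIX
sysconf(_SC_PAGESIZE) is used as a generic way to query the OS page size).
It shows that a 4 kiB guard cannot be protected on its own when the OS page
size is 16 kiB, and what fraction of each 128 kiB Oilpan page the two guards
cost:

// Standalone illustration only; not from the V8 sources.
#include <unistd.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kOilpanPageSize = 128 * 1024;  // 128 kiB Oilpan page.
  const size_t kGuardSize = 4 * 1024;         // 4 kiB guard at each end.
  const size_t os_page = static_cast<size_t>(sysconf(_SC_PAGESIZE));

  // Page protection only works at OS page granularity, so a guard smaller
  // than the OS page cannot be made inaccessible by itself.
  const bool protectable = (kGuardSize % os_page) == 0;

  // Two guards per Oilpan page are paid for (they share the first and last
  // OS page of the allocation) but never used.
  const double waste_percent = 100.0 * (2.0 * kGuardSize) / kOilpanPageSize;

  std::printf("OS page: %zu bytes, 4 kiB guard protectable: %s, waste: %.2f%%\n",
              os_page, protectable ? "yes" : "no", waste_percent);
  return 0;
}

On a 16 kiB-page system this prints "protectable: no, waste: 6.25%", which is
the situation the CL addresses.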

Bug: chromium:1298417
Change-Id: Iad5d05670962780e6d1eeab2bb8a331deb7aa1f3
Cq-Include-Trybots: luci.v8.try:v8_linux_arm64_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3471558
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Benoit Lize <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79151}
parent 429f2736
@@ -43,9 +43,30 @@ constexpr size_t kPageSize = 1 << kPageSizeLog2;
constexpr size_t kPageOffsetMask = kPageSize - 1;
constexpr size_t kPageBaseMask = ~kPageOffsetMask;
#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOSX)
// No guard pages on ARM64 macOS. This target has 16 kiB pages, meaning that
// the guard pages do not protect anything, since there is no inaccessible
// region surrounding the allocation.
//
// However, with a 4k guard page size (as below), we avoid putting any data
// inside the "guard pages" region. Effectively, this wastes 2 * 4kiB of memory
// for each 128kiB page, since this is memory we pay for (accounting is done
// at the OS page level), but never use.
//
// The layout of pages is broadly:
// | guard page | header | payload | guard page |
// <--- 4k --->                      <--- 4k --->
// <------------------- 128k ------------------->
//
// Since this is aligned on an OS page boundary (16k), the guard pages are part
// of the first and last OS page, respectively. So they are really private dirty
// memory which we never use.
constexpr size_t kGuardPageSize = 0;
#else
// Guard pages are always put into memory. Whether they are actually protected
// depends on the allocator provided to the garbage collector.
constexpr size_t kGuardPageSize = 4096;
#endif
constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
......
@@ -242,7 +242,8 @@ class V8_EXPORT_PRIVATE PageBackend final {
// Returns true if the provided allocator supports committing at the required
// granularity.
inline bool SupportsCommittingGuardPages(PageAllocator& allocator) {
return kGuardPageSize % allocator.CommitPageSize() == 0;
return kGuardPageSize != 0 &&
kGuardPageSize % allocator.CommitPageSize() == 0;
}
Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
......
@@ -115,6 +115,8 @@ TEST(PageMemoryRegionTest, LargePageMemoryRegion) {
EXPECT_EQ(0u, pm.writeable_region().end()[-1]);
}
// See the comment in globals.h when setting |kGuardPageSize| for details.
#if !(defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOSX))
TEST(PageMemoryRegionTest, PlatformUsesGuardPages) {
// This tests that the testing allocator actually uses protected guard
// regions.
@@ -132,6 +134,7 @@ TEST(PageMemoryRegionTest, PlatformUsesGuardPages) {
EXPECT_TRUE(SupportsCommittingGuardPages(allocator));
#endif
}
#endif // !(defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOSX))
namespace {
@@ -295,15 +298,18 @@ TEST(PageBackendTest, LookupNormal) {
PageBackend backend(allocator, oom_handler);
constexpr size_t kBucket = 0;
Address writeable_base = backend.AllocateNormalPageMemory(kBucket);
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
if (kGuardPageSize)
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - 1));
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base));
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base + kPageSize -
2 * kGuardPageSize - 1));
EXPECT_EQ(nullptr,
backend.Lookup(writeable_base + kPageSize - 2 * kGuardPageSize));
EXPECT_EQ(nullptr,
backend.Lookup(writeable_base - kGuardPageSize + kPageSize - 1));
if (kGuardPageSize) {
EXPECT_EQ(nullptr,
backend.Lookup(writeable_base - kGuardPageSize + kPageSize - 1));
}
}
TEST(PageBackendTest, LookupLarge) {
@@ -312,7 +318,8 @@ TEST(PageBackendTest, LookupLarge) {
PageBackend backend(allocator, oom_handler);
constexpr size_t kSize = 7934;
Address writeable_base = backend.AllocateLargePageMemory(kSize);
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
if (kGuardPageSize)
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - 1));
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base));
EXPECT_EQ(writeable_base, backend.Lookup(writeable_base + kSize - 1));
......
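
The SupportsCommittingGuardPages() change above needs the explicit
kGuardPageSize != 0 test because, in C++, 0 % n == 0 for any non-zero n, so
the modulo check alone would report guard-page support even when guard pages
are disabled. Here is a minimal self-contained sketch of that logic
(StubAllocator and the local copy of the predicate are hypothetical stand-ins
for the real PageAllocator and cppgc code, used only for illustration;
kGuardPageSize = 0 mirrors the ARM64 macOS setting from this CL):

// Standalone illustration only; not from the V8 sources.
#include <cstddef>
#include <cassert>

struct StubAllocator {
  size_t CommitPageSize() const { return 16 * 1024; }  // e.g. 16 kiB commits.
};

constexpr size_t kGuardPageSize = 0;  // Guard pages disabled, as on ARM64 macOS.

bool SupportsCommittingGuardPages(const StubAllocator& allocator) {
  // Without the != 0 term, 0 % CommitPageSize() == 0 would make this return
  // true even though there are no guard pages to commit or protect.
  return kGuardPageSize != 0 &&
         kGuardPageSize % allocator.CommitPageSize() == 0;
}

int main() {
  StubAllocator allocator;
  assert(!SupportsCommittingGuardPages(allocator));
  return 0;
}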