Commit 1ea76c13 authored by Samuel Groß, committed by V8 LUCI CQ

Implement a fake virtual memory cage mechanism

On operating systems where reserving virtual address space is expensive,
notably Windows pre 8.1, it is not possible to create a proper virtual
memory cage. In order to still be able to reference caged objects
through offsets from the cage base on these systems, this CL introduces
a fake cage mechanism. When the fake cage is used, most of the virtual
memory for the cage is not actually reserved. Instead, the cage's page
allocator simply relies on hints to the OS to obtain pages inside the
cage. This does not, however, provide the same security benefits as a
real cage, since unrelated allocations might end up inside the cage.

Bug: chromium:1218005
Change-Id: Ie5314be23966ed0042a017917b63595481b5e7e3
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3217200
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77367}
parent 83c9bded
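For orientation, a minimal sketch of the idea (illustrative only, assuming a 64-bit build; CageSketch and DecodeCagedOffset are hypothetical names, not V8's actual types): caged objects are referenced as offsets from the cage base, which works identically whether the cage is fully reserved (real cage) or mostly unreserved (fake cage).

#include <cstddef>
#include <cstdint>

using Address = uintptr_t;

struct CageSketch {
  Address base;  // start of the (real or fake) cage
  size_t size;   // nominal cage size, e.g. 1TB
  bool is_fake;  // fake: only a prefix of [base, base + size) is reserved
};

// Caged references are stored as offsets from the base; decoding never
// needs to know whether the cage is real or fake, as long as all caged
// objects were allocated within |size| bytes of |base|.
inline Address DecodeCagedOffset(const CageSketch& cage, size_t offset) {
  return cage.base + offset;  // caller guarantees offset < cage.size
}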
......@@ -494,13 +494,13 @@ constexpr bool VirtualMemoryCageIsEnabled() {
#endif
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Size of the virtual memory cage, excluding the guard regions surrounding it.
constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40; // 1 TB
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
static_assert(kVirtualMemoryCageSize > Internals::kPtrComprCageReservationSize,
"The virtual memory cage must be larger than the pointer "
"compression cage contained within it.");
#define GB (1ULL << 30)
#define TB (1ULL << 40)
// Size of the virtual memory cage, excluding the guard regions surrounding it.
constexpr size_t kVirtualMemoryCageSize = 1ULL * TB;
// Required alignment of the virtual memory cage. For simplicity, we require the
// size of the guard regions to be a multiple of this, so that this specifies
......@@ -513,7 +513,7 @@ constexpr size_t kVirtualMemoryCageAlignment =
// Size of the guard regions surrounding the virtual memory cage. This assumes a
// worst-case scenario of a 32-bit unsigned index being used to access an array
// of 64-bit values.
constexpr size_t kVirtualMemoryCageGuardRegionSize = size_t{32} << 30; // 32 GB
constexpr size_t kVirtualMemoryCageGuardRegionSize = 32ULL * GB;
static_assert((kVirtualMemoryCageGuardRegionSize %
kVirtualMemoryCageAlignment) == 0,
......@@ -525,7 +525,31 @@ static_assert((kVirtualMemoryCageGuardRegionSize %
// until either the reservation succeeds or the minimum size is reached. A
// minimum of 32GB allows the 4GB pointer compression region as well as the
// ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage.
constexpr size_t kVirtualMemoryCageMinimumSize = size_t{32} << 30; // 32 GB
constexpr size_t kVirtualMemoryCageMinimumSize = 32ULL * GB;
static_assert(kVirtualMemoryCageMinimumSize <= kVirtualMemoryCageSize,
"The minimal size of the virtual memory cage must be smaller or "
"equal to the regular size.");
// On OSes where reserving virtual memory is too expensive to create a real
// cage, notably Windows pre 8.1, we create a fake cage that doesn't actually
// reserve most of the memory, and so doesn't have the desired security
// properties, but still ensures that objects that should be located inside the
// cage are allocated within kVirtualMemoryCageSize bytes from the start of the
// cage, and so appear to be inside the cage. The minimum size of the virtual
// memory range that is actually reserved for a fake cage is specified by this
// constant and should be big enough to contain the pointer compression region
// as well as the ArrayBuffer partition.
constexpr size_t kFakeVirtualMemoryCageMinReservationSize = 8ULL * GB;
static_assert(kVirtualMemoryCageMinimumSize >
Internals::kPtrComprCageReservationSize,
"The virtual memory cage must be larger than the pointer "
"compression cage contained within it.");
static_assert(kFakeVirtualMemoryCageMinReservationSize >
Internals::kPtrComprCageReservationSize,
"The reservation for a fake virtual memory cage must be larger "
"than the pointer compression cage contained within it.");
// For now, even if the virtual memory cage is enabled, we still allow backing
// stores to be allocated outside of it as fallback. This will simplify the
......@@ -537,7 +561,10 @@ constexpr bool kAllowBackingStoresOutsideCage = false;
constexpr bool kAllowBackingStoresOutsideCage = true;
#endif // V8_HEAP_SANDBOX
#endif // V8_VIRTUAL_MEMORY_CAGE
#undef GB
#undef TB
#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
// Only perform cast check for types derived from v8::Data since
// other types do not implement the Cast method.
......
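The size budget implied by the comments above can be restated concretely as follows (the k-prefixed names here are illustrative, not V8's; assumes a 64-bit build):

#include <cstddef>

constexpr size_t kGB = 1ULL << 30;
constexpr size_t kCageSize = 1ULL << 40;        // 1TB full cage
constexpr size_t kCageMinimumSize = 32 * kGB;   // smallest acceptable cage
constexpr size_t kPtrComprRegion = 4 * kGB;     // pointer compression cage
constexpr size_t kTwoWasmCages = 2 * 10 * kGB;  // two 10GB WASM memory cages

// 4GB + 20GB = 24GB, which leaves 8GB of the 32GB minimum for the
// ArrayBuffer partition and other caged allocations.
static_assert(kPtrComprRegion + kTwoWasmCages < kCageMinimumSize,
              "the minimum-size cage fits its required partitions");
static_assert(kCageMinimumSize <= kCageSize,
              "the minimum size never exceeds the full cage size");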
......@@ -553,6 +553,13 @@ V8 shared library set USING_V8_SHARED.
#endif // V8_OS_WIN
// The virtual memory cage is available (i.e. defined) when pointer compression
// is enabled, but it is only used when V8_VIRTUAL_MEMORY_CAGE is enabled as
// well. This allows better test coverage of the cage.
#if defined(V8_COMPRESS_POINTERS)
#define V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
#endif
// clang-format on
#undef V8_HAS_CPP_ATTRIBUTE
......
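In other words, availability and actual use are two separate switches. The sketch below summarizes the intended relationship (the #error is illustrative of the invariant, not code from v8config.h):

// The cage is *available* (code and tests compile) whenever pointer
// compression is on; it is *used* only if the build also defines
// V8_VIRTUAL_MEMORY_CAGE.
#if defined(V8_COMPRESS_POINTERS)
#define V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
#endif

#if defined(V8_VIRTUAL_MEMORY_CAGE) && \
    !defined(V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE)
#error "V8_VIRTUAL_MEMORY_CAGE requires pointer compression"
#endif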
......@@ -33,16 +33,25 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
DCHECK(IsAligned(alignment, region_allocator_.page_size()));
DCHECK(IsAligned(alignment, allocate_page_size_));
Address address;
Address address = RegionAllocator::kAllocationFailure;
Address hint_address = reinterpret_cast<Address>(hint);
if (hint_address && IsAligned(hint_address, alignment) &&
region_allocator_.contains(hint_address, size)) {
if (region_allocator_.AllocateRegionAt(hint_address, size)) {
address = hint_address;
}
}
if (address == RegionAllocator::kAllocationFailure) {
if (alignment <= allocate_page_size_) {
// TODO(ishell): Consider using randomized version here.
address = region_allocator_.AllocateRegion(size);
} else {
// Currently, this should only be necessary when V8_VIRTUAL_MEMORY_CAGE is
// enabled, in which case a bounded page allocator is used to allocate WASM
// memory buffers, which have a larger alignment.
address = region_allocator_.AllocateAlignedRegion(size, alignment);
}
}
if (address == RegionAllocator::kAllocationFailure) {
return nullptr;
}
......
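Condensed into a free-standing sketch, the new allocation flow reads as follows (hypothetical helper; the real logic lives in BoundedPageAllocator::AllocatePages and uses the RegionAllocator API shown in the hunk above, so V8's internal headers are assumed):

// Try the caller's placement hint first; if it cannot be honored, fall
// back to a regular (or alignment-aware) search of the region allocator.
Address AllocateInRegion(RegionAllocator& regions, Address hint, size_t size,
                         size_t alignment, size_t allocate_page_size) {
  Address address = RegionAllocator::kAllocationFailure;
  if (hint && IsAligned(hint, alignment) && regions.contains(hint, size) &&
      regions.AllocateRegionAt(hint, size)) {
    address = hint;  // the hinted region was free and has been claimed
  }
  if (address == RegionAllocator::kAllocationFailure) {
    address = (alignment <= allocate_page_size)
                  ? regions.AllocateRegion(size)
                  : regions.AllocateAlignedRegion(size, alignment);
  }
  return address;  // still kAllocationFailure if the region is exhausted
}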
......@@ -98,8 +98,15 @@ void IsolateAllocator::InitializeOncePerProcess() {
// runs, and so this will be guaranteed. Currently however, it is possible
// that the embedder accidentally uses the cage's page allocator prior to
// initializing V8, in which case this CHECK will likely fail.
CHECK(cage->page_allocator()->AllocatePagesAt(
cage->base(), params.reservation_size, PageAllocator::kNoAccess));
// TODO(chromium:1218005): here we rely on our BoundedPageAllocators to
// respect the hint parameter. Instead, it would probably be better to add
// a new API that guarantees this, either directly to the PageAllocator
// interface or to a derived one.
void* hint = reinterpret_cast<void*>(cage->base());
void* base = cage->page_allocator()->AllocatePages(
hint, params.reservation_size, params.base_alignment,
PageAllocator::kNoAccess);
CHECK_EQ(base, hint);
existing_reservation =
base::AddressRegion(cage->base(), params.reservation_size);
params.page_allocator = cage->page_allocator();
......
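The pattern used above, allocating at a fixed address by passing it as a hint and then verifying the result, could be factored into a helper along the lines of the TODO (AllocatePagesAtOrDie is a hypothetical name, not an existing V8 API):

// Allocate exactly at |desired| or fail hard. This only works with
// allocators, such as the cage's BoundedPageAllocator, that honor the
// hint whenever the hinted region is free.
void* AllocatePagesAtOrDie(v8::PageAllocator* allocator, uintptr_t desired,
                           size_t size, size_t alignment) {
  void* hint = reinterpret_cast<void*>(desired);
  void* base = allocator->AllocatePages(hint, size, alignment,
                                        v8::PageAllocator::kNoAccess);
  CHECK_EQ(base, hint);  // fails if the hint was not honored
  return base;
}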
......@@ -15,7 +15,7 @@ class PageAllocator;
namespace internal {
#ifdef V8_VIRTUAL_MEMORY_CAGE
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
/**
* V8 Virtual Memory Cage.
......@@ -70,11 +70,12 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
bool is_initialized() const { return initialized_; }
bool is_disabled() const { return disabled_; }
bool is_enabled() const { return !disabled_; }
bool is_fake_cage() const { return is_fake_cage_; }
Address base() const { return base_; }
size_t size() const { return size_; }
base::BoundedPageAllocator* page_allocator() const {
v8::PageAllocator* page_allocator() const {
return cage_page_allocator_.get();
}
......@@ -91,27 +92,48 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
// cage without guard regions, which would otherwise consume too much memory.
friend class SequentialUnmapperTest;
// These tests call the private Initialize methods below.
FRIEND_TEST(VirtualMemoryCageTest, InitializationWithSize);
FRIEND_TEST(VirtualMemoryCageTest, InitializationAsFakeCage);
FRIEND_TEST(VirtualMemoryCageTest, FakeCagePageAllocation);
// We allow tests to disable the guard regions around the cage. This is useful
// for example for tests like the SequentialUnmapperTest which track page
// allocations and so would incur a large overhead from the guard regions.
bool Initialize(v8::PageAllocator* page_allocator, size_t total_size,
bool Initialize(v8::PageAllocator* page_allocator, size_t size,
bool use_guard_regions);
// Used on OSes where reserving virtual memory is too expensive. A fake cage
// does not reserve all of the virtual memory and so doesn't have the desired
// security properties.
bool InitializeAsFakeCage(v8::PageAllocator* page_allocator, size_t size,
size_t size_to_reserve);
Address base_ = kNullAddress;
size_t size_ = 0;
bool has_guard_regions_ = false;
// Base and size of the virtual memory reservation backing this cage. These
// can be different from the cage base and size due to guard regions or when a
// fake cage is used.
Address reservation_base_ = kNullAddress;
size_t reservation_size_ = 0;
bool initialized_ = false;
bool disabled_ = false;
// The PageAllocator through which the virtual memory of the cage was
// allocated.
bool is_fake_cage_ = false;
// The allocator through which the virtual memory of the cage was allocated.
v8::PageAllocator* page_allocator_ = nullptr;
// The BoundedPageAllocator to allocate pages inside the cage.
std::unique_ptr<base::BoundedPageAllocator> cage_page_allocator_;
// The allocator to allocate pages inside the cage.
std::unique_ptr<v8::PageAllocator> cage_page_allocator_;
};
V8_EXPORT_PRIVATE V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
#endif // V8_VIRTUAL_MEMORY_CAGE
#ifdef V8_VIRTUAL_MEMORY_CAGE
// This function is only available when the cage is actually used.
V8_EXPORT_PRIVATE V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
#endif
V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
......
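The relationship between the new reservation_base_/reservation_size_ fields and base_/size_ can be pictured as follows (layout inferred from the comments above; CageContains is an illustrative helper, not the class method):

// Real cage: the reservation covers the guard regions plus the cage.
//
//   reservation_base_                                      reservation end
//   |  guard region  |  base_ ....... base_ + size_  |  guard region  |
//
// Fake cage: only a prefix of the nominal cage is actually reserved;
// base_ == reservation_base_ and size_ is much larger than
// reservation_size_. Pages beyond the reserved prefix are obtained via
// OS placement hints.
//
//   base_ == reservation_base_
//   |  reserved (reservation_size_)  |  unreserved, hint-allocated ... |
//
// Membership in either kind of cage is a half-open range check:
bool CageContains(uintptr_t base, size_t size, uintptr_t addr) {
  return addr >= base && addr < base + size;
}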
......@@ -291,7 +291,6 @@ v8_source_set("cctest_sources") {
"test-utils.cc",
"test-verifiers.cc",
"test-version.cc",
"test-virtual-memory-cage.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
"test-web-snapshots.cc",
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/init/vm-cage.h"
#include "test/cctest/cctest.h"
#ifdef V8_VIRTUAL_MEMORY_CAGE
namespace v8 {
namespace internal {
UNINITIALIZED_TEST(VirtualMemoryCageCreation) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
CHECK(!cage.is_initialized());
CHECK(!cage.is_disabled());
CHECK_EQ(cage.size(), 0);
CHECK(cage.Initialize(&page_allocator));
CHECK(cage.is_initialized());
CHECK_GT(cage.base(), 0);
CHECK_GT(cage.size(), 0);
cage.TearDown();
CHECK(!cage.is_initialized());
}
} // namespace internal
} // namespace v8
#endif // V8_VIRTUAL_MEMORY_CAGE
......@@ -376,6 +376,7 @@ v8_source_set("unittests_sources") {
"regress/regress-crbug-938251-unittest.cc",
"run-all-unittests.cc",
"runtime/runtime-debug-unittest.cc",
"security/virtual-memory-cage-unittest.cc",
"strings/char-predicates-unittest.cc",
"strings/unicode-unittest.cc",
"tasks/background-compile-task-unittest.cc",
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <vector>
#include "src/init/vm-cage.h"
#include "test/unittests/test-utils.h"
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
namespace v8 {
namespace internal {
TEST(VirtualMemoryCageTest, Initialization) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
EXPECT_FALSE(cage.is_initialized());
EXPECT_FALSE(cage.is_disabled());
EXPECT_FALSE(cage.is_fake_cage());
EXPECT_EQ(cage.size(), 0UL);
EXPECT_TRUE(cage.Initialize(&page_allocator));
EXPECT_TRUE(cage.is_initialized());
EXPECT_NE(cage.base(), 0UL);
EXPECT_GT(cage.size(), 0UL);
cage.TearDown();
EXPECT_FALSE(cage.is_initialized());
}
TEST(VirtualMemoryCageTest, InitializationWithSize) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
size_t size = kVirtualMemoryCageMinimumSize;
const bool use_guard_regions = false;
EXPECT_TRUE(cage.Initialize(&page_allocator, size, use_guard_regions));
EXPECT_TRUE(cage.is_initialized());
EXPECT_FALSE(cage.is_fake_cage());
EXPECT_EQ(cage.size(), size);
cage.TearDown();
}
TEST(VirtualMemoryCageTest, InitializationAsFakeCage) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
// Total size of the fake cage.
size_t size = kVirtualMemoryCageSize;
// Size of the virtual memory that is actually reserved at the start of the
// cage.
size_t reserved_size = 2 * page_allocator.AllocatePageSize();
EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
EXPECT_TRUE(cage.is_initialized());
EXPECT_TRUE(cage.is_fake_cage());
EXPECT_NE(cage.base(), 0UL);
EXPECT_EQ(cage.size(), size);
cage.TearDown();
EXPECT_FALSE(cage.is_initialized());
}
TEST(VirtualMemoryCageTest, Contains) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
EXPECT_TRUE(cage.Initialize(&page_allocator));
Address base = cage.base();
size_t size = cage.size();
base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
EXPECT_TRUE(cage.Contains(base));
EXPECT_TRUE(cage.Contains(base + size - 1));
for (int i = 0; i < 10; i++) {
size_t offset = rng.NextInt64() % size;
EXPECT_TRUE(cage.Contains(base + offset));
}
EXPECT_FALSE(cage.Contains(base - 1));
EXPECT_FALSE(cage.Contains(base + size));
for (int i = 0; i < 10; i++) {
Address addr = rng.NextInt64();
if (addr < base || addr >= base + size) {
EXPECT_FALSE(cage.Contains(addr));
}
}
cage.TearDown();
}
void TestCagePageAllocation(V8VirtualMemoryCage& cage) {
const size_t kAllocationSizesInPages[] = {1, 1, 2, 3, 5, 8, 13, 21, 34};
constexpr int kNumAllocations = arraysize(kAllocationSizesInPages);
PageAllocator* allocator = cage.page_allocator();
size_t page_size = allocator->AllocatePageSize();
std::vector<void*> allocations;
for (int i = 0; i < kNumAllocations; i++) {
size_t length = page_size * kAllocationSizesInPages[i];
size_t alignment = page_size;
void* ptr = allocator->AllocatePages(nullptr, length, alignment,
PageAllocator::kNoAccess);
EXPECT_NE(ptr, nullptr);
EXPECT_TRUE(cage.Contains(ptr));
allocations.push_back(ptr);
}
for (int i = 0; i < kNumAllocations; i++) {
size_t length = page_size * kAllocationSizesInPages[i];
allocator->FreePages(allocations[i], length);
}
}
TEST(VirtualMemoryCageTest, PageAllocation) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
EXPECT_TRUE(cage.Initialize(&page_allocator));
TestCagePageAllocation(cage);
cage.TearDown();
}
TEST(VirtualMemoryCageTest, FakeCagePageAllocation) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
size_t size = kVirtualMemoryCageSize;
// Only reserve two pages so the test will allocate memory inside and outside
// of the reserved region.
size_t reserved_size = 2 * page_allocator.AllocatePageSize();
EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
TestCagePageAllocation(cage);
cage.TearDown();
}
} // namespace internal
} // namespace v8
#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE