Commit 1ea76c13 authored by Samuel Groß, committed by V8 LUCI CQ

Implement a fake virtual memory cage mechanism

On operating systems where reserving virtual address space is expensive,
notably Windows pre 8.1, it is not possible to create a proper virtual
memory cage. In order to still be able to reference caged objects
through offsets from the cage base on these systems, this CL introduces
a fake cage mechanism. When the fake cage is used, most of the virtual
memory for the cage is not actually reserved. Instead, the cage's page
allocator simply relies on hints to the OS to obtain pages inside the
cage. This does not, however, provide the same security benefits as a
real cage, since unrelated allocations might end up inside the cage.
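
The mechanism, as a minimal sketch (hypothetical helper names; the real
implementation is the FakeBoundedPageAllocator in the diff below): pick a
random page-aligned address inside the cage's address range, pass it to the
OS as a placement hint, and retry with a new hint if the resulting pages fall
outside the cage.

#include <cstdint>
#include "include/v8-platform.h"  // for v8::PageAllocator

uint64_t RandomUint64();  // hypothetical entropy source, declared elsewhere

void* AllocateInFakeCage(v8::PageAllocator* allocator, uintptr_t cage_base,
                         size_t cage_size, size_t size) {
  const size_t page_size = allocator->AllocatePageSize();
  for (int attempt = 0; attempt < 10; attempt++) {
    // Ask the OS to place the pages at a random spot inside the cage.
    uintptr_t hint = cage_base + RandomUint64() % (cage_size - size);
    hint &= ~(page_size - 1);  // page-align the hint
    void* result =
        allocator->AllocatePages(reinterpret_cast<void*>(hint), size,
                                 page_size, v8::PageAllocator::kNoAccess);
    uintptr_t addr = reinterpret_cast<uintptr_t>(result);
    if (result && addr >= cage_base && addr + size <= cage_base + cage_size) {
      return result;  // the hint was honored; the pages appear caged
    }
    if (result) allocator->FreePages(result, size);  // landed outside: retry
  }
  return nullptr;
}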

Bug: chromium:1218005
Change-Id: Ie5314be23966ed0042a017917b63595481b5e7e3
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3217200
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77367}
parent 83c9bded
@@ -494,13 +494,13 @@ constexpr bool VirtualMemoryCageIsEnabled() {
#endif
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Size of the virtual memory cage, excluding the guard regions surrounding it.
constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40; // 1 TB
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
static_assert(kVirtualMemoryCageSize > Internals::kPtrComprCageReservationSize,
"The virtual memory cage must be larger than the pointer "
"compression cage contained within it.");
#define GB (1ULL << 30)
#define TB (1ULL << 40)
// Size of the virtual memory cage, excluding the guard regions surrounding it.
constexpr size_t kVirtualMemoryCageSize = 1ULL * TB;
// Required alignment of the virtual memory cage. For simplicity, we require the
// size of the guard regions to be a multiple of this, so that this specifies
@@ -513,7 +513,7 @@ constexpr size_t kVirtualMemoryCageAlignment =
// Size of the guard regions surrounding the virtual memory cage. This assumes a
// worst-case scenario of a 32-bit unsigned index being used to access an array
// of 64-bit values.
constexpr size_t kVirtualMemoryCageGuardRegionSize = size_t{32} << 30; // 32 GB
constexpr size_t kVirtualMemoryCageGuardRegionSize = 32ULL * GB;
static_assert((kVirtualMemoryCageGuardRegionSize %
kVirtualMemoryCageAlignment) == 0,
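
For reference, the worst case described above works out to exactly 32 GB: a
32-bit unsigned index can take 2^32 values, and each 64-bit element is 8
bytes, so a corrupted index can reach up to 2^32 * 8 = 2^35 bytes past the
base of an array. A compile-time restatement of that arithmetic (editorial
sketch, independent of the constants above):

// Sketch: why 32 GB guard regions suffice for a 32-bit index into an array of
// 64-bit values (assumes a 64-bit size_t, as in the cage configuration).
constexpr size_t kMaxIndexCount = size_t{1} << 32;  // distinct 32-bit indices
constexpr size_t kIndexedElementSize = 8;           // bytes per 64-bit element
static_assert(kMaxIndexCount * kIndexedElementSize == (size_t{32} << 30),
              "a 32-bit index over 8-byte elements spans exactly 32 GB");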
@@ -525,7 +525,31 @@ static_assert((kVirtualMemoryCageGuardRegionSize %
// until either the reservation succeeds or the minimum size is reached. A
// minimum of 32GB allows the 4GB pointer compression region as well as the
// ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage.
constexpr size_t kVirtualMemoryCageMinimumSize = size_t{32} << 30; // 32 GB
constexpr size_t kVirtualMemoryCageMinimumSize = 32ULL * GB;
static_assert(kVirtualMemoryCageMinimumSize <= kVirtualMemoryCageSize,
"The minimal size of the virtual memory cage must be smaller or "
"equal to the regular size.");
// On OSes where reserving virtual memory is too expensive to create a real
// cage, notably Windows pre 8.1, we create a fake cage that doesn't actually
// reserve most of the memory, and so doesn't have the desired security
// properties, but still ensures that objects that should be located inside the
// cage are allocated within kVirtualMemoryCageSize bytes from the start of the
// cage, and so appear to be inside the cage. The minimum size of the virtual
// memory range that is actually reserved for a fake cage is specified by this
// constant and should be big enough to contain the pointer compression region
// as well as the ArrayBuffer partition.
constexpr size_t kFakeVirtualMemoryCageMinReservationSize = 8ULL * GB;
static_assert(kVirtualMemoryCageMinimumSize >
Internals::kPtrComprCageReservationSize,
"The virtual memory cage must be larger than the pointer "
"compression cage contained within it.");
static_assert(kFakeVirtualMemoryCageMinReservationSize >
Internals::kPtrComprCageReservationSize,
"The reservation for a fake virtual memory cage must be larger "
"than the pointer compression cage contained within it.");
// For now, even if the virtual memory cage is enabled, we still allow backing
// stores to be allocated outside of it as fallback. This will simplify the
@@ -537,7 +561,10 @@ constexpr bool kAllowBackingStoresOutsideCage = false;
constexpr bool kAllowBackingStoresOutsideCage = true;
#endif // V8_HEAP_SANDBOX
#endif // V8_VIRTUAL_MEMORY_CAGE
#undef GB
#undef TB
#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
// Only perform cast check for types derived from v8::Data since
// other types do not implement the Cast method.
......
@@ -553,6 +553,13 @@ V8 shared library set USING_V8_SHARED.
#endif // V8_OS_WIN
// The virtual memory cage is available (i.e. defined) when pointer compression
// is enabled, but it is only used when V8_VIRTUAL_MEMORY_CAGE is enabled as
// well. This allows better test coverage of the cage.
#if defined(V8_COMPRESS_POINTERS)
#define V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
#endif
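
A sketch of the intended guard pattern (illustrative, mirroring the header
changes in this CL): definitions of the cage machinery are compiled whenever
the cage is available, so they get test coverage even in builds that don't
use the cage, while use sites check the narrower macro.

// Illustrative guard pattern (not verbatim from the headers):
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
// Compiled (and unit-testable) in every pointer-compression build ...
class V8VirtualMemoryCage;
#endif

#ifdef V8_VIRTUAL_MEMORY_CAGE
// ... but only consulted for allocations when the cage is enabled.
V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
#endif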
// clang-format on
#undef V8_HAS_CPP_ATTRIBUTE
......
@@ -33,16 +33,25 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
DCHECK(IsAligned(alignment, region_allocator_.page_size()));
DCHECK(IsAligned(alignment, allocate_page_size_));
Address address;
if (alignment <= allocate_page_size_) {
// TODO(ishell): Consider using randomized version here.
address = region_allocator_.AllocateRegion(size);
} else {
// Currently, this should only be necessary when V8_VIRTUAL_MEMORY_CAGE is
// enabled, in which case a bounded page allocator is used to allocate WASM
// memory buffers, which have a larger alignment.
address = region_allocator_.AllocateAlignedRegion(size, alignment);
Address address = RegionAllocator::kAllocationFailure;
Address hint_address = reinterpret_cast<Address>(hint);
if (hint_address && IsAligned(hint_address, alignment) &&
region_allocator_.contains(hint_address, size)) {
if (region_allocator_.AllocateRegionAt(hint_address, size)) {
address = hint_address;
}
}
if (address == RegionAllocator::kAllocationFailure) {
if (alignment <= allocate_page_size_) {
// TODO(ishell): Consider using randomized version here.
address = region_allocator_.AllocateRegion(size);
} else {
address = region_allocator_.AllocateAlignedRegion(size, alignment);
}
}
if (address == RegionAllocator::kAllocationFailure) {
return nullptr;
}
......
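
With this change, a caller that passes an aligned, in-range hint gets exactly
that address whenever the corresponding region is still free; a rough usage
sketch (hypothetical wrapper, assuming the caller knows the allocator's
bounds):

// Sketch: requesting a specific address from a BoundedPageAllocator.
void* TryAllocateAt(v8::base::BoundedPageAllocator* allocator, void* hint,
                    size_t size) {
  void* result =
      allocator->AllocatePages(hint, size, allocator->AllocatePageSize(),
                               v8::PageAllocator::kNoAccess);
  // result == hint if [hint, hint + size) was free; otherwise the allocator
  // falls back to its regular placement inside the managed region.
  return result;
}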
@@ -98,8 +98,15 @@ void IsolateAllocator::InitializeOncePerProcess() {
// runs, and so this will be guaranteed. Currently however, it is possible
// that the embedder accidentally uses the cage's page allocator prior to
// initializing V8, in which case this CHECK will likely fail.
CHECK(cage->page_allocator()->AllocatePagesAt(
cage->base(), params.reservation_size, PageAllocator::kNoAccess));
// TODO(chromium:1218005): here we rely on our BoundedPageAllocators to
// respect the hint parameter. Instead, it would probably be better to add
// a new API that guarantees this, either directly to the PageAllocator
// interface or to a derived one.
void* hint = reinterpret_cast<void*>(cage->base());
void* base = cage->page_allocator()->AllocatePages(
hint, params.reservation_size, params.base_alignment,
PageAllocator::kNoAccess);
CHECK_EQ(base, hint);
existing_reservation =
base::AddressRegion(cage->base(), params.reservation_size);
params.page_allocator = cage->page_allocator();
......
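
One possible shape for the API the TODO asks for (purely hypothetical; the
tracking bug owns the real design) would be an allocation method with
all-or-nothing placement semantics rather than a hint:

// Hypothetical interface extension (sketch only, not an existing V8 API):
class ExactPlacementPageAllocator : public v8::PageAllocator {
 public:
  // Reserves [address, address + size) exactly, or fails without side
  // effects; unlike AllocatePages, the address is a requirement, not a hint.
  virtual bool AllocatePagesAt(void* address, size_t size,
                               Permission access) = 0;
};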
@@ -8,6 +8,8 @@
#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/lazy-instance.h"
#include "src/base/utils/random-number-generator.h"
#include "src/flags/flags.h"
#include "src/utils/allocation.h"
#if defined(V8_OS_WIN)
@@ -19,22 +21,199 @@
namespace v8 {
namespace internal {
#ifdef V8_VIRTUAL_MEMORY_CAGE
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// A PageAllocator that allocates pages inside a given virtual address range
// like the BoundedPageAllocator, except that only a (small) part of the range
// has actually been reserved. As such, this allocator relies on page
// allocation hints for the OS to obtain pages inside the non-reserved part.
// This allocator is used on OSes where reserving virtual address space (and
// thus a virtual memory cage) is too expensive, notably Windows pre 8.1.
class FakeBoundedPageAllocator : public v8::PageAllocator {
public:
FakeBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t reserved_size)
: page_allocator_(page_allocator),
start_(start),
size_(size),
reserved_size_(reserved_size),
end_of_reserved_region_(start + reserved_size) {
// The size is required to be a power of two so that obtaining a random
// address inside the managed region simply requires a fixed number of
// random bits as offset.
DCHECK(base::bits::IsPowerOfTwo(size));
DCHECK_LT(reserved_size, size);
if (FLAG_random_seed != 0) {
rng_.SetSeed(FLAG_random_seed);
}
reserved_region_page_allocator_ =
std::make_unique<base::BoundedPageAllocator>(
page_allocator_, start_, reserved_size_,
page_allocator_->AllocatePageSize(),
base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
}
~FakeBoundedPageAllocator() override = default;
size_t AllocatePageSize() override {
return page_allocator_->AllocatePageSize();
}
size_t CommitPageSize() override { return page_allocator_->CommitPageSize(); }
void SetRandomMmapSeed(int64_t seed) override { rng_.SetSeed(seed); }
void* GetRandomMmapAddr() override {
// Generate a random number between 0 and size_, then add that to the start
// address to obtain a random mmap address. We deliberately don't use our
// provided page allocator's GetRandomMmapAddr here since that could be
// biased, while we want uniformly distributed random numbers here.
Address addr = rng_.NextInt64() % size_ + start_;
addr = RoundDown(addr, AllocatePageSize());
void* ptr = reinterpret_cast<void*>(addr);
DCHECK(Contains(ptr, 1));
return ptr;
}
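
Note that the modulo in GetRandomMmapAddr is only bias-free because the
constructor checks that size_ is a power of two: reducing a uniform 64-bit
value modulo a power of two simply keeps its low bits, each of which is
uniform. A standalone illustration of that equivalence (editorial sketch, not
V8 code):

#include <cstdint>

// For power-of-two sizes, x % size equals x & (size - 1): the reduction
// keeps the low log2(size) bits of the random value.
constexpr uint64_t kCageSize = uint64_t{1} << 40;  // e.g. a 1 TB cage
constexpr uint64_t OffsetInCage(uint64_t random_bits) {
  return random_bits & (kCageSize - 1);
}
static_assert(OffsetInCage(0x1234567890ABCDEFULL) ==
                  0x1234567890ABCDEFULL % kCageSize,
              "masking and modulo agree for power-of-two sizes");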
void* AllocatePages(void* hint, size_t size, size_t alignment,
Permission access) override {
DCHECK(IsAligned(size, AllocatePageSize()));
DCHECK(IsAligned(alignment, AllocatePageSize()));
// First, try allocating the memory inside the reserved region.
void* ptr = reserved_region_page_allocator_->AllocatePages(
hint, size, alignment, access);
if (ptr) return ptr;
// Then, fall back to allocating memory outside of the reserved region
// through page allocator hints.
// Somewhat arbitrary size limitation to ensure that the loop below for
// finding a fitting base address hint terminates quickly.
if (size >= size_ / 2) return nullptr;
if (!hint || !Contains(hint, size)) hint = GetRandomMmapAddr();
static constexpr int kMaxAttempts = 10;
for (int i = 0; i < kMaxAttempts; i++) {
// If the hint wouldn't result in the entire allocation being inside the
// managed region, simply retry. There is at least a 50% chance of
// getting a usable address due to the size restriction above.
while (!Contains(hint, size)) {
hint = GetRandomMmapAddr();
}
ptr = page_allocator_->AllocatePages(hint, size, alignment, access);
if (ptr && Contains(ptr, size)) {
return ptr;
} else if (ptr) {
page_allocator_->FreePages(ptr, size);
}
// Retry at a different address.
hint = GetRandomMmapAddr();
}
return nullptr;
}
bool FreePages(void* address, size_t size) override {
return AllocatorFor(address)->FreePages(address, size);
}
bool ReleasePages(void* address, size_t size, size_t new_length) override {
return AllocatorFor(address)->ReleasePages(address, size, new_length);
}
bool SetPermissions(void* address, size_t size,
Permission permissions) override {
return AllocatorFor(address)->SetPermissions(address, size, permissions);
}
bool DiscardSystemPages(void* address, size_t size) override {
return AllocatorFor(address)->DiscardSystemPages(address, size);
}
bool DecommitPages(void* address, size_t size) override {
return AllocatorFor(address)->DecommitPages(address, size);
}
private:
bool Contains(void* ptr, size_t length) {
Address addr = reinterpret_cast<Address>(ptr);
return (addr >= start_) && ((addr + length) < (start_ + size_));
}
v8::PageAllocator* AllocatorFor(void* ptr) {
Address addr = reinterpret_cast<Address>(ptr);
if (addr < end_of_reserved_region_) {
DCHECK_GE(addr, start_);
return reserved_region_page_allocator_.get();
} else {
return page_allocator_;
}
}
// The page allocator through which pages inside the region are allocated.
v8::PageAllocator* const page_allocator_;
// The bounded page allocator managing the sub-region that was actually
// reserved.
std::unique_ptr<base::BoundedPageAllocator> reserved_region_page_allocator_;
// Random number generator for generating random addresses.
base::RandomNumberGenerator rng_;
// The start of the virtual memory region in which to allocate pages. This is
// also the start of the sub-region that was reserved.
const Address start_;
// The total size of the address space in which to allocate pages.
const size_t size_;
// The size of the sub-region that has actually been reserved.
const size_t reserved_size_;
// The end of the sub-region that has actually been reserved.
const Address end_of_reserved_region_;
};
static uintptr_t DetermineAddressSpaceLimit() {
// TODO(saelo) should this also take things like rlimits into account?
#ifdef V8_TARGET_ARCH_64_BIT
// TODO(saelo) this should be determined based on the CPU model being used
// and its number of virtual address bits.
uintptr_t virtual_address_bits = 48;
// Virtual address space is split 50/50 between userspace and kernel, so the
// userspace half has one fewer address bit.
uintptr_t userspace_virtual_address_bits = virtual_address_bits - 1;
uintptr_t address_space_limit = uintptr_t{1} << userspace_virtual_address_bits;
return address_space_limit;
#else
#error Unsupported target architecture.
#endif
}
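
Worked numbers for the default case: 48 virtual address bits give 2^48 bytes
= 256 TiB of address space, and a 50/50 userspace/kernel split leaves 2^47
bytes = 128 TiB for userspace, i.e. one fewer address bit, not half as many
bits. A one-line sanity check of that arithmetic:

// Half of a 2^48-byte address space is a 2^47-byte range (one fewer bit).
static_assert((1ULL << 48) / 2 == (1ULL << 47),
              "a 50/50 split removes one address bit");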
bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
bool use_guard_regions = true;
size_t size = kVirtualMemoryCageSize;
// TODO(saelo) We need to take the number of virtual address bits of the CPU
// into account when determining the size of the cage. For example, if there
// are only 39 bits available (some older Intel CPUs), split evenly between
// userspace and kernel, then userspace can only address 256GB and so the
// maximum cage size should probably be something around 64GB to 128GB.
const size_t size = kVirtualMemoryCageSize;
#if defined(V8_OS_WIN)
if (!IsWindows8Point1OrGreater()) {
// On Windows pre 8.1, reserving virtual memory is an expensive operation,
// possibly because page table entries are created for the address range.
// For example, a 1TB reservation increases private memory usage by 2GB. As
// such, we can unfortunately only create a minimal cage on these versions,
// without guard regions and without our desired security properties.
use_guard_regions = false;
size = kVirtualMemoryCageMinimumSize;
// apparently because the OS already charges for the memory required for
// all page table entries. For example, a 1TB reservation increases private
// memory usage by 2GB. As such, it is not possible to create a proper
// virtual memory cage there and so a fake cage is created which doesn't
// reserve most of the virtual memory, and so doesn't incur the cost, but
// also doesn't provide the desired security benefits.
const size_t size_to_reserve = kFakeVirtualMemoryCageMinReservationSize;
return InitializeAsFakeCage(page_allocator, size, size_to_reserve);
}
#endif
// TODO(saelo) if this fails, we could still fall back to creating a fake
// cage.
const bool use_guard_regions = true;
return Initialize(page_allocator, size, use_guard_regions);
}
@@ -54,34 +233,109 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
// doesn't reduce the cage's security properties if it has a smaller size.
// Which of these options is ultimately taken likely depends on how frequently
// cage reservation failures occur in practice.
while (!base_ && size >= kVirtualMemoryCageMinimumSize) {
size_t reservation_size = size;
size_t reservation_size;
while (!reservation_base_ && size >= kVirtualMemoryCageMinimumSize) {
reservation_size = size;
if (use_guard_regions) {
reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
}
base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
nullptr, reservation_size, kVirtualMemoryCageAlignment,
PageAllocator::kNoAccess));
// Technically, we should use kNoAccessWillJitLater here instead since the
// cage will contain JIT pages. However, currently this is not required as
// PA anyway uses MAP_JIT for V8 mappings. Further, we want to eventually
// move JIT pages out of the cage, at which point we'd like to forbid
// making pages inside the cage executable, and so don't want MAP_JIT.
void* hint = page_allocator->GetRandomMmapAddr();
reservation_base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
hint, reservation_size, kVirtualMemoryCageAlignment,
PageAllocator::kNoAccess));
if (!base_) {
if (!reservation_base_) {
size /= 2;
}
}
if (!base_) return false;
if (!reservation_base_) return false;
base_ = reservation_base_;
if (use_guard_regions) {
base_ += kVirtualMemoryCageGuardRegionSize;
has_guard_regions_ = true;
}
page_allocator_ = page_allocator;
size_ = size;
reservation_size_ = reservation_size;
cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator_, base_, size_, page_allocator_->AllocatePageSize(),
base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
initialized_ = true;
is_fake_cage_ = false;
return true;
}
bool V8VirtualMemoryCage::InitializeAsFakeCage(
v8::PageAllocator* page_allocator, size_t size, size_t size_to_reserve) {
CHECK(!initialized_);
CHECK(!disabled_);
CHECK(base::bits::IsPowerOfTwo(size));
CHECK(base::bits::IsPowerOfTwo(size_to_reserve));
CHECK_GE(size, kVirtualMemoryCageMinimumSize);
CHECK_LT(size_to_reserve, size);
// Use a custom random number generator here to ensure that we get uniformly
// distributed random numbers. We figure out the available address space
// ourselves, and so are potentially better positioned to determine a good
// base address for the cage than the embedder-provided GetRandomMmapAddr().
base::RandomNumberGenerator rng;
if (FLAG_random_seed != 0) {
rng.SetSeed(FLAG_random_seed);
}
// We try to ensure that base + size is still fully within the process'
// address space, even though we only reserve a fraction of the memory.
Address address_space_end = DetermineAddressSpaceLimit();
DCHECK(base::bits::IsPowerOfTwo(address_space_end));
Address highest_possible_address = address_space_end - size;
constexpr int kMaxAttempts = 10;
for (int i = 1; i <= kMaxAttempts; i++) {
// The size of the cage is small relative to the size of the usable address
// space, so we can just retry until we get a usable hint.
Address hint;
do {
hint = rng.NextInt64() % address_space_end;
} while (hint > highest_possible_address);
// Align to page size.
hint = RoundDown(hint, page_allocator->AllocatePageSize());
reservation_base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
reinterpret_cast<void*>(hint), size_to_reserve,
kVirtualMemoryCageAlignment, PageAllocator::kNoAccess));
if (!reservation_base_) return false;
// Take this base if it meets the requirements or if this is the last
// attempt.
if (reservation_base_ <= highest_possible_address || i == kMaxAttempts)
break;
// Can't use this base, so free the reservation and try again. Note that we
// must use the page_allocator parameter here: the page_allocator_ member is
// only initialized after this loop completes.
page_allocator->FreePages(reinterpret_cast<void*>(reservation_base_),
size_to_reserve);
reservation_base_ = kNullAddress;
}
DCHECK(reservation_base_);
base_ = reservation_base_;
size_ = size;
reservation_size_ = size_to_reserve;
initialized_ = true;
is_fake_cage_ = true;
page_allocator_ = page_allocator;
cage_page_allocator_ = std::make_unique<FakeBoundedPageAllocator>(
page_allocator_, base_, size_, reservation_size_);
return true;
}
@@ -89,26 +343,24 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
void V8VirtualMemoryCage::TearDown() {
if (initialized_) {
cage_page_allocator_.reset();
Address reservation_base = base_;
size_t reservation_size = size_;
if (has_guard_regions_) {
reservation_base -= kVirtualMemoryCageGuardRegionSize;
reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
}
CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base),
reservation_size));
page_allocator_ = nullptr;
CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base_),
reservation_size_));
base_ = kNullAddress;
size_ = 0;
reservation_base_ = kNullAddress;
reservation_size_ = 0;
initialized_ = false;
has_guard_regions_ = false;
is_fake_cage_ = false;
page_allocator_ = nullptr;
}
disabled_ = false;
}
#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
#ifdef V8_VIRTUAL_MEMORY_CAGE
DEFINE_LAZY_LEAKY_OBJECT_GETTER(V8VirtualMemoryCage,
GetProcessWideVirtualMemoryCage)
#endif
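
DEFINE_LAZY_LEAKY_OBJECT_GETTER comes from src/base/lazy-instance.h;
conceptually it defines the getter as a lazily constructed, deliberately
leaked process-wide singleton, roughly like the following (illustrative
sketch, not the macro's literal expansion):

// Rough equivalent of the macro's output (sketch):
V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage() {
  // Constructed on first use and intentionally never destroyed, so the cage
  // outlives all static destructors.
  static V8VirtualMemoryCage* instance = new V8VirtualMemoryCage;
  return instance;
}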
} // namespace internal
......
@@ -15,7 +15,7 @@ class PageAllocator;
namespace internal {
#ifdef V8_VIRTUAL_MEMORY_CAGE
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
/**
* V8 Virtual Memory Cage.
@@ -70,11 +70,12 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
bool is_initialized() const { return initialized_; }
bool is_disabled() const { return disabled_; }
bool is_enabled() const { return !disabled_; }
bool is_fake_cage() const { return is_fake_cage_; }
Address base() const { return base_; }
size_t size() const { return size_; }
base::BoundedPageAllocator* page_allocator() const {
v8::PageAllocator* page_allocator() const {
return cage_page_allocator_.get();
}
@@ -91,27 +92,48 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
// cage without guard regions, which would otherwise consume too much memory.
friend class SequentialUnmapperTest;
// These tests call the private Initialize methods below.
FRIEND_TEST(VirtualMemoryCageTest, InitializationWithSize);
FRIEND_TEST(VirtualMemoryCageTest, InitializationAsFakeCage);
FRIEND_TEST(VirtualMemoryCageTest, FakeCagePageAllocation);
// We allow tests to disable the guard regions around the cage. This is useful
// for example for tests like the SequentialUnmapperTest which track page
// allocations and so would incur a large overhead from the guard regions.
bool Initialize(v8::PageAllocator* page_allocator, size_t total_size,
bool Initialize(v8::PageAllocator* page_allocator, size_t size,
bool use_guard_regions);
// Used on OSes where reserving virtual memory is too expensive. A fake cage
// does not reserve all of the virtual memory and so doesn't have the desired
// security properties.
bool InitializeAsFakeCage(v8::PageAllocator* page_allocator, size_t size,
size_t size_to_reserve);
Address base_ = kNullAddress;
size_t size_ = 0;
bool has_guard_regions_ = false;
// Base and size of the virtual memory reservation backing this cage. These
// can be different from the cage base and size due to guard regions or when a
// fake cage is used.
Address reservation_base_ = kNullAddress;
size_t reservation_size_ = 0;
bool initialized_ = false;
bool disabled_ = false;
// The PageAllocator through which the virtual memory of the cage was
// allocated.
bool is_fake_cage_ = false;
// The allocator through which the virtual memory of the cage was allocated.
v8::PageAllocator* page_allocator_ = nullptr;
// The BoundedPageAllocator to allocate pages inside the cage.
std::unique_ptr<base::BoundedPageAllocator> cage_page_allocator_;
// The allocator to allocate pages inside the cage.
std::unique_ptr<v8::PageAllocator> cage_page_allocator_;
};
V8_EXPORT_PRIVATE V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
#endif // V8_VIRTUAL_MEMORY_CAGE
#ifdef V8_VIRTUAL_MEMORY_CAGE
// This function is only available when the cage is actually used.
V8_EXPORT_PRIVATE V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
#endif
V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
......
@@ -291,7 +291,6 @@ v8_source_set("cctest_sources") {
"test-utils.cc",
"test-verifiers.cc",
"test-version.cc",
"test-virtual-memory-cage.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
"test-web-snapshots.cc",
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/init/vm-cage.h"
#include "test/cctest/cctest.h"
#ifdef V8_VIRTUAL_MEMORY_CAGE
namespace v8 {
namespace internal {
UNINITIALIZED_TEST(VirtualMemoryCageCreation) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
CHECK(!cage.is_initialized());
CHECK(!cage.is_disabled());
CHECK_EQ(cage.size(), 0);
CHECK(cage.Initialize(&page_allocator));
CHECK(cage.is_initialized());
CHECK_GT(cage.base(), 0);
CHECK_GT(cage.size(), 0);
cage.TearDown();
CHECK(!cage.is_initialized());
}
} // namespace internal
} // namespace v8
#endif // V8_VIRTUAL_MEMORY_CAGE
@@ -376,6 +376,7 @@ v8_source_set("unittests_sources") {
"regress/regress-crbug-938251-unittest.cc",
"run-all-unittests.cc",
"runtime/runtime-debug-unittest.cc",
"security/virtual-memory-cage-unittest.cc",
"strings/char-predicates-unittest.cc",
"strings/unicode-unittest.cc",
"tasks/background-compile-task-unittest.cc",
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <vector>
#include "src/init/vm-cage.h"
#include "test/unittests/test-utils.h"
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
namespace v8 {
namespace internal {
TEST(VirtualMemoryCageTest, Initialization) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
EXPECT_FALSE(cage.is_initialized());
EXPECT_FALSE(cage.is_disabled());
EXPECT_FALSE(cage.is_fake_cage());
EXPECT_EQ(cage.size(), 0UL);
EXPECT_TRUE(cage.Initialize(&page_allocator));
EXPECT_TRUE(cage.is_initialized());
EXPECT_NE(cage.base(), 0UL);
EXPECT_GT(cage.size(), 0UL);
cage.TearDown();
EXPECT_FALSE(cage.is_initialized());
}
TEST(VirtualMemoryCageTest, InitializationWithSize) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
size_t size = kVirtualMemoryCageMinimumSize;
const bool use_guard_regions = false;
EXPECT_TRUE(cage.Initialize(&page_allocator, size, use_guard_regions));
EXPECT_TRUE(cage.is_initialized());
EXPECT_FALSE(cage.is_fake_cage());
EXPECT_EQ(cage.size(), size);
cage.TearDown();
}
TEST(VirtualMemoryCageTest, InitializationAsFakeCage) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
// Total size of the fake cage.
size_t size = kVirtualMemoryCageSize;
// Size of the virtual memory that is actually reserved at the start of the
// cage.
size_t reserved_size = 2 * page_allocator.AllocatePageSize();
EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
EXPECT_TRUE(cage.is_initialized());
EXPECT_TRUE(cage.is_fake_cage());
EXPECT_NE(cage.base(), 0UL);
EXPECT_EQ(cage.size(), size);
cage.TearDown();
EXPECT_FALSE(cage.is_initialized());
}
TEST(VirtualMemoryCageTest, Contains) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
EXPECT_TRUE(cage.Initialize(&page_allocator));
Address base = cage.base();
size_t size = cage.size();
base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
EXPECT_TRUE(cage.Contains(base));
EXPECT_TRUE(cage.Contains(base + size - 1));
for (int i = 0; i < 10; i++) {
size_t offset = rng.NextInt64() % size;
EXPECT_TRUE(cage.Contains(base + offset));
}
EXPECT_FALSE(cage.Contains(base - 1));
EXPECT_FALSE(cage.Contains(base + size));
for (int i = 0; i < 10; i++) {
Address addr = rng.NextInt64();
if (addr < base || addr >= base + size) {
EXPECT_FALSE(cage.Contains(addr));
}
}
cage.TearDown();
}
void TestCagePageAllocation(V8VirtualMemoryCage& cage) {
const size_t kAllocationSizesInPages[] = {1, 1, 2, 3, 5, 8, 13, 21, 34};
constexpr int kNumAllocations = arraysize(kAllocationSizesInPages);
PageAllocator* allocator = cage.page_allocator();
size_t page_size = allocator->AllocatePageSize();
std::vector<void*> allocations;
for (int i = 0; i < kNumAllocations; i++) {
size_t length = page_size * kAllocationSizesInPages[i];
size_t alignment = page_size;
void* ptr = allocator->AllocatePages(nullptr, length, alignment,
PageAllocator::kNoAccess);
EXPECT_NE(ptr, nullptr);
EXPECT_TRUE(cage.Contains(ptr));
allocations.push_back(ptr);
}
for (int i = 0; i < kNumAllocations; i++) {
size_t length = page_size * kAllocationSizesInPages[i];
allocator->FreePages(allocations[i], length);
}
}
TEST(VirtualMemoryCageTest, PageAllocation) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
EXPECT_TRUE(cage.Initialize(&page_allocator));
TestCagePageAllocation(cage);
cage.TearDown();
}
TEST(VirtualMemoryCageTest, FakeCagePageAllocation) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
size_t size = kVirtualMemoryCageSize;
// Only reserve two pages so the test will allocate memory inside and outside
// of the reserved region.
size_t reserved_size = 2 * page_allocator.AllocatePageSize();
EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
TestCagePageAllocation(cage);
cage.TearDown();
}
} // namespace internal
} // namespace v8
#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE