Commit 8581adae authored by Samuel Groß, committed by V8 LUCI CQ

Introduce v8_enable_virtual_memory_cage

When this is enabled, v8 reserves a large region of virtual address
space during initialization, at the start of which it will place its 4GB
pointer compression cage. The remainder of the cage is used to store
ArrayBuffer backing stores and WASM memory buffers. This will later
allow referencing these buffers from inside V8 through offsets from the
cage base rather than through raw pointers.
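
A minimal embedder-side sketch of the initialization order this introduces (the platform setup around it is illustrative, not part of this change):

#include <memory>
#include "libplatform/libplatform.h"
#include "v8.h"

int main() {
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
#ifdef V8_VIRTUAL_MEMORY_CAGE
  // Must run after InitializePlatform() but before Initialize(); the cage is
  // torn down again during ShutdownPlatform().
  if (!v8::V8::InitializeVirtualMemoryCage()) return 1;
#endif
  v8::V8::Initialize();
  // ... create isolates, run scripts ...
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  return 0;
}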

Bug: chromium:1218005
Change-Id: I300094b07f64985217104b14c320cc019f8438af
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3010195
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Samuel Groß <saelo@google.com>
Cr-Commit-Position: refs/heads/master@{#76234}
parent 2890419f
......@@ -312,6 +312,11 @@ declare_args() {
# Sets -DV8_HEAP_SANDBOX.
v8_enable_heap_sandbox = ""
# Enable the Virtual Memory Cage, which contains the pointer compression cage
# as well as ArrayBuffer BackingStores and WASM memory cages.
# Sets -DV8_VIRTUAL_MEMORY_CAGE.
v8_enable_virtual_memory_cage = ""
# Experimental feature for collecting per-class zone memory stats.
# Requires use_rtti = true
v8_enable_precise_zone_stats = false
......@@ -392,6 +397,9 @@ if (v8_enable_zone_compression == "") {
if (v8_enable_heap_sandbox == "") {
v8_enable_heap_sandbox = false
}
if (v8_enable_virtual_memory_cage == "") {
v8_enable_virtual_memory_cage = false
}
if (v8_enable_short_builtin_calls == "") {
v8_enable_short_builtin_calls =
v8_current_cpu == "x64" || (!is_android && v8_current_cpu == "arm64")
......@@ -489,6 +497,13 @@ assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
assert(!v8_enable_heap_sandbox || !v8_enable_external_code_space,
"V8 Heap Sandbox is not compatible with external code space YET")
assert(
!v8_enable_virtual_memory_cage || v8_enable_pointer_compression_shared_cage,
"V8 VirtualMemoryCage requires the shared pointer compression cage")
assert(!v8_enable_virtual_memory_cage || !is_lsan,
"V8 VirtualMemoryCage is currently incompatible with Leak Sanitizer")
assert(
!v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
"Can't share a pointer compression cage if pointers aren't compressed")
......@@ -650,6 +665,7 @@ external_v8_defines = [
"V8_31BIT_SMIS_ON_64BIT_ARCH",
"V8_COMPRESS_ZONES",
"V8_HEAP_SANDBOX",
"V8_VIRTUAL_MEMORY_CAGE",
"V8_DEPRECATION_WARNINGS",
"V8_IMMINENT_DEPRECATION_WARNINGS",
"V8_NO_ARGUMENTS_ADAPTOR",
......@@ -680,6 +696,9 @@ if (v8_enable_zone_compression) {
if (v8_enable_heap_sandbox) {
enabled_external_v8_defines += [ "V8_HEAP_SANDBOX" ]
}
if (v8_enable_virtual_memory_cage) {
enabled_external_v8_defines += [ "V8_VIRTUAL_MEMORY_CAGE" ]
}
if (v8_deprecation_warnings) {
enabled_external_v8_defines += [ "V8_DEPRECATION_WARNINGS" ]
}
......@@ -3923,6 +3942,7 @@ v8_source_set("v8_base_without_compiler") {
"src/init/isolate-allocator.cc",
"src/init/startup-data-util.cc",
"src/init/v8.cc",
"src/init/vm-cage.cc",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-iterator.cc",
"src/interpreter/bytecode-array-random-iterator.cc",
......
......@@ -482,6 +482,55 @@ class Internals {
#endif // V8_COMPRESS_POINTERS
};
constexpr bool VirtualMemoryCageIsEnabled() {
#ifdef V8_VIRTUAL_MEMORY_CAGE
return true;
#else
return false;
#endif
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Size of the pointer compression cage located at the start of the virtual
// memory cage.
constexpr size_t kVirtualMemoryCagePointerCageSize =
Internals::kPtrComprCageReservationSize;
// Size of the virtual memory cage, excluding the guard regions surrounding it.
constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40; // 1 TB
static_assert(kVirtualMemoryCageSize > kVirtualMemoryCagePointerCageSize,
"The virtual memory cage must be larger than the pointer "
"compression cage contained within it.");
// Required alignment of the virtual memory cage. For simplicity, we require the
// size of the guard regions to be a multiple of this, so that this specifies
// the alignment of the cage including and excluding surrounding guard regions.
// The alignment requirement is due to the pointer compression cage being
// located at the start of the virtual memory cage.
constexpr size_t kVirtualMemoryCageAlignment =
Internals::kPtrComprCageBaseAlignment;
// Size of the guard regions surrounding the virtual memory cage. This assumes a
// worst-case scenario of a 32-bit unsigned index being used to access an array
// of 64-bit values.
constexpr size_t kVirtualMemoryCageGuardRegionSize = size_t{32} << 30; // 32 GB
static_assert((kVirtualMemoryCageGuardRegionSize %
kVirtualMemoryCageAlignment) == 0,
"The size of the virtual memory cage guard region must be a "
"multiple of its required alignment.");
// Minimum possible size of the virtual memory cage, excluding the guard regions
// surrounding it. Used by unit tests.
constexpr size_t kVirtualMemoryCageMinimumSize =
2 * kVirtualMemoryCagePointerCageSize;
// For now, even if the virtual memory cage is enabled, we still allow backing
// stores to be allocated outside of it as fallback.
constexpr bool kAllowBackingStoresOutsideDataCage = true;
#endif // V8_VIRTUAL_MEMORY_CAGE
// Only perform cast check for types derived from v8::Data since
// other types do not implement the Cast method.
template <bool PerformCheck>
......
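The 32 GB figure for kVirtualMemoryCageGuardRegionSize follows directly from the worst case named in its comment: 2^32 possible values of a 32-bit unsigned index, times 8 bytes per 64-bit element. A compile-time restatement of that arithmetic (a sketch, not part of this change):

#include <cstddef>

constexpr size_t kMaxIndices = size_t{1} << 32;  // full 32-bit index space
constexpr size_t kElementSize = 8;               // 64-bit array elements
static_assert(kMaxIndices * kElementSize == (size_t{32} << 30),
              "worst-case out-of-bounds reach equals the 32 GB guard region");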
......@@ -430,10 +430,28 @@ class PageAllocator {
/**
* Frees memory in the given [address, address + size) range. address and size
* should be operating system page-aligned. The next write to this
* memory area brings the memory transparently back.
* memory area brings the memory transparently back. This should be treated as
* a hint to the OS that the pages are no longer needed. It does not guarantee
* that the pages will be discarded immediately or at all.
*/
virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
/**
* Decommits any wired memory pages in the given range, allowing the OS to
 * reclaim them, and marks the region as inaccessible (kNoAccess). The address
* range stays reserved and can be accessed again later by changing its
* permissions. However, in that case the memory content is guaranteed to be
* zero-initialized again. The memory must have been previously allocated by a
* call to AllocatePages. Returns true on success, false otherwise.
*/
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Implementing this API is required when the virtual memory cage is enabled.
virtual bool DecommitPages(void* address, size_t size) = 0;
#else
// Otherwise, it is optional for now.
virtual bool DecommitPages(void* address, size_t size) { return false; }
#endif
/**
* INTERNAL ONLY: This interface has not been stabilised and may change
* without notice from one release to another without being deprecated first.
......
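A usage sketch of the DecommitPages() contract above, assuming allocator is a v8::PageAllocator* that implements it and mem/len describe a page-aligned kReadWrite allocation obtained from it (the names here are illustrative):

#include <cstddef>
#include <cstring>
#include "v8-platform.h"

void DecommitPagesExample(v8::PageAllocator* allocator, void* mem, size_t len) {
  memset(mem, 0xAA, len);  // dirty the committed pages
  // Unwire the pages and mark the range kNoAccess; the reservation remains.
  if (!allocator->DecommitPages(mem, len)) return;
  // Re-enable access; per the contract, the contents now read back as zero.
  allocator->SetPermissions(mem, len, v8::PageAllocator::kReadWrite);
  // static_cast<uint8_t*>(mem)[0] == 0 at this point.
}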
......@@ -70,6 +70,7 @@ class NumberObject;
class Object;
class ObjectOperationDescriptor;
class ObjectTemplate;
class PageAllocator;
class Platform;
class Primitive;
class PrimitiveArray;
......@@ -5424,7 +5425,10 @@ class V8_EXPORT ArrayBuffer : public Object {
enum class AllocationMode { kNormal, kReservation };
/**
* malloc/free based convenience allocator.
* Convenience allocator.
*
* When the virtual memory cage is enabled, this allocator will allocate its
* backing memory inside the cage. Otherwise, it will rely on malloc/free.
*
* Caller takes ownership, i.e. the returned object needs to be freed using
* |delete allocator| once it is no longer in use.
......@@ -9832,7 +9836,8 @@ class V8_EXPORT V8 {
const int kBuildConfiguration =
(internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) |
(internal::SmiValuesAre31Bits() ? k31BitSmis : 0) |
(internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0);
(internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0) |
(internal::VirtualMemoryCageIsEnabled() ? kVirtualMemoryCage : 0);
return Initialize(kBuildConfiguration);
}
......@@ -9914,6 +9919,37 @@ class V8_EXPORT V8 {
*/
static void ShutdownPlatform();
#ifdef V8_VIRTUAL_MEMORY_CAGE
//
// Virtual Memory Cage related API.
//
// This API is not yet stable and is subject to change in the future.
//
/**
* Initializes the virtual memory cage for V8.
*
* This must be invoked after the platform was initialized but before V8 is
* initialized. The virtual memory cage is torn down during platform shutdown.
* Returns true on success, false otherwise.
*/
static bool InitializeVirtualMemoryCage();
/**
* Provides access to the data page allocator for the virtual memory cage.
*
* This allocator allocates pages inside the data cage part of the virtual
* memory cage in which data buffers such as ArrayBuffer backing stores must
* be allocated. Objects in this region should generally consist purely of
* data and not contain any pointers. It should be assumed that an attacker
* can corrupt data inside the cage, and so in particular the contents of
* pages returned by this allocator, arbitrarily and concurrently.
*
* The virtual memory cage must have been initialized before.
*/
static PageAllocator* GetVirtualMemoryCageDataPageAllocator();
#endif
/**
* Activate trap-based bounds checking for WebAssembly.
*
......@@ -9948,6 +9984,7 @@ class V8_EXPORT V8 {
kPointerCompression = 1 << 0,
k31BitSmis = 1 << 1,
kHeapSandbox = 1 << 2,
kVirtualMemoryCage = 1 << 3,
};
/**
......
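A sketch of how an embedder might draw pages from the new data cage allocator (assuming InitializeVirtualMemoryCage() succeeded earlier; the rounding helper is illustrative):

#ifdef V8_VIRTUAL_MEMORY_CAGE
void* AllocateBackingMemoryInDataCage(size_t length) {
  v8::PageAllocator* allocator =
      v8::V8::GetVirtualMemoryCageDataPageAllocator();
  size_t page_size = allocator->AllocatePageSize();
  size_t rounded = (length + page_size - 1) / page_size * page_size;
  // A nullptr hint lets the allocator pick an address inside the data cage.
  return allocator->AllocatePages(nullptr, rounded, page_size,
                                  v8::PageAllocator::kReadWrite);
}
#endif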
......@@ -56,6 +56,7 @@
#include "src/init/icu_util.h"
#include "src/init/startup-data-util.h"
#include "src/init/v8.h"
#include "src/init/vm-cage.h"
#include "src/json/json-parser.h"
#include "src/json/json-stringifier.h"
#include "src/logging/counters-scopes.h"
......@@ -331,6 +332,37 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
namespace {
#ifdef V8_VIRTUAL_MEMORY_CAGE
// ArrayBufferAllocator to use when the virtual memory cage is enabled, in which
// case all ArrayBuffer backing stores need to be allocated inside the data
// cage. Note that the current implementation is extremely inefficient, as it uses
// the BoundedPageAllocator. In the future, we'll need a proper allocator
// implementation.
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
ArrayBufferAllocator() { CHECK(page_allocator_); }
void* Allocate(size_t length) override {
return page_allocator_->AllocatePages(nullptr, RoundUp(length, page_size_),
page_size_,
PageAllocator::kReadWrite);
}
void* AllocateUninitialized(size_t length) override {
return Allocate(length);
}
void Free(void* data, size_t length) override {
page_allocator_->FreePages(data, RoundUp(length, page_size_));
}
private:
PageAllocator* page_allocator_ = internal::GetPlatformDataCagePageAllocator();
const size_t page_size_ = page_allocator_->AllocatePageSize();
};
#else
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
void* Allocate(size_t length) override {
......@@ -372,6 +404,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return new_data;
}
};
#endif // V8_VIRTUAL_MEMORY_CAGE
struct SnapshotCreatorData {
explicit SnapshotCreatorData(Isolate* isolate)
......@@ -5852,6 +5885,12 @@ void v8::V8::InitializePlatform(Platform* platform) {
i::V8::InitializePlatform(platform);
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
bool v8::V8::InitializeVirtualMemoryCage() {
return i::V8::InitializeVirtualMemoryCage();
}
#endif
void v8::V8::ShutdownPlatform() { i::V8::ShutdownPlatform(); }
bool v8::V8::Initialize(const int build_config) {
......@@ -5882,6 +5921,16 @@ bool v8::V8::Initialize(const int build_config) {
V8_HEAP_SANDBOX_BOOL ? "ENABLED" : "DISABLED");
}
const bool kEmbedderVirtualMemoryCage =
(build_config & kVirtualMemoryCage) != 0;
if (kEmbedderVirtualMemoryCage != V8_VIRTUAL_MEMORY_CAGE_BOOL) {
FATAL(
"Embedder-vs-V8 build configuration mismatch. On embedder side "
"virtual memory cage is %s while on V8 side it's %s.",
kEmbedderVirtualMemoryCage ? "ENABLED" : "DISABLED",
V8_VIRTUAL_MEMORY_CAGE_BOOL ? "ENABLED" : "DISABLED");
}
i::V8::Initialize();
return true;
}
......@@ -5998,6 +6047,13 @@ void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
#ifdef V8_VIRTUAL_MEMORY_CAGE
PageAllocator* GetVirtualMemoryCageDataPageAllocator() {
CHECK(i::GetProcessWideVirtualMemoryCage()->is_initialized());
return i::GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
}
#endif
void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics);
}
......
......@@ -30,13 +30,18 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
PageAllocator::Permission access) {
MutexGuard guard(&mutex_);
DCHECK(IsAligned(alignment, region_allocator_.page_size()));
// Region allocator does not support alignments bigger than its own
// allocation alignment.
DCHECK_LE(alignment, allocate_page_size_);
// TODO(ishell): Consider using randomized version here.
Address address = region_allocator_.AllocateRegion(size);
DCHECK(IsAligned(alignment, allocate_page_size_));
Address address;
if (alignment <= allocate_page_size_) {
// TODO(ishell): Consider using randomized version here.
address = region_allocator_.AllocateRegion(size);
} else {
// Currently, this should only be necessary when V8_VIRTUAL_MEMORY_CAGE is
// enabled, in which case a bounded page allocator is used to allocate WASM
// memory buffers, which have a larger alignment.
address = region_allocator_.AllocateAlignedRegion(size, alignment);
}
if (address == RegionAllocator::kAllocationFailure) {
return nullptr;
}
......@@ -94,8 +99,16 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
Address address = reinterpret_cast<Address>(raw_address);
size_t freed_size = region_allocator_.FreeRegion(address);
if (freed_size != size) return false;
#ifdef V8_VIRTUAL_MEMORY_CAGE
// When the virtual memory cage is enabled, the pages returned by the
// BoundedPageAllocator must be zero-initialized, as some of the additional
// clients expect them to be. Decommitting them during FreePages ensures this
// while also changing the access permissions to kNoAccess.
CHECK(page_allocator_->DecommitPages(raw_address, size));
#else
CHECK(page_allocator_->SetPermissions(raw_address, size,
PageAllocator::kNoAccess));
#endif
return true;
}
......@@ -128,8 +141,14 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
// Keep the region in "used" state just uncommit some pages.
Address free_address = address + new_size;
size_t free_size = size - new_size;
#ifdef V8_VIRTUAL_MEMORY_CAGE
// See comment in FreePages().
return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
free_size);
#else
return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
free_size, PageAllocator::kNoAccess);
#endif
}
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
......@@ -144,5 +163,9 @@ bool BoundedPageAllocator::DiscardSystemPages(void* address, size_t size) {
return page_allocator_->DiscardSystemPages(address, size);
}
bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
return page_allocator_->DecommitPages(address, size);
}
} // namespace base
} // namespace v8
......@@ -71,6 +71,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
bool DiscardSystemPages(void* address, size_t size) override;
bool DecommitPages(void* address, size_t size) override;
private:
v8::base::Mutex mutex_;
const size_t allocate_page_size_;
......
......@@ -151,5 +151,9 @@ bool PageAllocator::DiscardSystemPages(void* address, size_t size) {
return base::OS::DiscardSystemPages(address, size);
}
bool PageAllocator::DecommitPages(void* address, size_t size) {
return base::OS::DecommitPages(address, size);
}
} // namespace base
} // namespace v8
......@@ -47,6 +47,8 @@ class V8_BASE_EXPORT PageAllocator
bool DiscardSystemPages(void* address, size_t size) override;
bool DecommitPages(void* address, size_t size) override;
private:
friend class v8::base::SharedMemory;
......
......@@ -133,6 +133,11 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return status == ZX_OK;
}
bool OS::DecommitPages(void* address, size_t size) {
// TODO(chromium:1218005): support this.
return false;
}
// static
bool OS::HasLazyCommits() {
// TODO(scottmg): Port, https://crbug.com/731217.
......
......@@ -491,6 +491,20 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return ret == 0;
}
bool OS::DecommitPages(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
// From https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html:
// "If a MAP_FIXED request is successful, then any previous mappings [...] for
// those whole pages containing any part of the address range [pa,pa+len)
// shall be removed, as if by an appropriate call to munmap(), before the new
// mapping is established." As a consequence, the memory will be
// zero-initialized on next access.
void* ptr = mmap(address, size, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
return ptr == address;
}
// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
......
......@@ -934,6 +934,21 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return ptr;
}
// static
bool OS::DecommitPages(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
// https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
// "If a page is decommitted but not released, its state changes to reserved.
// Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
// release it. Attempts to read from or write to a reserved page results in an
// access violation exception."
// https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
// for MEM_COMMIT: "The function also guarantees that when the caller later
// initially accesses the memory, the contents will be zero."
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
......
......@@ -311,6 +311,8 @@ class V8_BASE_EXPORT OS {
V8_WARN_UNUSED_RESULT static bool DiscardSystemPages(void* address,
size_t size);
V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size);
static const int msPerSecond = 1000;
#if V8_OS_POSIX
......
......@@ -200,6 +200,35 @@ bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size,
return true;
}
RegionAllocator::Address RegionAllocator::AllocateAlignedRegion(
size_t size, size_t alignment) {
DCHECK(IsAligned(size, page_size_));
DCHECK(IsAligned(alignment, page_size_));
DCHECK_GE(alignment, page_size_);
const size_t padded_size = size + alignment - page_size_;
Region* region = FreeListFindRegion(padded_size);
if (region == nullptr) return kAllocationFailure;
if (!IsAligned(region->begin(), alignment)) {
size_t start = RoundUp(region->begin(), alignment);
region = Split(region, start - region->begin());
DCHECK_EQ(region->begin(), start);
DCHECK(IsAligned(region->begin(), alignment));
}
if (region->size() != size) {
Split(region, size);
}
DCHECK(IsAligned(region->begin(), alignment));
DCHECK_EQ(region->size(), size);
// Mark region as used.
FreeListRemoveRegion(region);
region->set_state(RegionState::kAllocated);
return region->begin();
}
size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
DCHECK(IsAligned(new_size, page_size_));
......
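The padded_size computation in AllocateAlignedRegion() uses the standard over-allocate-then-trim argument: since all region boundaries are page-aligned, the distance from a free region's start to the next alignment boundary is at most alignment - page_size, so padding the request by that amount always leaves size aligned bytes. A numeric restatement with example values (a sketch, not part of this change):

#include <cstddef>

constexpr size_t kPage = size_t{4} << 10;    // 4 KB pages
constexpr size_t kAlign = size_t{64} << 10;  // requested 64 KB alignment
// Worst case: the free region begins one page past an alignment boundary,
// e.g. at 68 KB. Rounding up to the next 64 KB boundary (128 KB) skips
// 60 KB, exactly the padding of alignment - page_size added to the request.
static_assert((size_t{68} << 10) + (kAlign - kPage) == (size_t{128} << 10),
              "padding covers the worst-case distance to the next boundary");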
......@@ -61,6 +61,11 @@ class V8_BASE_EXPORT RegionAllocator final {
bool AllocateRegionAt(Address requested_address, size_t size,
RegionState region_state = RegionState::kAllocated);
// Allocates a region of |size| aligned to |alignment|. The size and alignment
// must be a multiple of |page_size|. Returns the address of the region on
// success or kAllocationFailure.
Address AllocateAlignedRegion(size_t size, size_t alignment);
// Frees region at given |address|, returns the size of the region.
// There must be a used region starting at given address otherwise nothing
// will be freed and 0 will be returned.
......
......@@ -166,7 +166,11 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void* AllocateVM(size_t length) {
DCHECK_LE(kVMThreshold, length);
#ifdef V8_VIRTUAL_MEMORY_CAGE
v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
#else
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
#endif
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
return i::AllocatePages(page_allocator, nullptr, allocated, page_size,
......@@ -174,7 +178,11 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
void FreeVM(void* data, size_t length) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
#else
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
#endif
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(page_allocator, data, allocated));
......@@ -5037,6 +5045,11 @@ int Shell::Main(int argc, char* argv[]) {
V8::SetFlagsFromString("--redirect-code-traces-to=code.asm");
}
v8::V8::InitializePlatform(g_platform.get());
#ifdef V8_VIRTUAL_MEMORY_CAGE
if (!v8::V8::InitializeVirtualMemoryCage()) {
FATAL("Could not initialize the virtual memory cage");
}
#endif
v8::V8::Initialize();
if (options.snapshot_blob) {
v8::V8::InitializeExternalStartupDataFromFile(options.snapshot_blob);
......@@ -5064,6 +5077,11 @@ int Shell::Main(int argc, char* argv[]) {
}
#if V8_OS_LINUX
} else if (options.multi_mapped_mock_allocator) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
CHECK_WITH_MSG(internal::kAllowBackingStoresOutsideDataCage,
"The multi-mapped arraybuffer allocator is currently "
"incompatible with v8_enable_virtual_memory_cage");
#endif
Shell::array_buffer_allocator = &multi_mapped_mock_allocator;
#endif // V8_OS_LINUX
} else {
......
......@@ -33,6 +33,7 @@
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
#include "src/init/isolate-allocator.h"
#include "src/init/vm-cage.h"
#include "src/objects/code.h"
#include "src/objects/contexts.h"
#include "src/objects/debug-objects.h"
......@@ -1081,6 +1082,16 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return isolate_allocator_->GetPtrComprCage();
}
bool IsValidBackingStorePointer(void* ptr) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
Address addr = reinterpret_cast<Address>(ptr);
return kAllowBackingStoresOutsideDataCage || addr == kNullAddress ||
GetProcessWideVirtualMemoryCage()->Contains(addr);
#else
return true;
#endif
}
// Generated code can embed this address to get access to the isolate-specific
// data (for example, roots, external references, builtins, etc.).
// The kRootRegister is set to this value.
......
......@@ -175,6 +175,12 @@ struct MaybeBoolFlag {
#define V8_HEAP_SANDBOX_BOOL false
#endif
#ifdef V8_VIRTUAL_MEMORY_CAGE
#define V8_VIRTUAL_MEMORY_CAGE_BOOL true
#else
#define V8_VIRTUAL_MEMORY_CAGE_BOOL false
#endif
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL true
#else
......
......@@ -8,6 +8,7 @@
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
#include "src/heap/code-range.h"
#include "src/init/vm-cage.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
......@@ -74,7 +75,28 @@ void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
void IsolateAllocator::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
PtrComprCageReservationParams params;
if (!GetProcessWidePtrComprCage()->InitReservation(params)) {
base::AddressRegion existing_reservation;
#ifdef V8_VIRTUAL_MEMORY_CAGE
// TODO(chromium:1218005) avoid the name collision with
// v8::internal::VirtualMemoryCage and ideally figure out a clear naming
// scheme for the different types of virtual memory cages.
// For now, we allow the virtual memory cage to be disabled even when
// compiling with v8_enable_virtual_memory_cage. This fallback will be
// disallowed in the future, at the latest once ArrayBuffers are referenced
// through an offset rather than a raw pointer.
if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
CHECK(kAllowBackingStoresOutsideDataCage);
} else {
auto cage = GetProcessWideVirtualMemoryCage();
CHECK(cage->is_initialized());
DCHECK_EQ(params.reservation_size, cage->pointer_cage_size());
existing_reservation = base::AddressRegion(cage->pointer_cage_base(),
cage->pointer_cage_size());
}
#endif
if (!GetProcessWidePtrComprCage()->InitReservation(params,
existing_reservation)) {
V8::FatalProcessOutOfMemory(
nullptr,
"Failed to reserve virtual memory for process-wide V8 "
......
......@@ -20,6 +20,7 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/init/bootstrapper.h"
#include "src/init/vm-cage.h"
#include "src/libsampler/sampler.h"
#include "src/objects/elements.h"
#include "src/objects/objects-inl.h"
......@@ -73,6 +74,17 @@ void V8::TearDown() {
}
void V8::InitializeOncePerProcessImpl() {
CHECK(platform_);
#ifdef V8_VIRTUAL_MEMORY_CAGE
if (!GetProcessWideVirtualMemoryCage()->is_initialized()) {
// For now, we still allow the cage to be disabled even if V8 was compiled
// with V8_VIRTUAL_MEMORY_CAGE. This will eventually be forbidden.
CHECK(kAllowBackingStoresOutsideDataCage);
GetProcessWideVirtualMemoryCage()->Disable();
}
#endif
// Update logging information before enforcing flag implications.
bool* log_all_flags[] = {&FLAG_turbo_profiling_log_builtins,
&FLAG_log_all,
......@@ -207,6 +219,15 @@ void V8::InitializePlatform(v8::Platform* platform) {
#endif
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
bool V8::InitializeVirtualMemoryCage() {
// Platform must have been initialized already.
CHECK(platform_);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
return GetProcessWideVirtualMemoryCage()->Initialize(page_allocator);
}
#endif
void V8::ShutdownPlatform() {
CHECK(platform_);
#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
......@@ -216,6 +237,13 @@ void V8::ShutdownPlatform() {
#endif
v8::tracing::TracingCategoryObserver::TearDown();
v8::base::SetPrintStackTrace(nullptr);
#ifdef V8_VIRTUAL_MEMORY_CAGE
// TODO(chromium:1218005) alternatively, this could move to its own
// public TearDownVirtualMemoryCage function.
GetProcessWideVirtualMemoryCage()->TearDown();
#endif
platform_ = nullptr;
}
......
......@@ -29,6 +29,10 @@ class V8 : public AllStatic {
const char* location,
bool is_heap_oom = false);
#ifdef V8_VIRTUAL_MEMORY_CAGE
static bool InitializeVirtualMemoryCage();
#endif
static void InitializePlatform(v8::Platform* platform);
static void ShutdownPlatform();
V8_EXPORT_PRIVATE static v8::Platform* GetCurrentPlatform();
......
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/init/vm-cage.h"
#include "include/v8-internal.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/lazy-instance.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
#ifdef V8_VIRTUAL_MEMORY_CAGE
bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
constexpr bool use_guard_regions = true;
return Initialize(page_allocator, kVirtualMemoryCageSize, use_guard_regions);
}
bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
size_t size, bool use_guard_regions) {
CHECK(!initialized_);
CHECK(!disabled_);
CHECK_GE(size, kVirtualMemoryCageMinimumSize);
size_t reservation_size = size;
if (use_guard_regions) {
reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
}
base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
nullptr, reservation_size, kVirtualMemoryCageAlignment,
PageAllocator::kNoAccess));
if (!base_) return false;
if (use_guard_regions) {
base_ += kVirtualMemoryCageGuardRegionSize;
has_guard_regions_ = true;
}
page_allocator_ = page_allocator;
size_ = size;
data_cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator_, data_cage_base(), data_cage_size(),
page_allocator_->AllocatePageSize());
initialized_ = true;
return true;
}
void V8VirtualMemoryCage::TearDown() {
if (initialized_) {
data_cage_page_allocator_.reset();
Address reservation_base = base_;
size_t reservation_size = size_;
if (has_guard_regions_) {
reservation_base -= kVirtualMemoryCageGuardRegionSize;
reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
}
CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base),
reservation_size));
page_allocator_ = nullptr;
base_ = kNullAddress;
size_ = 0;
initialized_ = false;
has_guard_regions_ = false;
}
disabled_ = false;
}
DEFINE_LAZY_LEAKY_OBJECT_GETTER(V8VirtualMemoryCage,
GetProcessWideVirtualMemoryCage)
#endif
} // namespace internal
} // namespace v8
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INIT_VM_CAGE_H_
#define V8_INIT_VM_CAGE_H_
#include "include/v8-internal.h"
#include "src/common/globals.h"
namespace v8 {
class PageAllocator;
namespace internal {
#ifdef V8_VIRTUAL_MEMORY_CAGE
/**
* V8 Virtual Memory Cage.
*
* When the virtual memory cage is enabled, v8 will place most of its objects
* inside a dedicated region of virtual address space. In particular, all v8
* heaps, inside which objects reference themselves using compressed (32-bit)
* pointers, are located at the start of the virtual memory cage (the "pointer
* cage") and pure memory buffers like ArrayBuffer backing stores, which
* themselves do not contain any pointers, are located in the remaining part of
* the cage (the "data cage"). These buffers will eventually be referenced from
* inside the v8 heap using offsets rather than pointers. It should then be
* assumed that an attacker is able to corrupt data arbitrarily and concurrently
* inside the virtual memory cage.
*
* As the embedder is responsible for providing ArrayBuffer allocators, v8
* exposes a page allocator for the data cage to the embedder.
*
* TODO(chromium:1218005) Maybe don't call the sub-regions "cages" as well to
* avoid confusion? In any case, the names should probably be identical to the
* internal names for these virtual memory regions (where they are currently
* called cages).
* TODO(chromium:1218005) come up with a coherent naming scheme for this class
* and the other "cages" in v8.
*/
class V8VirtualMemoryCage {
public:
// +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
// | 32 GB | 4 GB | | 32 GB |
// +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
// ^ ^ ^ ^
// Guard Pointer Cage Data Cage Guard
// Region (contains all (contains all ArrayBuffer and Region
// (front) V8 heaps) WASM memory backing stores) (back)
//
// | base ---------------- size ------------------> |
V8VirtualMemoryCage() = default;
V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
V8VirtualMemoryCage& operator=(V8VirtualMemoryCage&) = delete;
bool is_initialized() const { return initialized_; }
bool is_disabled() const { return disabled_; }
bool is_enabled() const { return !disabled_; }
bool Initialize(v8::PageAllocator* page_allocator);
void Disable() {
CHECK(!initialized_);
disabled_ = true;
}
void TearDown();
Address base() const { return base_; }
size_t size() const { return size_; }
Address pointer_cage_base() const { return base_; }
size_t pointer_cage_size() const { return kVirtualMemoryCagePointerCageSize; }
Address data_cage_base() const {
return pointer_cage_base() + pointer_cage_size();
}
size_t data_cage_size() const { return size_ - pointer_cage_size(); }
bool Contains(Address addr) const {
return addr >= base_ && addr < base_ + size_;
}
bool Contains(void* ptr) const {
return Contains(reinterpret_cast<Address>(ptr));
}
v8::PageAllocator* GetDataCagePageAllocator() {
return data_cage_page_allocator_.get();
}
private:
friend class SequentialUnmapperTest;
// We allow tests to disable the guard regions around the cage. This is useful
// for example for tests like the SequentialUnmapperTest which track page
// allocations and so would incur a large overhead from the guard regions.
bool Initialize(v8::PageAllocator* page_allocator, size_t total_size,
bool use_guard_regions);
Address base_ = kNullAddress;
size_t size_ = 0;
bool has_guard_regions_ = false;
bool initialized_ = false;
bool disabled_ = false;
v8::PageAllocator* page_allocator_ = nullptr;
std::unique_ptr<v8::PageAllocator> data_cage_page_allocator_;
};
V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
#endif // V8_VIRTUAL_MEMORY_CAGE
} // namespace internal
} // namespace v8
#endif // V8_INIT_VM_CAGE_H_
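
The accessors above encode the layout from the diagram. With pointer compression enabled (so the pointer cage is 4 GB), the sub-regions relate as follows; a sketch assuming cage is an initialized V8VirtualMemoryCage and the CHECK macros come from src/base/logging.h:

void CheckCageLayout(const v8::internal::V8VirtualMemoryCage& cage) {
  // The pointer compression cage occupies the first 4 GB of the cage...
  CHECK_EQ(cage.pointer_cage_base(), cage.base());
  CHECK_EQ(cage.pointer_cage_size(), size_t{4} << 30);
  // ...and the data cage is everything after it.
  CHECK_EQ(cage.data_cage_base(), cage.base() + (size_t{4} << 30));
  CHECK_EQ(cage.data_cage_size(), cage.size() - (size_t{4} << 30));
  // Guard regions lie outside [base, base + size) and are not Contains()-ed.
  CHECK(!cage.Contains(cage.base() - 1));
  CHECK(cage.Contains(cage.base()));
}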
......@@ -8,6 +8,7 @@
#include <queue>
#include "include/libplatform/libplatform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/debug/stack_trace.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
......
......@@ -9,6 +9,7 @@
#include "src/base/platform/wrappers.h"
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/init/vm-cage.h"
#include "src/logging/counters.h"
#if V8_ENABLE_WEBASSEMBLY
......@@ -152,6 +153,15 @@ BackingStore::~BackingStore() {
return;
}
PageAllocator* page_allocator = GetPlatformPageAllocator();
#ifdef V8_VIRTUAL_MEMORY_CAGE
if (GetProcessWideVirtualMemoryCage()->Contains(buffer_start_)) {
page_allocator = GetPlatformDataCagePageAllocator();
} else {
DCHECK(kAllowBackingStoresOutsideDataCage);
}
#endif
#if V8_ENABLE_WEBASSEMBLY
if (is_wasm_memory_) {
// TODO(v8:11111): RAB / GSAB - Wasm integration.
......@@ -176,8 +186,8 @@ BackingStore::~BackingStore() {
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
FreePages(GetPlatformPageAllocator(),
reinterpret_cast<void*>(region.begin()), region.size());
FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
region.size());
CHECK(pages_were_freed);
BackingStore::ReleaseReservation(reservation_size);
Clear();
......@@ -195,8 +205,8 @@ BackingStore::~BackingStore() {
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
FreePages(GetPlatformPageAllocator(),
reinterpret_cast<void*>(region.begin()), region.size());
FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
region.size());
CHECK(pages_were_freed);
BackingStore::ReleaseReservation(reservation_size);
Clear();
......@@ -263,6 +273,8 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
counters->array_buffer_new_size_failures()->AddSample(mb_length);
return {};
}
DCHECK(isolate->IsValidBackingStorePointer(buffer_start));
}
auto result = new BackingStore(buffer_start, // start
......@@ -400,10 +412,24 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
// 2. Allocate pages (inaccessible by default).
//--------------------------------------------------------------------------
void* allocation_base = nullptr;
PageAllocator* page_allocator = GetPlatformPageAllocator();
auto allocate_pages = [&] {
allocation_base =
AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
page_size, PageAllocator::kNoAccess);
#ifdef V8_VIRTUAL_MEMORY_CAGE
page_allocator = GetPlatformDataCagePageAllocator();
allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
page_size, PageAllocator::kNoAccess);
if (allocation_base) return true;
// We currently still allow falling back to the platform page allocator if
// the data cage page allocator fails. This will eventually be removed.
// TODO(chromium:1218005) once we forbid the fallback, we should have a
// single API, e.g. GetPlatformDataPageAllocator(), that returns the correct
// page allocator to use here depending on whether the virtual memory cage
// is enabled or not.
if (!kAllowBackingStoresOutsideDataCage) return false;
page_allocator = GetPlatformPageAllocator();
#endif
allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
page_size, PageAllocator::kNoAccess);
return allocation_base != nullptr;
};
if (!gc_retry(allocate_pages)) {
......@@ -414,6 +440,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
return {};
}
DCHECK(isolate->IsValidBackingStorePointer(allocation_base));
// Get a pointer to the start of the buffer, skipping negative guard region
// if necessary.
#if V8_ENABLE_WEBASSEMBLY
......@@ -429,8 +457,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
size_t committed_byte_length = initial_pages * page_size;
auto commit_memory = [&] {
return committed_byte_length == 0 ||
SetPermissions(GetPlatformPageAllocator(), buffer_start,
committed_byte_length, PageAllocator::kReadWrite);
SetPermissions(page_allocator, buffer_start, committed_byte_length,
PageAllocator::kReadWrite);
};
if (!gc_retry(commit_memory)) {
TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
......
......@@ -50,6 +50,7 @@ DEF_GETTER(JSArrayBuffer, backing_store, void*) {
}
void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
DCHECK(isolate->IsValidBackingStorePointer(value));
WriteExternalPointerField(kBackingStoreOffset, isolate,
reinterpret_cast<Address>(value),
kArrayBufferBackingStoreTag);
......@@ -267,6 +268,7 @@ DEF_GETTER(JSTypedArray, external_pointer_raw, ExternalPointer_t) {
}
void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
DCHECK(isolate->IsValidBackingStorePointer(reinterpret_cast<void*>(value)));
WriteExternalPointerField(kExternalPointerOffset, isolate, value,
kTypedArrayExternalPointerTag);
}
......@@ -400,6 +402,7 @@ void JSDataView::AllocateExternalPointerEntries(Isolate* isolate) {
}
void JSDataView::set_data_pointer(Isolate* isolate, void* value) {
DCHECK(isolate->IsValidBackingStorePointer(value));
WriteExternalPointerField(kDataPointerOffset, isolate,
reinterpret_cast<Address>(value),
kDataViewDataPointerTag);
......
......@@ -17,6 +17,7 @@
#include "src/base/vector.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/init/vm-cage.h"
#include "src/utils/memcopy.h"
#if V8_LIBC_BIONIC
......@@ -53,6 +54,7 @@ class PageAllocatorInitializer {
page_allocator_ = default_page_allocator.get();
}
#if defined(LEAK_SANITIZER)
static_assert(!V8_VIRTUAL_MEMORY_CAGE_BOOL, "Not currently supported");
static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
page_allocator_);
page_allocator_ = lsan_allocator.get();
......@@ -61,16 +63,25 @@ class PageAllocatorInitializer {
PageAllocator* page_allocator() const { return page_allocator_; }
#ifdef V8_VIRTUAL_MEMORY_CAGE
PageAllocator* data_cage_page_allocator() const {
return data_cage_page_allocator_;
}
#endif
void SetPageAllocatorForTesting(PageAllocator* allocator) {
page_allocator_ = allocator;
}
private:
PageAllocator* page_allocator_;
#ifdef V8_VIRTUAL_MEMORY_CAGE
PageAllocator* data_cage_page_allocator_;
#endif
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
GetPageTableInitializer)
GetPageAllocatorInitializer)
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
......@@ -79,14 +90,29 @@ const int kAllocationTries = 2;
} // namespace
v8::PageAllocator* GetPlatformPageAllocator() {
DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
return GetPageTableInitializer()->page_allocator();
DCHECK_NOT_NULL(GetPageAllocatorInitializer()->page_allocator());
return GetPageAllocatorInitializer()->page_allocator();
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
// TODO(chromium:1218005) once we disallow disabling the cage, name this e.g.
// "GetPlatformDataPageAllocator", and set it to the PlatformPageAllocator when
// V8_VIRTUAL_MEMORY_CAGE is not defined. Then use that allocator whenever
// allocating ArrayBuffer backing stores inside v8.
v8::PageAllocator* GetPlatformDataCagePageAllocator() {
if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
return GetPlatformPageAllocator();
} else {
CHECK(GetProcessWideVirtualMemoryCage()->is_initialized());
return GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
}
}
#endif
v8::PageAllocator* SetPlatformPageAllocatorForTesting(
v8::PageAllocator* new_page_allocator) {
v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
GetPageAllocatorInitializer()->SetPageAllocatorForTesting(new_page_allocator);
return old_page_allocator;
}
......@@ -323,7 +349,8 @@ inline Address VirtualMemoryCageStart(
}
} // namespace
bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
bool VirtualMemoryCage::InitReservation(
const ReservationParams& params, base::AddressRegion existing_reservation) {
DCHECK(!reservation_.IsReserved());
const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
......@@ -337,7 +364,16 @@ bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
RoundUp(params.base_alignment, allocate_page_size)) -
RoundUp(params.base_bias_size, allocate_page_size);
if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
if (!existing_reservation.is_empty()) {
CHECK_EQ(existing_reservation.size(), params.reservation_size);
CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
IsAligned(existing_reservation.begin(), params.base_alignment));
reservation_ =
VirtualMemory(params.page_allocator, existing_reservation.begin(),
existing_reservation.size());
base_ = reservation_.address() + params.base_bias_size;
reservation_is_owned_ = false;
} else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
// When the base doesn't need to be aligned, the virtual memory reservation
// fails only due to OOM.
VirtualMemory reservation(params.page_allocator, params.reservation_size,
......@@ -426,7 +462,13 @@ void VirtualMemoryCage::Free() {
if (IsReserved()) {
base_ = kNullAddress;
page_allocator_.reset();
reservation_.Free();
if (reservation_is_owned_) {
reservation_.Free();
} else {
// Reservation is owned by the Platform.
DCHECK(V8_VIRTUAL_MEMORY_CAGE_BOOL);
reservation_.Reset();
}
}
}
......
......@@ -100,6 +100,12 @@ V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
// Returns the platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Returns the platform data cage page allocator instance. Guaranteed to be a
// valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformDataCagePageAllocator();
#endif
// Sets the given page allocator as the platform page allocator and returns
// the current one. This function *must* be used only for testing purposes.
// It is not thread-safe and the testing infrastructure should ensure that
......@@ -310,6 +316,9 @@ class VirtualMemory final {
// and the base bias size must be AllocatePageSize-aligned.
// - The base alignment may be kAnyBaseAlignment to denote any alignment is
// acceptable. In this case the base bias size does not need to be aligned.
//
// TODO(chromium:1218005) can we either combine this class and
// v8::VirtualMemoryCage in v8-platform.h or rename one of the two?
class VirtualMemoryCage {
public:
VirtualMemoryCage();
......@@ -351,13 +360,23 @@ class VirtualMemoryCage {
// A number of attempts is made to try to reserve a region that satisfies the
// constraints in params, but this may fail. The base address may be different
// than the one requested.
bool InitReservation(const ReservationParams& params);
// If an existing reservation is provided, it will be used for this cage
// instead. The caller retains ownership of the reservation and is responsible
// for keeping the memory reserved during the lifetime of this object.
bool InitReservation(
const ReservationParams& params,
base::AddressRegion existing_reservation = base::AddressRegion());
void Free();
protected:
Address base_ = kNullAddress;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
// Whether this cage owns the virtual memory reservation and thus should
// release it upon destruction. TODO(chromium:1218005) this is only needed
// when V8_VIRTUAL_MEMORY_CAGE is enabled. Maybe we can remove this again e.g.
// by merging this class and v8::VirtualMemoryCage in v8-platform.h.
bool reservation_is_owned_ = true;
VirtualMemory reservation_;
};
......
......@@ -290,6 +290,7 @@ v8_source_set("cctest_sources") {
"test-utils.cc",
"test-verifiers.cc",
"test-version.cc",
"test-virtual-memory-cage.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
"test-web-snapshots.cc",
......
......@@ -336,6 +336,9 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<v8::Platform> platform(v8::platform::NewDefaultPlatform());
v8::V8::InitializePlatform(platform.get());
#ifdef V8_VIRTUAL_MEMORY_CAGE
CHECK(v8::V8::InitializeVirtualMemoryCage());
#endif
cppgc::InitializeProcess(platform->GetPageAllocator());
using HelpOptions = v8::internal::FlagList::HelpOptions;
v8::internal::FlagList::SetFlagsFromCommandLine(
......
......@@ -788,6 +788,7 @@ class FailingPageAllocator : public v8::PageAllocator {
Permission permissions) override {
return false;
}
bool DecommitPages(void* address, size_t length) override { return false; }
};
} // namespace
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/init/vm-cage.h"
#include "test/cctest/cctest.h"
#ifdef V8_VIRTUAL_MEMORY_CAGE
namespace v8 {
namespace internal {
UNINITIALIZED_TEST(VirtualMemoryCageCreation) {
base::PageAllocator page_allocator;
V8VirtualMemoryCage cage;
CHECK(!cage.is_initialized());
CHECK(!cage.is_disabled());
CHECK_EQ(cage.size(), 0);
CHECK(cage.Initialize(&page_allocator));
CHECK(cage.is_initialized());
CHECK_GT(cage.base(), 0);
CHECK_GT(cage.size(), 0);
cage.TearDown();
CHECK(!cage.is_initialized());
}
} // namespace internal
} // namespace v8
#endif // V8_VIRTUAL_MEMORY_CAGE
......@@ -78,6 +78,27 @@ TEST(RegionAllocatorTest, SimpleAllocateRegion) {
CHECK_EQ(ra.free_size(), 0);
}
TEST(RegionAllocatorTest, SimpleAllocateAlignedRegion) {
const size_t kPageSize = 4 * KB;
const size_t kPageCount = 16;
const size_t kSize = kPageSize * kPageCount;
const Address kBegin = static_cast<Address>(kPageSize * 153);
RegionAllocator ra(kBegin, kSize, kPageSize);
// Allocate regions with different alignments and verify that they are
// correctly aligned.
const size_t alignments[] = {kPageSize, kPageSize * 8, kPageSize,
kPageSize * 4, kPageSize * 2, kPageSize * 2,
kPageSize * 4, kPageSize * 2};
for (auto alignment : alignments) {
Address address = ra.AllocateAlignedRegion(kPageSize, alignment);
CHECK_NE(address, RegionAllocator::kAllocationFailure);
CHECK(IsAligned(address, alignment));
}
CHECK_EQ(ra.free_size(), 8 * kPageSize);
}
TEST(RegionAllocatorTest, AllocateRegionRandom) {
const size_t kPageSize = 8 * KB;
const size_t kPageCountLog = 16;
......
......@@ -97,6 +97,16 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
return result;
}
bool DecommitPages(void* address, size_t size) override {
bool result = page_allocator_->DecommitPages(address, size);
if (result) {
// Mark pages as non-accessible.
UpdatePagePermissions(reinterpret_cast<Address>(address), size,
kNoAccess);
}
return result;
}
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override {
bool result = page_allocator_->SetPermissions(address, size, access);
......@@ -240,6 +250,13 @@ class SequentialUnmapperTest : public TestWithIsolate {
SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
old_flag_ = i::FLAG_concurrent_sweeping;
i::FLAG_concurrent_sweeping = false;
#ifdef V8_VIRTUAL_MEMORY_CAGE
GetProcessWideVirtualMemoryCage()->TearDown();
constexpr bool use_guard_regions = false;
CHECK(GetProcessWideVirtualMemoryCage()->Initialize(
tracking_page_allocator_, kVirtualMemoryCageMinimumSize,
use_guard_regions));
#endif
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Reinitialize the process-wide pointer cage so it can pick up the
// TrackingPageAllocator.
......@@ -255,6 +272,9 @@ class SequentialUnmapperTest : public TestWithIsolate {
// Free the process-wide cage reservation, otherwise the pages won't be
// freed until process teardown.
IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#endif
#ifdef V8_VIRTUAL_MEMORY_CAGE
GetProcessWideVirtualMemoryCage()->TearDown();
#endif
i::FLAG_concurrent_sweeping = old_flag_;
CHECK(tracking_page_allocator_->IsEmpty());
......