Commit b3d9ba81 authored by Samuel Groß, committed by V8 LUCI CQ

Simplify the V8VirtualMemoryCage implementation

Instead of explicitly splitting the cage into two separate regions, we
now just create a single BoundedPageAllocator to manage the entire
address range of the cage, then allocate the first 4GB for the pointer
compression cage.
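
In essence, the new flow looks like this (a simplified sketch distilled
from the changes below to V8VirtualMemoryCage::Initialize and
IsolateAllocator::InitializeOncePerProcess, not a verbatim excerpt):

    // One BoundedPageAllocator now spans the entire cage reservation.
    cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
        page_allocator_, base_, size_, page_allocator_->AllocatePageSize());

    // The pointer compression cage then simply claims the first 4 GB at
    // the base of that region.
    CHECK(cage->page_allocator()->AllocatePagesAt(
        cage->base(), params.reservation_size, PageAllocator::kNoAccess));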

Bug: chromium:1218005
Change-Id: I02c53ca8b6dda9074ae6caccc74c32bd6271d4d2
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3162044
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76900}
parent 6d787191
......@@ -195,22 +195,30 @@ class V8_EXPORT V8 {
* This must be invoked after the platform was initialized but before V8 is
* initialized. The virtual memory cage is torn down during platform shutdown.
* Returns true on success, false otherwise.
*
* TODO(saelo) Once it is no longer optional to create the virtual memory
* cage when compiling with V8_VIRTUAL_MEMORY_CAGE, the cage initialization
* will likely happen as part of V8::Initialize, at which point this function
* should be removed.
*/
static bool InitializeVirtualMemoryCage();
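
For illustration, the expected call order on the embedder side might look
like this (a minimal sketch; `platform` is assumed to be an existing
v8::Platform instance, and the cage call only exists when V8 is built with
V8_VIRTUAL_MEMORY_CAGE):

    v8::V8::InitializePlatform(platform.get());
  #ifdef V8_VIRTUAL_MEMORY_CAGE
    // Must happen after platform initialization but before V8::Initialize.
    if (!v8::V8::InitializeVirtualMemoryCage()) {
      // Reservation failed; abort or fall back, depending on embedder policy.
    }
  #endif
    v8::V8::Initialize();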
/**
* Provides access to the data page allocator for the virtual memory cage.
* Provides access to the virtual memory cage page allocator.
*
* This allocator allocates pages inside the virtual memory cage. It can for
* example be used to obtain virtual memory for ArrayBuffer backing stores,
* which must be located inside the cage.
*
* This allocator allocates pages inside the data cage part of the virtual
* memory cage in which data buffers such as ArrayBuffer backing stores must
* be allocated. Objects in this region should generally consist purely of
* data and not contain any pointers. It should be assumed that an attacker
* can corrupt data inside the cage, and so in particular the contents of
* pages returned by this allocator, arbitrarily and concurrently.
* It should be assumed that an attacker can corrupt data inside the cage,
* and so in particular the contents of pages returned by this allocator,
* arbitrarily and concurrently. Due to this, it is recommended to only
* place pure data buffers in pages obtained through this allocator.
*
* The virtual memory cage must have been initialized before.
* This function must only be called after initializing the virtual memory
* cage and V8.
*/
static PageAllocator* GetVirtualMemoryCageDataPageAllocator();
static PageAllocator* GetVirtualMemoryCagePageAllocator();
#endif
/**
......
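
To illustrate the intended use, here is a hedged sketch of an embedder
ArrayBuffer allocator built on top of this API (the class name and the
inline rounding are hypothetical, not part of this change; it assumes the
cage and V8 have already been initialized):

    // Hypothetical embedder-side allocator that places all ArrayBuffer
    // backing stores inside the virtual memory cage.
    class CagedArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
     public:
      void* Allocate(size_t length) override {
        v8::PageAllocator* pa = v8::V8::GetVirtualMemoryCagePageAllocator();
        // Round the request up to the allocation granularity.
        size_t page_size = pa->AllocatePageSize();
        size_t size = (length + page_size - 1) / page_size * page_size;
        // kReadWrite pages are typically zero-initialized by the OS.
        return pa->AllocatePages(nullptr, size, page_size,
                                 v8::PageAllocator::kReadWrite);
      }
      void* AllocateUninitialized(size_t length) override {
        return Allocate(length);
      }
      void Free(void* data, size_t length) override {
        v8::PageAllocator* pa = v8::V8::GetVirtualMemoryCagePageAllocator();
        size_t page_size = pa->AllocatePageSize();
        pa->FreePages(data, (length + page_size - 1) / page_size * page_size);
      }
    };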
......@@ -488,15 +488,10 @@ constexpr bool VirtualMemoryCageIsEnabled() {
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Size of the pointer compression cage located at the start of the virtual
// memory cage.
constexpr size_t kVirtualMemoryCagePointerCageSize =
Internals::kPtrComprCageReservationSize;
// Size of the virtual memory cage, excluding the guard regions surrounding it.
constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40; // 1 TB
static_assert(kVirtualMemoryCageSize > kVirtualMemoryCagePointerCageSize,
static_assert(kVirtualMemoryCageSize > Internals::kPtrComprCageReservationSize,
"The virtual memory cage must be larger than the pointer "
"compression cage contained within it.");
......@@ -521,16 +516,16 @@ static_assert((kVirtualMemoryCageGuardRegionSize %
// Minimum possible size of the virtual memory cage, excluding the guard regions
// surrounding it. Used by unit tests.
constexpr size_t kVirtualMemoryCageMinimumSize =
2 * kVirtualMemoryCagePointerCageSize;
2 * Internals::kPtrComprCageReservationSize;
// For now, even if the virtual memory cage is enabled, we still allow backing
// stores to be allocated outside of it as fallback. This will simplify the
// initial rollout. However, if the heap sandbox is also enabled, we already use
// the "enforcing mode" of the virtual memory cage. This is useful for testing.
#ifdef V8_HEAP_SANDBOX
constexpr bool kAllowBackingStoresOutsideDataCage = false;
constexpr bool kAllowBackingStoresOutsideCage = false;
#else
constexpr bool kAllowBackingStoresOutsideDataCage = true;
constexpr bool kAllowBackingStoresOutsideCage = true;
#endif // V8_HEAP_SANDBOX
#endif // V8_VIRTUAL_MEMORY_CAGE
......
......@@ -408,7 +408,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
}
private:
PageAllocator* page_allocator_ = internal::GetPlatformDataCagePageAllocator();
PageAllocator* page_allocator_ = internal::GetArrayBufferPageAllocator();
const size_t page_size_ = page_allocator_->AllocatePageSize();
};
......@@ -6111,9 +6111,9 @@ void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
#ifdef V8_VIRTUAL_MEMORY_CAGE
PageAllocator* v8::V8::GetVirtualMemoryCageDataPageAllocator() {
PageAllocator* v8::V8::GetVirtualMemoryCagePageAllocator() {
CHECK(i::GetProcessWideVirtualMemoryCage()->is_initialized());
return i::GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
return i::GetProcessWideVirtualMemoryCage()->page_allocator();
}
#endif
......
......@@ -170,11 +170,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void* AllocateVM(size_t length) {
DCHECK_LE(kVMThreshold, length);
#ifdef V8_VIRTUAL_MEMORY_CAGE
v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
#else
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
#endif
v8::PageAllocator* page_allocator = i::GetArrayBufferPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
return i::AllocatePages(page_allocator, nullptr, allocated, page_size,
......@@ -182,11 +178,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
void FreeVM(void* data, size_t length) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
#else
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
#endif
v8::PageAllocator* page_allocator = i::GetArrayBufferPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(page_allocator, data, allocated));
......
......@@ -86,13 +86,23 @@ void IsolateAllocator::InitializeOncePerProcess() {
// disallowed in the future, at the latest once ArrayBuffers are referenced
// through an offset rather than a raw pointer.
if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
CHECK(kAllowBackingStoresOutsideDataCage);
CHECK(kAllowBackingStoresOutsideCage);
} else {
auto cage = GetProcessWideVirtualMemoryCage();
CHECK(cage->is_initialized());
DCHECK_EQ(params.reservation_size, cage->pointer_cage_size());
existing_reservation = base::AddressRegion(cage->pointer_cage_base(),
cage->pointer_cage_size());
// The pointer compression cage must be placed at the start of the virtual
// memory cage.
// TODO(chromium:1218005) this currently assumes that no other pages were
// allocated through the cage's page allocator in the meantime. In the
// future, the cage initialization will happen just before this function
// runs, and so this will be guaranteed. Currently however, it is possible
// that the embedder accidentally uses the cage's page allocator prior to
// initializing V8, in which case this CHECK will likely fail.
CHECK(cage->page_allocator()->AllocatePagesAt(
cage->base(), params.reservation_size, PageAllocator::kNoAccess));
existing_reservation =
base::AddressRegion(cage->base(), params.reservation_size);
params.page_allocator = cage->page_allocator();
}
#endif
if (!GetProcessWidePtrComprCage()->InitReservation(params,
......
......@@ -80,7 +80,7 @@ void V8::InitializeOncePerProcessImpl() {
if (!GetProcessWideVirtualMemoryCage()->is_initialized()) {
// For now, we still allow the cage to be disabled even if V8 was compiled
// with V8_VIRTUAL_MEMORY_CAGE. This will eventually be forbidden.
CHECK(kAllowBackingStoresOutsideDataCage);
CHECK(kAllowBackingStoresOutsideCage);
GetProcessWideVirtualMemoryCage()->Disable();
}
#endif
......
......@@ -43,9 +43,8 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
page_allocator_ = page_allocator;
size_ = size;
data_cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator_, data_cage_base(), data_cage_size(),
page_allocator_->AllocatePageSize());
cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator_, base_, size_, page_allocator_->AllocatePageSize());
initialized_ = true;
......@@ -54,7 +53,7 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
void V8VirtualMemoryCage::TearDown() {
if (initialized_) {
data_cage_page_allocator_.reset();
cage_page_allocator_.reset();
Address reservation_base = base_;
size_t reservation_size = size_;
if (has_guard_regions_) {
......
......@@ -6,6 +6,7 @@
#define V8_INIT_VM_CAGE_H_
#include "include/v8-internal.h"
#include "src/base/bounded-page-allocator.h"
#include "src/common/globals.h"
namespace v8 {
......@@ -19,48 +20,45 @@ namespace internal {
/**
* V8 Virtual Memory Cage.
*
* When the virtual memory cage is enabled, v8 will place most of its objects
* inside a dedicated region of virtual address space. In particular, all v8
* heaps, inside which objects reference themselves using compressed (32-bit)
* pointers, are located at the start of the virtual memory cage (the "pointer
* cage") and pure memory buffers like ArrayBuffer backing stores, which
* themselves do not contain any pointers, are located in the remaining part of
* the cage (the "data cage"). These buffers will eventually be referenced from
* inside the v8 heap using offsets rather than pointers. It should then be
* assumed that an attacker is able to corrupt data arbitrarily and concurrently
* inside the virtual memory cage.
* When the virtual memory cage is enabled, V8 will reserve a large region of
* virtual address space - the cage - and place most of its objects inside of
* it. This allows these objects to reference each other through offsets rather
* than raw pointers, which in turn makes it harder for an attacker to abuse
* them in an exploit.
*
* The pointer compression region, which contains most V8 objects, and inside
* of which compressed (32-bit) pointers are used, is located at the start of
* the virtual memory cage. The remainder of the cage is mostly used for memory
* buffers, in particular ArrayBuffer backing stores and WASM memory cages.
*
* It should be assumed that an attacker is able to corrupt data arbitrarily
* and concurrently inside the virtual memory cage. The heap sandbox, of which
* the virtual memory cage is one building block, attempts to then stop an
* attacker from corrupting data outside of the cage.
*
* As the embedder is responsible for providing ArrayBuffer allocators, v8
* exposes a page allocator for the data cage to the embedder.
* exposes a page allocator for the virtual memory cage to the embedder.
*
* TODO(chromium:1218005) Maybe don't call the sub-regions "cages" as well to
* avoid confusion? In any case, the names should probably be identical to the
* internal names for these virtual memory regions (where they are currently
* called cages).
* TODO(chromium:1218005) come up with a coherent naming scheme for this class
* and the other "cages" in v8.
*/
class V8VirtualMemoryCage {
public:
// +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
// | 32 GB |      4 GB      |                             | 32 GB |
// +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
// ^       ^                ^                             ^
// Guard   Pointer Cage     Data Cage                     Guard
// Region  (contains all    (contains all ArrayBuffer and Region
// (front) V8 heaps)        WASM memory backing stores)   (back)
//
// +- ~~~ -+---------------------------------------- ~~~ -+- ~~~ -+
// | 32 GB |              (Ideally) 1 TB                  | 32 GB |
// |       |                                              |       |
// | Guard |   4 GB   :  ArrayBuffer backing stores,      | Guard |
// | Region|  V8 Heap :  WASM memory buffers, and         | Region|
// | (front)| Region  :  any other caged objects.         | (back)|
// +- ~~~ -+---------------------------------------- ~~~ -+- ~~~ -+
//         | base ---------------- size ----------------> |
//         ^                                              ^
//         base                                  base + size
V8VirtualMemoryCage() = default;
V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
V8VirtualMemoryCage& operator=(V8VirtualMemoryCage&) = delete;
bool is_initialized() const { return initialized_; }
bool is_disabled() const { return disabled_; }
bool is_enabled() const { return !disabled_; }
bool Initialize(v8::PageAllocator* page_allocator);
void Disable() {
CHECK(!initialized_);
......@@ -69,16 +67,16 @@ class V8VirtualMemoryCage {
void TearDown();
bool is_initialized() const { return initialized_; }
bool is_disabled() const { return disabled_; }
bool is_enabled() const { return !disabled_; }
Address base() const { return base_; }
size_t size() const { return size_; }
Address pointer_cage_base() const { return base_; }
size_t pointer_cage_size() const { return kVirtualMemoryCagePointerCageSize; }
Address data_cage_base() const {
return pointer_cage_base() + pointer_cage_size();
base::BoundedPageAllocator* page_allocator() const {
return cage_page_allocator_.get();
}
size_t data_cage_size() const { return size_ - pointer_cage_size(); }
bool Contains(Address addr) const {
return addr >= base_ && addr < base_ + size_;
......@@ -88,11 +86,9 @@ class V8VirtualMemoryCage {
return Contains(reinterpret_cast<Address>(ptr));
}
v8::PageAllocator* GetDataCagePageAllocator() {
return data_cage_page_allocator_.get();
}
private:
// The SequentialUnmapperTest calls the private Initialize method to create a
// cage without guard regions, which would otherwise consume too much memory.
friend class SequentialUnmapperTest;
// We allow tests to disable the guard regions around the cage. This is useful
......@@ -106,8 +102,11 @@ class V8VirtualMemoryCage {
bool has_guard_regions_ = false;
bool initialized_ = false;
bool disabled_ = false;
// The PageAllocator through which the virtual memory of the cage was
// allocated.
v8::PageAllocator* page_allocator_ = nullptr;
std::unique_ptr<v8::PageAllocator> data_cage_page_allocator_;
// The BoundedPageAllocator to allocate pages inside the cage.
std::unique_ptr<base::BoundedPageAllocator> cage_page_allocator_;
};
V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
......@@ -117,7 +116,7 @@ V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
Address addr = reinterpret_cast<Address>(ptr);
return kAllowBackingStoresOutsideDataCage || addr == kNullAddress ||
return kAllowBackingStoresOutsideCage || addr == kNullAddress ||
GetProcessWideVirtualMemoryCage()->Contains(addr);
#else
return true;
......
......@@ -184,11 +184,14 @@ BackingStore::~BackingStore() {
}
PageAllocator* page_allocator = GetPlatformPageAllocator();
// TODO(saelo) here and elsewhere in this file, replace with
// GetArrayBufferPageAllocator once the fallback to the platform page
// allocator is no longer allowed.
#ifdef V8_VIRTUAL_MEMORY_CAGE
if (GetProcessWideVirtualMemoryCage()->Contains(buffer_start_)) {
page_allocator = GetPlatformDataCagePageAllocator();
page_allocator = GetVirtualMemoryCagePageAllocator();
} else {
DCHECK(kAllowBackingStoresOutsideDataCage);
DCHECK(kAllowBackingStoresOutsideCage);
}
#endif
......@@ -445,17 +448,17 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
PageAllocator* page_allocator = GetPlatformPageAllocator();
auto allocate_pages = [&] {
#ifdef V8_VIRTUAL_MEMORY_CAGE
page_allocator = GetPlatformDataCagePageAllocator();
page_allocator = GetVirtualMemoryCagePageAllocator();
allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
page_size, PageAllocator::kNoAccess);
if (allocation_base) return true;
// We currently still allow falling back to the platform page allocator if
// the data cage page allocator fails. This will eventually be removed.
// the cage page allocator fails. This will eventually be removed.
// TODO(chromium:1218005) once we forbid the fallback, we should have a
// single API, e.g. GetPlatformDataPageAllocator(), that returns the correct
// single API, e.g. GetArrayBufferPageAllocator(), that returns the correct
// page allocator to use here depending on whether the virtual memory cage
// is enabled or not.
if (!kAllowBackingStoresOutsideDataCage) return false;
if (!kAllowBackingStoresOutsideCage) return false;
page_allocator = GetPlatformPageAllocator();
#endif
allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
......
......@@ -95,16 +95,14 @@ v8::PageAllocator* GetPlatformPageAllocator() {
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
// TODO(chromium:1218005) once we disallow disabling the cage, name this e.g.
// "GetPlatformDataPageAllocator", and set it to the PlatformPageAllocator when
// V8_VIRTUAL_MEMORY_CAGE is not defined. Then use that allocator whenever
// allocating ArrayBuffer backing stores inside v8.
v8::PageAllocator* GetPlatformDataCagePageAllocator() {
v8::PageAllocator* GetVirtualMemoryCagePageAllocator() {
// TODO(chromium:1218005) remove this code once the cage is no longer
// optional.
if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
return GetPlatformPageAllocator();
} else {
CHECK(GetProcessWideVirtualMemoryCage()->is_initialized());
return GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
return GetProcessWideVirtualMemoryCage()->page_allocator();
}
}
#endif
......@@ -372,7 +370,6 @@ bool VirtualMemoryCage::InitReservation(
VirtualMemory(params.page_allocator, existing_reservation.begin(),
existing_reservation.size());
base_ = reservation_.address() + params.base_bias_size;
reservation_is_owned_ = false;
} else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
// When the base doesn't need to be aligned, the virtual memory reservation
// fails only due to OOM.
......@@ -462,13 +459,7 @@ void VirtualMemoryCage::Free() {
if (IsReserved()) {
base_ = kNullAddress;
page_allocator_.reset();
if (reservation_is_owned_) {
reservation_.Free();
} else {
// Reservation is owned by the Platform.
DCHECK(V8_VIRTUAL_MEMORY_CAGE_BOOL);
reservation_.Reset();
}
reservation_.Free();
}
}
......
......@@ -101,11 +101,23 @@ V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Returns the platform data cage page allocator instance. Guaranteed to be a
// valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformDataCagePageAllocator();
// Returns the virtual memory cage page allocator instance for allocating pages
// inside the virtual memory cage. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetVirtualMemoryCagePageAllocator();
#endif
// Returns the appropriate page allocator to use for ArrayBuffer backing stores.
// If the virtual memory cage is enabled, these must be allocated inside the
// cage and so this will be the CagePageAllocator. Otherwise it will be the
// PlatformPageAllocator.
inline v8::PageAllocator* GetArrayBufferPageAllocator() {
#ifdef V8_VIRTUAL_MEMORY_CAGE
return GetVirtualMemoryCagePageAllocator();
#else
return GetPlatformPageAllocator();
#endif
}
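
A usage sketch (mirroring the d8 changes above; `byte_length` stands in
for a hypothetical request size):

    v8::PageAllocator* allocator = GetArrayBufferPageAllocator();
    size_t page_size = allocator->AllocatePageSize();
    size_t allocated = RoundUp(byte_length, page_size);
    void* memory = AllocatePages(allocator, nullptr, allocated, page_size,
                                 PageAllocator::kReadWrite);
    if (memory) {
      // ... use the backing store ...
      CHECK(FreePages(allocator, memory, allocated));
    }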
// Sets the given page allocator as the platform page allocator and returns
// the current one. This function *must* be used only for testing purposes.
// It is not thread-safe and the testing infrastructure should ensure that
......@@ -372,11 +384,6 @@ class VirtualMemoryCage {
protected:
Address base_ = kNullAddress;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
// Whether this cage owns the virtual memory reservation and thus should
// release it upon destruction. TODO(chromium:1218005) this is only needed
// when V8_VIRTUAL_MEMORY_CAGE is enabled. Maybe we can remove this again e.g.
// by merging this class and v8::VirtualMemoryCage in v8-platform.h.
bool reservation_is_owned_ = true;
VirtualMemory reservation_;
};
......
......@@ -250,17 +250,19 @@ class SequentialUnmapperTest : public TestWithIsolate {
SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
old_flag_ = i::FLAG_concurrent_sweeping;
i::FLAG_concurrent_sweeping = false;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Reinitialize the process-wide pointer cage so it can pick up the
// TrackingPageAllocator.
// The pointer cage must be destroyed before the virtual memory cage.
IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Reinitialize the virtual memory cage so it uses the TrackingPageAllocator.
GetProcessWideVirtualMemoryCage()->TearDown();
constexpr bool use_guard_regions = false;
CHECK(GetProcessWideVirtualMemoryCage()->Initialize(
tracking_page_allocator_, kVirtualMemoryCageMinimumSize,
use_guard_regions));
#endif
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Reinitialize the process-wide pointer cage so it can pick up the
// TrackingPageAllocator.
IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
IsolateAllocator::InitializeOncePerProcess();
#endif
TestWithIsolate::SetUpTestCase();
......