Commit a7cb30b0 authored by Samuel Groß, committed by V8 LUCI CQ

Introduce VirtualAddressSpace interface

This interface is meant to eventually replace the existing
v8::PageAllocator interface. Beyond general refactoring of the
PageAllocator APIs, the new interface now supports the concept of
(contiguous) address space reservations, which previously had to be
implemented through page allocations. These reservations now make better
use of provided OS primitives on Fuchsia (VMARs) and Windows
(placeholder mappings) and can be used to back many of the cages and
virtual memory regions that V8 creates.

The new interface is not yet stable and may change at any time without
deprecating the old version first.
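
For illustration only (an added sketch, not part of this change), embedder
code might use the new interface roughly as follows, assuming a hypothetical
GetProcessVirtualAddressSpace() accessor for the process-wide space:

  v8::VirtualAddressSpace* root = GetProcessVirtualAddressSpace();  // assumed
  if (root->CanAllocateSubspaces()) {
    std::unique_ptr<v8::VirtualAddressSpace> subspace = root->AllocateSubspace(
        0 /* no hint */, size_t{4} * 1024 * 1024 * 1024,
        root->allocation_granularity(), v8::PagePermissions::kReadWrite);
    // Page allocations in the root space can never land inside the subspace
    // while it is alive.
  }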

Bug: chromium:1218005
Change-Id: I295253c42e04cf311393c5dab9f8c06bd7451ce3
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3301475
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78235}
parent c1278acd
@@ -541,6 +541,8 @@ filegroup(
        "src/base/debug/stack_trace.h",
        "src/base/division-by-constant.cc",
        "src/base/division-by-constant.h",
+       "src/base/emulated-virtual-address-subspace.cc",
+       "src/base/emulated-virtual-address-subspace.h",
        "src/base/enum-set.h",
        "src/base/export-template.h",
        "src/base/file-utils.cc",
@@ -612,6 +614,10 @@ filegroup(
        "src/base/utils/random-number-generator.cc",
        "src/base/utils/random-number-generator.h",
        "src/base/vector.h",
+       "src/base/virtual-address-space-page-allocator.cc",
+       "src/base/virtual-address-space-page-allocator.h",
+       "src/base/virtual-address-space.cc",
+       "src/base/virtual-address-space.h",
        "src/base/v8-fallthrough.h",
        "src/base/vlq-base64.cc",
        "src/base/vlq-base64.h",
......
@@ -4985,6 +4985,8 @@ v8_component("v8_libbase") {
    "src/base/debug/stack_trace.h",
    "src/base/division-by-constant.cc",
    "src/base/division-by-constant.h",
+   "src/base/emulated-virtual-address-subspace.cc",
+   "src/base/emulated-virtual-address-subspace.h",
    "src/base/enum-set.h",
    "src/base/export-template.h",
    "src/base/file-utils.cc",
@@ -5063,6 +5065,10 @@ v8_component("v8_libbase") {
    "src/base/utils/random-number-generator.h",
    "src/base/v8-fallthrough.h",
    "src/base/vector.h",
+   "src/base/virtual-address-space-page-allocator.cc",
+   "src/base/virtual-address-space-page-allocator.h",
+   "src/base/virtual-address-space.cc",
+   "src/base/virtual-address-space.h",
    "src/base/vlq-base64.cc",
    "src/base/vlq-base64.h",
    "src/base/vlq.h",
......
@@ -510,6 +510,213 @@ class PageAllocator {
  virtual bool CanAllocateSharedPages() { return false; }
};
/**
* Page permissions.
*/
enum class PagePermissions {
kNoAccess,
kRead,
kReadWrite,
kReadWriteExecute,
kReadExecute,
};
/**
* Class to manage a virtual memory address space.
*
* This class represents a contiguous region of virtual address space in which
* sub-spaces and (private or shared) memory pages can be allocated, freed, and
* modified. This interface is meant to eventually replace the PageAllocator
* interface, and can be used as an alternative in the meantime.
*/
class VirtualAddressSpace {
public:
using Address = uintptr_t;
VirtualAddressSpace(size_t page_size, size_t allocation_granularity,
Address base, size_t size)
: page_size_(page_size),
allocation_granularity_(allocation_granularity),
base_(base),
size_(size) {}
virtual ~VirtualAddressSpace() = default;
/**
* The page size used inside this space. Guaranteed to be a power of two.
* Used as the granularity for all page-related operations except for
* allocation, which uses the allocation_granularity(); see below.
*
* \returns the page size in bytes.
*/
size_t page_size() const { return page_size_; }
/**
* The granularity of page allocations and, by extension, of subspace
* allocations. This is guaranteed to be a power of two and a multiple of the
* page_size(). In practice, this is equal to the page size on most OSes, but
* on Windows it is usually 64KB, while the page size is 4KB.
*
* \returns the allocation granularity in bytes.
*/
size_t allocation_granularity() const { return allocation_granularity_; }
/**
* The base address of the address space managed by this instance.
*
* \returns the base address of this address space.
*/
Address base() const { return base_; }
/**
* The size of the address space managed by this instance.
*
* \returns the size of this address space in bytes.
*/
size_t size() const { return size_; }
/**
* Sets the random seed so that RandomPageAddress() will generate
* repeatable sequences of random addresses.
*
* \param seed The seed for the PRNG.
*/
virtual void SetRandomSeed(int64_t seed) = 0;
/**
* Returns a random address inside this address space, suitable for page
* allocation hints.
*
* \returns a random address aligned to allocation_granularity().
*/
virtual Address RandomPageAddress() = 0;
/**
* Allocates private memory pages with the given alignment and permissions.
*
* \param hint If nonzero, an attempt is made to place the allocation at the
* given address first. If that fails, the allocation may be placed
* elsewhere, possibly nearby, but that is not guaranteed. Specifying
* zero for the hint always causes this function to choose a random address.
*
* \param size The size of the allocation in bytes. Must be a multiple of the
* allocation_granularity().
*
* \param alignment The alignment of the allocation in bytes. Must be a
* multiple of the allocation_granularity() and should be a power of two.
*
* \param permissions The page permissions of the newly allocated pages.
*
* \returns the start address of the allocated pages on success, zero on
* failure.
*/
static constexpr Address kNoHint = 0;
virtual V8_WARN_UNUSED_RESULT Address
AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions permissions) = 0;
/**
* Frees previously allocated pages.
*
* \param address The start address of the pages to free. This address must
* have been obtained from a call to AllocatePages.
*
* \param size The size in bytes of the region to free. This must match the
* size passed to AllocatePages when the pages were allocated.
*
* \returns true on success, false otherwise.
*/
virtual V8_WARN_UNUSED_RESULT bool FreePages(Address address,
size_t size) = 0;
/**
* Sets permissions of all allocated pages in the given range.
*
* \param address The start address of the range. Must be aligned to
* page_size().
*
* \param size The size in bytes of the range. Must be a multiple
* of page_size().
*
* \param permissions The new permissions for the range.
*
* \returns true on success, false otherwise.
*/
virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions(
Address address, size_t size, PagePermissions permissions) = 0;
/**
* Whether this instance can allocate subspaces or not.
*
* \returns true if subspaces can be allocated, false if not.
*/
virtual bool CanAllocateSubspaces() = 0;
/**
* Allocates a subspace.
*
* The address space of a subspace stays reserved in the parent space for the
* lifetime of the subspace. As such, it is guaranteed that page allocations
* on the parent space cannot end up inside a subspace.
*
* \param hint Hints where the subspace should be allocated. See
* AllocatePages() for more details.
*
* \param size The size in bytes of the subspace. Must be a multiple of the
* allocation_granularity().
*
* \param alignment The alignment of the subspace in bytes. Must be a multiple
* of the allocation_granularity() and should be a power of two.
*
* \param max_permissions The maximum permissions that pages allocated in the
* subspace can obtain.
*
* \returns a new subspace or nullptr on failure.
*/
virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) = 0;
//
// TODO(v8) maybe refactor the methods below before stabilizing the API, for
// example by combining them into some form of page-operation method that
// takes a command enum as a parameter.
//
/**
* Frees memory in the given [address, address + size) range. address and
* size should be aligned to the page_size(). The next write to this memory
* area will transparently bring the memory back. This should be treated as a
* hint to the OS that the pages are no longer needed. It does not guarantee
* that the pages will be discarded immediately or at all.
*
* \returns true on success, false otherwise. Since this method is only a
* hint, a successful invocation does not imply that pages have been removed.
*/
virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address,
size_t size) {
return true;
}
/**
* Decommits any wired memory pages in the given range, allowing the OS to
* reclaim them, and marks the region as inaccessible (kNoAccess). The address
* range stays reserved and can be accessed again later by changing its
* permissions. However, in that case the memory content is guaranteed to be
* zero-initialized again. The memory must have been previously allocated by a
* call to AllocatePages.
*
* \returns true on success, false otherwise.
*/
virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address,
size_t size) = 0;
private:
const size_t page_size_;
const size_t allocation_granularity_;
const Address base_;
const size_t size_;
};
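// Added illustration (not part of the original header): a minimal allocation
// lifecycle against this interface. Any concrete implementation can stand in
// for |space|.
inline void AllocationLifecycleSketch(VirtualAddressSpace* space) {
  size_t size = 4 * space->allocation_granularity();
  VirtualAddressSpace::Address addr =
      space->AllocatePages(0 /* no hint: pick a random address */, size,
                           space->allocation_granularity(),
                           PagePermissions::kReadWrite);
  if (!addr) return;  // Allocation failed.
  // addr and size are page_size()-aligned because the allocation granularity
  // is a multiple of the page size.
  if (space->SetPagePermissions(addr, size, PagePermissions::kRead)) {
    // ... read from the pages ...
  }
  // FreePages requires the same size that was passed to AllocatePages.
  (void)space->FreePages(addr, size);
}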
/**
 * V8 Allocator used for allocating zone backings.
 */
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/emulated-virtual-address-subspace.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
namespace v8 {
namespace base {
EmulatedVirtualAddressSubspace::EmulatedVirtualAddressSubspace(
VirtualAddressSpace* parent_space, Address base, size_t mapped_size,
size_t total_size)
: VirtualAddressSpace(parent_space->page_size(),
parent_space->allocation_granularity(), base,
total_size),
mapped_size_(mapped_size),
parent_space_(parent_space),
region_allocator_(base, mapped_size, parent_space_->page_size()) {
// For simplicity, we currently require both the mapped and total size to be
// a power of two. This simplifies some things later on, for example, random
// addresses can be generated with a simple bitmask, and will then be inside
// the unmapped space with a probability >= 50% (mapped size == unmapped
// size) or never (mapped size == total size).
DCHECK(base::bits::IsPowerOfTwo(mapped_size));
DCHECK(base::bits::IsPowerOfTwo(total_size));
}
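// Added illustration (not part of the original file): with a power-of-two
// size, x % size == x & (size - 1) for unsigned x, so RandomPageAddress()
// below could equivalently use a mask instead of a modulo:
inline Address MaskedRandomAddressSketch(Address base, uint64_t random,
                                         size_t power_of_two_size) {
  return base + (static_cast<Address>(random) & (power_of_two_size - 1));
}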
EmulatedVirtualAddressSubspace::~EmulatedVirtualAddressSubspace() {
CHECK(parent_space_->FreePages(base(), mapped_size_));
}
void EmulatedVirtualAddressSubspace::SetRandomSeed(int64_t seed) {
MutexGuard guard(&mutex_);
rng_.SetSeed(seed);
}
Address EmulatedVirtualAddressSubspace::RandomPageAddress() {
MutexGuard guard(&mutex_);
Address addr = base() + (rng_.NextInt64() % size());
return RoundDown(addr, allocation_granularity());
}
Address EmulatedVirtualAddressSubspace::AllocatePages(
Address hint, size_t size, size_t alignment, PagePermissions permissions) {
if (hint == kNoHint || MappedRegionContains(hint, size)) {
MutexGuard guard(&mutex_);
// Attempt to find a region in the mapped region.
Address address = region_allocator_.AllocateRegion(hint, size, alignment);
if (address != RegionAllocator::kAllocationFailure) {
// Success. Only need to adjust the page permissions.
if (parent_space_->SetPagePermissions(address, size, permissions)) {
return address;
}
// Probably ran out of memory, but still try to allocate in the unmapped
// space.
CHECK_EQ(size, region_allocator_.FreeRegion(address));
}
}
// No luck or hint is outside of the mapped region. Try to allocate pages in
// the unmapped space using page allocation hints instead.
// Somewhat arbitrary size limitation to ensure that the loop below for
// finding a fitting base address hint terminates quickly.
if (size >= (unmapped_size() / 2)) return kNullAddress;
static constexpr int kMaxAttempts = 10;
for (int i = 0; i < kMaxAttempts; i++) {
// If the hint wouldn't result in the entire allocation being inside the
// managed region, simply retry. There is at least a 50% chance of
// getting a usable address due to the size restriction above.
while (!UnmappedRegionContains(hint, size)) {
hint = RandomPageAddress();
}
Address region =
parent_space_->AllocatePages(hint, size, alignment, permissions);
if (region && UnmappedRegionContains(region, size)) {
return region;
} else if (region) {
CHECK(parent_space_->FreePages(region, size));
}
// Retry at a different address.
hint = RandomPageAddress();
}
return kNullAddress;
}
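// Added note (informal estimate, not part of the original file): since
// size < unmapped_size() / 2, a fresh random hint fits entirely inside the
// unmapped region with probability
//   p = (unmapped_size - size) / total_size >= (unmapped_size / 2) / total_size,
// i.e. p >= 1/4 when the mapped and unmapped halves are equally large, so the
// inner hint-redrawing loop above terminates after about 1/p <= 4 draws in
// expectation.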
bool EmulatedVirtualAddressSubspace::FreePages(Address address, size_t size) {
if (MappedRegionContains(address, size)) {
MutexGuard guard(&mutex_);
if (region_allocator_.FreeRegion(address) != size) return false;
CHECK(parent_space_->DecommitPages(address, size));
return true;
}
if (!UnmappedRegionContains(address, size)) return false;
return parent_space_->FreePages(address, size);
}
bool EmulatedVirtualAddressSubspace::SetPagePermissions(
Address address, size_t size, PagePermissions permissions) {
DCHECK(Contains(address, size));
return parent_space_->SetPagePermissions(address, size, permissions);
}
bool EmulatedVirtualAddressSubspace::CanAllocateSubspaces() {
// This is not supported, mostly because it's not (yet) needed in practice.
return false;
}
std::unique_ptr<v8::VirtualAddressSpace>
EmulatedVirtualAddressSubspace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) {
UNREACHABLE();
}
bool EmulatedVirtualAddressSubspace::DiscardSystemPages(Address address,
size_t size) {
DCHECK(Contains(address, size));
return parent_space_->DiscardSystemPages(address, size);
}
bool EmulatedVirtualAddressSubspace::DecommitPages(Address address,
size_t size) {
DCHECK(Contains(address, size));
return parent_space_->DecommitPages(address, size);
}
} // namespace base
} // namespace v8
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_
#define V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_
#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/mutex.h"
#include "src/base/region-allocator.h"
#include "src/base/virtual-address-space.h"
namespace v8 {
namespace base {
/**
* Emulates a virtual address subspace.
*
* This class is (optionally) backed by a page allocation and emulates a virtual
* address space that is potentially larger than that mapping. It generally
* first attempts to satisfy page allocation requests from its backing mapping,
* but will also attempt to obtain new page mappings inside the unmapped space
* through page allocation hints if necessary.
*
* Caveat: an emulated subspace violates the invariant that page allocations in
* an address space will never end up inside a child space and so does not
* provide the same security guarantees.
*/
class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final
: public NON_EXPORTED_BASE(::v8::VirtualAddressSpace) {
public:
// Construct an emulated virtual address subspace of the specified total size,
// potentially backed by a page allocation from the parent space. The newly
// created instance takes ownership of the page allocation (if any) and frees
// it during destruction.
EmulatedVirtualAddressSubspace(v8::VirtualAddressSpace* parent_space,
Address base, size_t mapped_size,
size_t total_size);
~EmulatedVirtualAddressSubspace() override;
void SetRandomSeed(int64_t seed) override;
Address RandomPageAddress() override;
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions permissions) override;
bool FreePages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions permissions) override;
bool CanAllocateSubspaces() override;
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
bool DecommitPages(Address address, size_t size) override;
private:
size_t mapped_size() const { return mapped_size_; }
size_t unmapped_size() const { return size() - mapped_size_; }
Address mapped_base() const { return base(); }
Address unmapped_base() const { return base() + mapped_size_; }
bool Contains(Address outer_start, size_t outer_size, Address inner_start,
size_t inner_size) const {
return (inner_start >= outer_start) &&
((inner_start + inner_size) <= (outer_start + outer_size));
}
bool Contains(Address addr, size_t length) const {
return Contains(base(), size(), addr, length);
}
bool MappedRegionContains(Address addr, size_t length) const {
return Contains(mapped_base(), mapped_size(), addr, length);
}
bool UnmappedRegionContains(Address addr, size_t length) const {
return Contains(unmapped_base(), unmapped_size(), addr, length);
}
// Size of the mapped region located at the beginning of this address space.
const size_t mapped_size_;
// Pointer to the parent space from which the backing pages were allocated.
// Must be kept alive by the owner of this instance.
v8::VirtualAddressSpace* parent_space_;
// Mutex guarding the non-threadsafe RegionAllocator and
// RandomNumberGenerator.
Mutex mutex_;
// RegionAllocator to manage the page allocation and divide it into further
// regions as necessary.
RegionAllocator region_allocator_;
// Random number generator for generating random addresses.
RandomNumberGenerator rng_;
};
} // namespace base
} // namespace v8
#endif // V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_
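// Added usage sketch (hypothetical 64-bit sizes, not part of the original
// header): back a 4GB emulated space with a 512MB reservation taken from the
// parent space. Both sizes must be powers of two, per the constructor DCHECKs.
inline void EmulatedSubspaceSketch(v8::VirtualAddressSpace* parent) {
  constexpr size_t kMappedSize = size_t{512} * 1024 * 1024;
  constexpr size_t kTotalSize = size_t{4} * 1024 * 1024 * 1024;
  v8::VirtualAddressSpace::Address base = parent->AllocatePages(
      0, kMappedSize, parent->allocation_granularity(),
      v8::PagePermissions::kNoAccess);
  if (!base) return;
  v8::base::EmulatedVirtualAddressSubspace space(parent, base, kMappedSize,
                                                 kTotalSize);
  // Allocations are served from the mapped region first and fall back to
  // hint-based allocation in the remaining, unmapped part of the space.
}  // The destructor frees the backing reservation in the parent space.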
@@ -123,6 +123,8 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
class AddressSpaceReservation;
class PageAllocator;
class TimezoneCache;
+class VirtualAddressSpace;
+class VirtualAddressSubspace;
// ----------------------------------------------------------------------------
// OS
@@ -312,6 +314,8 @@ class V8_BASE_EXPORT OS {
  friend class MemoryMappedFile;
  friend class PosixMemoryMappedFile;
  friend class v8::base::PageAllocator;
+  friend class v8::base::VirtualAddressSpace;
+  friend class v8::base::VirtualAddressSubspace;
  static size_t AllocatePageSize();
......
@@ -236,6 +236,29 @@ RegionAllocator::Address RegionAllocator::AllocateAlignedRegion(
  return region->begin();
}
RegionAllocator::Address RegionAllocator::AllocateRegion(Address hint,
size_t size,
size_t alignment) {
DCHECK(IsAligned(alignment, page_size()));
DCHECK(IsAligned(hint, alignment));
if (hint && contains(hint, size)) {
if (AllocateRegionAt(hint, size)) {
return hint;
}
}
Address address;
if (alignment <= page_size()) {
// TODO(chromium:1218005): Consider using randomized version here.
address = AllocateRegion(size);
} else {
address = AllocateAlignedRegion(size, alignment);
}
return address;
}
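// Added usage sketch for the new overload (illustrative constants, not part
// of the original file): prefer a specific address, fall back automatically.
inline void AllocateRegionSketch() {
  RegionAllocator allocator(0x10000, 64 * 0x1000, 0x1000);  // base, size, page
  RegionAllocator::Address addr =
      allocator.AllocateRegion(0x20000 /* hint */, 4 * 0x1000, 0x1000);
  if (addr != RegionAllocator::kAllocationFailure) {
    // The region may be at the hint or anywhere else in the managed range.
    CHECK_EQ(size_t{4} * 0x1000, allocator.FreeRegion(addr));
  }
}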
size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
  DCHECK(IsAligned(new_size, page_size_));
......
@@ -89,6 +89,11 @@ class V8_BASE_EXPORT RegionAllocator final {
  // success or kAllocationFailure.
  Address AllocateAlignedRegion(size_t size, size_t alignment);
// Attempts to allocate a region of the given size and alignment at the
// specified address, but falls back to allocating the region elsewhere if
// necessary.
Address AllocateRegion(Address hint, size_t size, size_t alignment);
// Frees region at given |address|, returns the size of the region.
// There must be a used region starting at given address otherwise nothing
// will be freed and 0 will be returned.
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/virtual-address-space-page-allocator.h"
namespace v8 {
namespace base {
VirtualAddressSpacePageAllocator::VirtualAddressSpacePageAllocator(
v8::VirtualAddressSpace* vas)
: vas_(vas) {}
void* VirtualAddressSpacePageAllocator::AllocatePages(
void* hint, size_t size, size_t alignment,
PageAllocator::Permission access) {
return reinterpret_cast<void*>(
vas_->AllocatePages(reinterpret_cast<Address>(hint), size, alignment,
static_cast<PagePermissions>(access)));
}
bool VirtualAddressSpacePageAllocator::FreePages(void* ptr, size_t size) {
MutexGuard guard(&mutex_);
Address address = reinterpret_cast<Address>(ptr);
// Was this allocation resized previously? If so, use the original size.
auto result = resized_allocations_.find(address);
if (result != resized_allocations_.end()) {
size = result->second;
resized_allocations_.erase(result);
}
return vas_->FreePages(address, size);
}
bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, size_t size,
size_t new_size) {
// The VirtualAddressSpace class doesn't support this method because it can't
// be properly implemented on top of Windows placeholder mappings (they cannot
// be partially freed or resized while being allocated). Instead, we emulate
// this behaviour by decommitting the released pages, which in effect achieves
// exactly what ReleasePages would normally do as well. However, we still need
// to pass the original size to FreePages eventually, so we'll need to keep
// track of that.
DCHECK_LE(new_size, size);
MutexGuard guard(&mutex_);
// Will fail if the allocation was resized previously, which is desired.
Address address = reinterpret_cast<Address>(ptr);
resized_allocations_.insert({address, size});
return vas_->DecommitPages(address + new_size, size - new_size);
}
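// Added usage sketch (hypothetical values, not part of the original file):
// shrink an allocation, then free it. The map above lets FreePages substitute
// the original size even if the caller passes the reduced one.
inline void ShrinkSketch(VirtualAddressSpacePageAllocator* allocator) {
  size_t granule = allocator->AllocatePageSize();
  void* region = allocator->AllocatePages(nullptr, 4 * granule, granule,
                                          PageAllocator::kReadWrite);
  if (!region) return;
  // Keep only the first granule; the tail is decommitted, not unmapped.
  CHECK(allocator->ReleasePages(region, 4 * granule, granule));
  // Passing the reduced size is fine: the recorded original size is used.
  CHECK(allocator->FreePages(region, granule));
}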
bool VirtualAddressSpacePageAllocator::SetPermissions(
void* address, size_t size, PageAllocator::Permission access) {
return vas_->SetPagePermissions(reinterpret_cast<Address>(address), size,
static_cast<PagePermissions>(access));
}
bool VirtualAddressSpacePageAllocator::DiscardSystemPages(void* address,
size_t size) {
return vas_->DiscardSystemPages(reinterpret_cast<Address>(address), size);
}
bool VirtualAddressSpacePageAllocator::DecommitPages(void* address,
size_t size) {
return vas_->DecommitPages(reinterpret_cast<Address>(address), size);
}
} // namespace base
} // namespace v8
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_
#define V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_
#include <unordered_map>
#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/platform/platform.h"
namespace v8 {
namespace base {
// This class bridges a VirtualAddressSpace, the future memory management API,
// to a PageAllocator, the current API.
class V8_BASE_EXPORT VirtualAddressSpacePageAllocator
: public v8::PageAllocator {
public:
using Address = uintptr_t;
explicit VirtualAddressSpacePageAllocator(v8::VirtualAddressSpace* vas);
VirtualAddressSpacePageAllocator(const VirtualAddressSpacePageAllocator&) =
delete;
VirtualAddressSpacePageAllocator& operator=(
const VirtualAddressSpacePageAllocator&) = delete;
~VirtualAddressSpacePageAllocator() override = default;
size_t AllocatePageSize() override { return vas_->allocation_granularity(); }
size_t CommitPageSize() override { return vas_->page_size(); }
void SetRandomMmapSeed(int64_t seed) override { vas_->SetRandomSeed(seed); }
void* GetRandomMmapAddr() override {
return reinterpret_cast<void*>(vas_->RandomPageAddress());
}
void* AllocatePages(void* hint, size_t size, size_t alignment,
Permission access) override;
bool FreePages(void* address, size_t size) override;
bool ReleasePages(void* address, size_t size, size_t new_size) override;
bool SetPermissions(void* address, size_t size, Permission access) override;
bool DiscardSystemPages(void* address, size_t size) override;
bool DecommitPages(void* address, size_t size) override;
private:
// Clients of this class must keep the VirtualAddressSpace alive during the
// lifetime of this instance.
v8::VirtualAddressSpace* vas_;
// As the VirtualAddressSpace class doesn't support ReleasePages, this map is
// required to keep track of the original size of resized page allocations.
// See the ReleasePages implementation.
std::unordered_map<Address, size_t> resized_allocations_;
// Mutex guarding the above map.
Mutex mutex_;
};
} // namespace base
} // namespace v8
#endif // V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_
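// Added usage sketch (hypothetical wiring, not part of the original header):
// serve the existing PageAllocator API on top of a new-style address space.
// The space must outlive the adapter, as noted above. (Requires
// src/base/virtual-address-space.h for the root space.)
inline void BridgeSketch() {
  static v8::base::VirtualAddressSpace root_space;  // process-wide root space
  static v8::base::VirtualAddressSpacePageAllocator adapter(&root_space);
  v8::PageAllocator* legacy = &adapter;
  (void)legacy;  // hand to existing PageAllocator consumers
}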
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/virtual-address-space.h"
#include "include/v8-platform.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
namespace v8 {
namespace base {
#define STATIC_ASSERT_ENUM(a, b) \
static_assert(static_cast<int>(a) == static_cast<int>(b), \
"mismatching enum: " #a)
STATIC_ASSERT_ENUM(PagePermissions::kNoAccess, OS::MemoryPermission::kNoAccess);
STATIC_ASSERT_ENUM(PagePermissions::kReadWrite,
OS::MemoryPermission::kReadWrite);
STATIC_ASSERT_ENUM(PagePermissions::kReadWriteExecute,
OS::MemoryPermission::kReadWriteExecute);
STATIC_ASSERT_ENUM(PagePermissions::kReadExecute,
OS::MemoryPermission::kReadExecute);
#undef STATIC_ASSERT_ENUM
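// Added illustration (hypothetical helper; the file itself casts inline):
// because the enumerator values line up, converting a PagePermissions value
// to an OS::MemoryPermission is a plain integral cast with no lookup table.
inline OS::MemoryPermission ToOSPermissionSketch(PagePermissions permissions) {
  return static_cast<OS::MemoryPermission>(permissions);
}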
VirtualAddressSpace::VirtualAddressSpace()
: VirtualAddressSpaceBase(OS::CommitPageSize(), OS::AllocatePageSize(),
kNullAddress,
std::numeric_limits<uintptr_t>::max()) {
#if V8_OS_WIN
// On Windows, this additional step is required to look up the VirtualAlloc2
// and related functions.
OS::EnsureWin32MemoryAPILoaded();
#endif // V8_OS_WIN
DCHECK(bits::IsPowerOfTwo(page_size()));
DCHECK(bits::IsPowerOfTwo(allocation_granularity()));
DCHECK_GE(allocation_granularity(), page_size());
DCHECK(IsAligned(allocation_granularity(), page_size()));
}
void VirtualAddressSpace::SetRandomSeed(int64_t seed) {
OS::SetRandomMmapSeed(seed);
}
Address VirtualAddressSpace::RandomPageAddress() {
return reinterpret_cast<Address>(OS::GetRandomMmapAddr());
}
Address VirtualAddressSpace::AllocatePages(Address hint, size_t size,
size_t alignment,
PagePermissions permissions) {
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
return reinterpret_cast<Address>(
OS::Allocate(reinterpret_cast<void*>(hint), size, alignment,
static_cast<OS::MemoryPermission>(permissions)));
}
bool VirtualAddressSpace::FreePages(Address address, size_t size) {
DCHECK(IsAligned(address, allocation_granularity()));
DCHECK(IsAligned(size, allocation_granularity()));
return OS::Free(reinterpret_cast<void*>(address), size);
}
bool VirtualAddressSpace::SetPagePermissions(Address address, size_t size,
PagePermissions permissions) {
DCHECK(IsAligned(address, page_size()));
DCHECK(IsAligned(size, page_size()));
return OS::SetPermissions(reinterpret_cast<void*>(address), size,
static_cast<OS::MemoryPermission>(permissions));
}
bool VirtualAddressSpace::CanAllocateSubspaces() {
return OS::CanReserveAddressSpace();
}
std::unique_ptr<v8::VirtualAddressSpace> VirtualAddressSpace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) {
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
base::Optional<AddressSpaceReservation> reservation =
OS::CreateAddressSpaceReservation(
reinterpret_cast<void*>(hint), size, alignment,
static_cast<OS::MemoryPermission>(max_permissions));
if (!reservation.has_value())
return std::unique_ptr<v8::VirtualAddressSpace>();
return std::unique_ptr<v8::VirtualAddressSpace>(
new VirtualAddressSubspace(*reservation, this));
}
bool VirtualAddressSpace::DiscardSystemPages(Address address, size_t size) {
DCHECK(IsAligned(address, page_size()));
DCHECK(IsAligned(size, page_size()));
return OS::DiscardSystemPages(reinterpret_cast<void*>(address), size);
}
bool VirtualAddressSpace::DecommitPages(Address address, size_t size) {
DCHECK(IsAligned(address, page_size()));
DCHECK(IsAligned(size, page_size()));
return OS::DecommitPages(reinterpret_cast<void*>(address), size);
}
bool VirtualAddressSpace::FreeSubspace(VirtualAddressSubspace* subspace) {
return OS::FreeAddressSpaceReservation(subspace->reservation_);
}
VirtualAddressSubspace::VirtualAddressSubspace(
AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space)
: VirtualAddressSpaceBase(
parent_space->page_size(), parent_space->allocation_granularity(),
reinterpret_cast<Address>(reservation.base()), reservation.size()),
reservation_(reservation),
region_allocator_(reinterpret_cast<Address>(reservation.base()),
reservation.size(),
parent_space->allocation_granularity()),
parent_space_(parent_space) {
#if V8_OS_WIN
// On Windows, the address space reservation needs to be split and merged at
// the OS level as well.
region_allocator_.set_on_split_callback([this](Address start, size_t size) {
DCHECK(IsAligned(start, allocation_granularity()));
CHECK(reservation_.SplitPlaceholder(reinterpret_cast<void*>(start), size));
});
region_allocator_.set_on_merge_callback([this](Address start, size_t size) {
DCHECK(IsAligned(start, allocation_granularity()));
CHECK(reservation_.MergePlaceholders(reinterpret_cast<void*>(start), size));
});
#endif // V8_OS_WIN
}
VirtualAddressSubspace::~VirtualAddressSubspace() {
CHECK(parent_space_->FreeSubspace(this));
}
void VirtualAddressSubspace::SetRandomSeed(int64_t seed) {
MutexGuard guard(&mutex_);
rng_.SetSeed(seed);
}
Address VirtualAddressSubspace::RandomPageAddress() {
MutexGuard guard(&mutex_);
// Note: the random numbers generated here aren't uniformly distributed if the
// size isn't a power of two.
Address addr = base() + (rng_.NextInt64() % size());
return RoundDown(addr, allocation_granularity());
}
Address VirtualAddressSubspace::AllocatePages(Address hint, size_t size,
size_t alignment,
PagePermissions permissions) {
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
MutexGuard guard(&mutex_);
Address address = region_allocator_.AllocateRegion(hint, size, alignment);
if (address == RegionAllocator::kAllocationFailure) return kNullAddress;
if (!reservation_.Allocate(reinterpret_cast<void*>(address), size,
static_cast<OS::MemoryPermission>(permissions))) {
// This most likely means that we ran out of memory.
CHECK_EQ(size, region_allocator_.FreeRegion(address));
return kNullAddress;
}
return address;
}
bool VirtualAddressSubspace::FreePages(Address address, size_t size) {
DCHECK(IsAligned(address, allocation_granularity()));
DCHECK(IsAligned(size, allocation_granularity()));
MutexGuard guard(&mutex_);
if (region_allocator_.CheckRegion(address) != size) return false;
// The order here is important: on Windows, the allocation first has to be
// freed to a placeholder before the placeholder can be merged (during the
// merge_callback) with any surrounding placeholder mappings.
CHECK(reservation_.Free(reinterpret_cast<void*>(address), size));
CHECK_EQ(size, region_allocator_.FreeRegion(address));
return true;
}
bool VirtualAddressSubspace::SetPagePermissions(Address address, size_t size,
PagePermissions permissions) {
DCHECK(IsAligned(address, page_size()));
DCHECK(IsAligned(size, page_size()));
return reservation_.SetPermissions(
reinterpret_cast<void*>(address), size,
static_cast<OS::MemoryPermission>(permissions));
}
std::unique_ptr<v8::VirtualAddressSpace>
VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size,
size_t alignment,
PagePermissions max_permissions) {
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
MutexGuard guard(&mutex_);
Address address = region_allocator_.AllocateRegion(hint, size, alignment);
if (address == RegionAllocator::kAllocationFailure) {
return std::unique_ptr<v8::VirtualAddressSpace>();
}
base::Optional<AddressSpaceReservation> reservation =
reservation_.CreateSubReservation(
reinterpret_cast<void*>(address), size,
static_cast<OS::MemoryPermission>(max_permissions));
if (!reservation.has_value()) {
CHECK_EQ(size, region_allocator_.FreeRegion(address));
return nullptr;
}
return std::unique_ptr<v8::VirtualAddressSpace>(
new VirtualAddressSubspace(*reservation, this));
}
bool VirtualAddressSubspace::DiscardSystemPages(Address address, size_t size) {
DCHECK(IsAligned(address, page_size()));
DCHECK(IsAligned(size, page_size()));
return reservation_.DiscardSystemPages(reinterpret_cast<void*>(address),
size);
}
bool VirtualAddressSubspace::DecommitPages(Address address, size_t size) {
DCHECK(IsAligned(address, page_size()));
DCHECK(IsAligned(size, page_size()));
return reservation_.DecommitPages(reinterpret_cast<void*>(address), size);
}
bool VirtualAddressSubspace::FreeSubspace(VirtualAddressSubspace* subspace) {
MutexGuard guard(&mutex_);
AddressSpaceReservation reservation = subspace->reservation_;
Address base = reinterpret_cast<Address>(reservation.base());
if (region_allocator_.FreeRegion(base) != reservation.size()) {
return false;
}
return reservation_.FreeSubReservation(reservation);
}
} // namespace base
} // namespace v8
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_VIRTUAL_ADDRESS_SPACE_H_
#define V8_BASE_VIRTUAL_ADDRESS_SPACE_H_
#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
#include "src/base/region-allocator.h"
namespace v8 {
namespace base {
using Address = uintptr_t;
constexpr Address kNullAddress = 0;
class VirtualAddressSubspace;
/*
* Common parent class to implement deletion of subspaces.
*/
class VirtualAddressSpaceBase
: public NON_EXPORTED_BASE(::v8::VirtualAddressSpace) {
public:
using VirtualAddressSpace::VirtualAddressSpace;
private:
friend VirtualAddressSubspace;
// Called by a subspace during destruction. Responsible for freeing the
// address space reservation and any other data associated with the subspace
// in the parent space.
virtual bool FreeSubspace(VirtualAddressSubspace* subspace) = 0;
};
/*
* The virtual address space of the current process. Conceptually, there
* should only be one such "root" instance. However, in practice there is no
* issue with having multiple instances as the actual resources are managed by
* the OS kernel.
*/
class V8_BASE_EXPORT VirtualAddressSpace : public VirtualAddressSpaceBase {
public:
VirtualAddressSpace();
~VirtualAddressSpace() override = default;
void SetRandomSeed(int64_t seed) override;
Address RandomPageAddress() override;
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions access) override;
bool FreePages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions access) override;
bool CanAllocateSubspaces() override;
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
bool DecommitPages(Address address, size_t size) override;
private:
bool FreeSubspace(VirtualAddressSubspace* subspace) override;
};
/*
* A subspace of a parent virtual address space. This represents a reserved
* contiguous region of virtual address space in the current process.
*/
class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase {
public:
~VirtualAddressSubspace() override;
void SetRandomSeed(int64_t seed) override;
Address RandomPageAddress() override;
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions permissions) override;
bool FreePages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions permissions) override;
bool CanAllocateSubspaces() override { return true; }
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
bool DecommitPages(Address address, size_t size) override;
private:
// The VirtualAddressSpace class creates instances of this class when
// allocating subspaces.
friend class v8::base::VirtualAddressSpace;
bool FreeSubspace(VirtualAddressSubspace* subspace) override;
VirtualAddressSubspace(AddressSpaceReservation reservation,
VirtualAddressSpaceBase* parent_space);
// The address space reservation backing this subspace.
AddressSpaceReservation reservation_;
// Mutex guarding the non-threadsafe RegionAllocator and
// RandomNumberGenerator.
Mutex mutex_;
// RegionAllocator to manage the virtual address reservation and divide it
// into further regions as necessary.
RegionAllocator region_allocator_;
// Random number generator for generating random addresses.
RandomNumberGenerator rng_;
// Pointer to the parent space. Must be kept alive by the owner of this
// instance during its lifetime.
VirtualAddressSpaceBase* parent_space_;
};
} // namespace base
} // namespace v8
#endif // V8_BASE_VIRTUAL_ADDRESS_SPACE_H_
@@ -221,8 +221,8 @@ void V8::InitializePlatform(v8::Platform* platform) {
bool V8::InitializeVirtualMemoryCage() {
  // Platform must have been initialized already.
  CHECK(platform_);
-  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
-  return GetProcessWideVirtualMemoryCage()->Initialize(page_allocator);
+  v8::VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
+  return GetProcessWideVirtualMemoryCage()->Initialize(vas);
}
#endif
......
@@ -8,8 +8,11 @@
#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/cpu.h"
+#include "src/base/emulated-virtual-address-subspace.h"
#include "src/base/lazy-instance.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/base/virtual-address-space-page-allocator.h"
+#include "src/base/virtual-address-space.h"
#include "src/flags/flags.h"
#include "src/security/caged-pointer.h"
#include "src/utils/allocation.h"
@@ -25,159 +28,6 @@ namespace internal {
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
// A PageAllocator that allocates pages inside a given virtual address range
// like the BoundedPageAllocator, except that only a (small) part of the range
// has actually been reserved. As such, this allocator relies on page
// allocation hints for the OS to obtain pages inside the non-reserved part.
// This allocator is used on OSes where reserving virtual address space (and
// thus a virtual memory cage) is too expensive, notabley Windows pre 8.1.
class FakeBoundedPageAllocator : public v8::PageAllocator {
public:
FakeBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t reserved_size)
: page_allocator_(page_allocator),
start_(start),
size_(size),
reserved_size_(reserved_size),
end_of_reserved_region_(start + reserved_size) {
// The size is required to be a power of two so that obtaining a random
// address inside the managed region simply requires a fixed number of
// random bits as offset.
DCHECK(base::bits::IsPowerOfTwo(size));
DCHECK_LT(reserved_size, size);
if (FLAG_random_seed != 0) {
rng_.SetSeed(FLAG_random_seed);
}
reserved_region_page_allocator_ =
std::make_unique<base::BoundedPageAllocator>(
page_allocator_, start_, reserved_size_,
page_allocator_->AllocatePageSize(),
base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
}
~FakeBoundedPageAllocator() override = default;
size_t AllocatePageSize() override {
return page_allocator_->AllocatePageSize();
}
size_t CommitPageSize() override { return page_allocator_->CommitPageSize(); }
void SetRandomMmapSeed(int64_t seed) override { rng_.SetSeed(seed); }
void* GetRandomMmapAddr() override {
// Generate a random number between 0 and size_, then add that to the start
// address to obtain a random mmap address. We deliberately don't use our
// provided page allocator's GetRandomMmapAddr here since that could be
// biased, while we want uniformly distributed random numbers here.
Address addr = rng_.NextInt64() % size_ + start_;
addr = RoundDown(addr, AllocatePageSize());
void* ptr = reinterpret_cast<void*>(addr);
DCHECK(Contains(ptr, 1));
return ptr;
}
void* AllocatePages(void* hint, size_t size, size_t alignment,
Permission access) override {
DCHECK(IsAligned(size, AllocatePageSize()));
DCHECK(IsAligned(alignment, AllocatePageSize()));
// First, try allocating the memory inside the reserved region.
void* ptr = reserved_region_page_allocator_->AllocatePages(
hint, size, alignment, access);
if (ptr) return ptr;
// Then, fall back to allocating memory outside of the reserved region
// through page allocator hints.
// Somewhat arbitrary size limitation to ensure that the loop below for
// finding a fitting base address hint terminates quickly.
if (size >= size_ / 2) return nullptr;
if (!hint || !Contains(hint, size)) hint = GetRandomMmapAddr();
static constexpr int kMaxAttempts = 10;
for (int i = 0; i < kMaxAttempts; i++) {
// If the hint wouldn't result in the entire allocation being inside the
// managed region, simply retry. There is at least a 50% chance of
// getting a usable address due to the size restriction above.
while (!Contains(hint, size)) {
hint = GetRandomMmapAddr();
}
ptr = page_allocator_->AllocatePages(hint, size, alignment, access);
if (ptr && Contains(ptr, size)) {
return ptr;
} else if (ptr) {
page_allocator_->FreePages(ptr, size);
}
// Retry at a different address.
hint = GetRandomMmapAddr();
}
return nullptr;
}
bool FreePages(void* address, size_t size) override {
return AllocatorFor(address)->FreePages(address, size);
}
bool ReleasePages(void* address, size_t size, size_t new_length) override {
return AllocatorFor(address)->ReleasePages(address, size, new_length);
}
bool SetPermissions(void* address, size_t size,
Permission permissions) override {
return AllocatorFor(address)->SetPermissions(address, size, permissions);
}
bool DiscardSystemPages(void* address, size_t size) override {
return AllocatorFor(address)->DiscardSystemPages(address, size);
}
bool DecommitPages(void* address, size_t size) override {
return AllocatorFor(address)->DecommitPages(address, size);
}
private:
bool Contains(void* ptr, size_t length) {
Address addr = reinterpret_cast<Address>(ptr);
return (addr >= start_) && ((addr + length) < (start_ + size_));
}
v8::PageAllocator* AllocatorFor(void* ptr) {
Address addr = reinterpret_cast<Address>(ptr);
if (addr < end_of_reserved_region_) {
DCHECK_GE(addr, start_);
return reserved_region_page_allocator_.get();
} else {
return page_allocator_;
}
}
// The page allocator through which pages inside the region are allocated.
v8::PageAllocator* const page_allocator_;
// The bounded page allocator managing the sub-region that was actually
// reserved.
std::unique_ptr<base::BoundedPageAllocator> reserved_region_page_allocator_;
// Random number generator for generating random addresses.
base::RandomNumberGenerator rng_;
// The start of the virtual memory region in which to allocate pages. This is
// also the start of the sub-region that was reserved.
const Address start_;
// The total size of the address space in which to allocate pages.
const size_t size_;
// The size of the sub-region that has actually been reserved.
const size_t reserved_size_;
// The end of the sub-region that has actually been reserved.
const Address end_of_reserved_region_;
};
// Best-effort helper function to determine the size of the userspace virtual
// address space. Used to determine appropriate cage size and placement.
static Address DetermineAddressSpaceLimit() {
@@ -231,7 +81,7 @@ static Address DetermineAddressSpaceLimit() {
  return address_space_limit;
}

-bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
+bool V8VirtualMemoryCage::Initialize(v8::VirtualAddressSpace* vas) {
  // Take the number of virtual address bits into account when determining the
  // size of the cage. For example, if there are only 39 bits available, split
  // evenly between userspace and kernel, then userspace can only address 256GB
@@ -268,25 +118,39 @@ bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
  }
#endif  // V8_OS_WIN
if (!vas->CanAllocateSubspaces()) {
// If we cannot create virtual memory subspaces, we also need to fall back
    // to creating a fake cage. In practice, this should only happen on Windows
    // versions before Windows 10, maybe including early Windows 10 releases,
    // where the necessary memory management APIs, in particular VirtualAlloc2,
    // are not available. In practice, this check should also subsume the
    // preceding one for Windows 8 and earlier, but we keep both just to be
    // sure, since there the fake cage is technically required for a different
    // reason (large virtual memory reservations being too expensive).
size_to_reserve = kFakeVirtualMemoryCageMinReservationSize;
create_fake_cage = true;
}
  // In any case, the (fake) cage must be at most as large as our address space.
  DCHECK_LE(cage_size, address_space_limit);

  if (create_fake_cage) {
-    return InitializeAsFakeCage(page_allocator, cage_size, size_to_reserve);
+    return InitializeAsFakeCage(vas, cage_size, size_to_reserve);
  } else {
    // TODO(saelo) if this fails, we could still fall back to creating a fake
    // cage. Decide if that would make sense.
    const bool use_guard_regions = true;
-    return Initialize(page_allocator, cage_size, use_guard_regions);
+    return Initialize(vas, cage_size, use_guard_regions);
  }
}
-bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
-                                     size_t size, bool use_guard_regions) {
+bool V8VirtualMemoryCage::Initialize(v8::VirtualAddressSpace* vas, size_t size,
+                                     bool use_guard_regions) {
  CHECK(!initialized_);
  CHECK(!disabled_);
  CHECK(base::bits::IsPowerOfTwo(size));
  CHECK_GE(size, kVirtualMemoryCageMinimumSize);
CHECK(vas->CanAllocateSubspaces());
  // Currently, we allow the cage to be smaller than the requested size. This
  // way, we can gracefully handle cage reservation failures during the initial
@@ -298,43 +162,52 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
  // Which of these options is ultimately taken likely depends on how frequently
  // cage reservation failures occur in practice.
  size_t reservation_size;
-  while (!reservation_base_ && size >= kVirtualMemoryCageMinimumSize) {
+  while (!virtual_address_space_ && size >= kVirtualMemoryCageMinimumSize) {
    reservation_size = size;
    if (use_guard_regions) {
      reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
    }
-    // Technically, we should use kNoAccessWillJitLater here instead since the
-    // cage will contain JIT pages. However, currently this is not required as
-    // PA anyway uses MAP_JIT for V8 mappings. Further, we want to eventually
-    // move JIT pages out of the cage, at which point we'd like to forbid
-    // making pages inside the cage executable, and so don't want MAP_JIT.
-    Address hint = RoundDown(
-        reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr()),
-        kVirtualMemoryCageAlignment);
-    reservation_base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
-        reinterpret_cast<void*>(hint), reservation_size,
-        kVirtualMemoryCageAlignment, PageAllocator::kNoAccess));
-    if (!reservation_base_) {
+    Address hint =
+        RoundDown(vas->RandomPageAddress(), kVirtualMemoryCageAlignment);
+    // Currently, executable memory is still allocated inside the cage. In the
+    // future, we should drop that and use kReadWrite as max_permissions.
+    virtual_address_space_ = vas->AllocateSubspace(
+        hint, reservation_size, kVirtualMemoryCageAlignment,
+        PagePermissions::kReadWriteExecute);
+    if (!virtual_address_space_) {
      size /= 2;
    }
  }
-  if (!reservation_base_) return false;
+  if (!virtual_address_space_) return false;
+  reservation_base_ = virtual_address_space_->base();
  base_ = reservation_base_;
  if (use_guard_regions) {
    base_ += kVirtualMemoryCageGuardRegionSize;
  }
-  page_allocator_ = page_allocator;
  size_ = size;
  end_ = base_ + size_;
  reservation_size_ = reservation_size;
-  cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
-      page_allocator_, base_, size_, page_allocator_->AllocatePageSize(),
-      base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
+  if (use_guard_regions) {
+    // These must succeed since nothing was allocated in the subspace yet.
+    CHECK_EQ(reservation_base_,
+             virtual_address_space_->AllocatePages(
+                 reservation_base_, kVirtualMemoryCageGuardRegionSize,
+                 vas->allocation_granularity(), PagePermissions::kNoAccess));
+    CHECK_EQ(end_,
+             virtual_address_space_->AllocatePages(
+                 end_, kVirtualMemoryCageGuardRegionSize,
+                 vas->allocation_granularity(), PagePermissions::kNoAccess));
+  }
+  cage_page_allocator_ =
+      std::make_unique<base::VirtualAddressSpacePageAllocator>(
+          virtual_address_space_.get());
  initialized_ = true;
  is_fake_cage_ = false;
@@ -344,8 +217,9 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
  return true;
}
-bool V8VirtualMemoryCage::InitializeAsFakeCage(
-    v8::PageAllocator* page_allocator, size_t size, size_t size_to_reserve) {
+bool V8VirtualMemoryCage::InitializeAsFakeCage(v8::VirtualAddressSpace* vas,
+                                               size_t size,
+                                               size_t size_to_reserve) {
  CHECK(!initialized_);
  CHECK(!disabled_);
  CHECK(base::bits::IsPowerOfTwo(size));
@@ -356,7 +230,7 @@ bool V8VirtualMemoryCage::InitializeAsFakeCage(
  // Use a custom random number generator here to ensure that we get uniformly
  // distributed random numbers. We figure out the available address space
  // ourselves, and so are potentially better positioned to determine a good
-  // base address for the cage than the embedder-provided GetRandomMmapAddr().
+  // base address for the cage than the embedder.
  base::RandomNumberGenerator rng;
  if (FLAG_random_seed != 0) {
    rng.SetSeed(FLAG_random_seed);
@@ -375,9 +249,9 @@ bool V8VirtualMemoryCage::InitializeAsFakeCage(
    Address hint = rng.NextInt64() % highest_allowed_address;
    hint = RoundDown(hint, kVirtualMemoryCageAlignment);
-    reservation_base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
-        reinterpret_cast<void*>(hint), size_to_reserve,
-        kVirtualMemoryCageAlignment, PageAllocator::kNoAccess));
+    reservation_base_ =
+        vas->AllocatePages(hint, size_to_reserve, kVirtualMemoryCageAlignment,
+                           PagePermissions::kNoAccess);
    if (!reservation_base_) return false;
@@ -387,8 +261,7 @@ bool V8VirtualMemoryCage::InitializeAsFakeCage(
      break;

    // Can't use this base, so free the reservation and try again
-    page_allocator->FreePages(reinterpret_cast<void*>(reservation_base_),
-                              size_to_reserve);
+    CHECK(vas->FreePages(reservation_base_, size_to_reserve));
    reservation_base_ = kNullAddress;
  }
  DCHECK(reservation_base_);
@@ -399,9 +272,12 @@ bool V8VirtualMemoryCage::InitializeAsFakeCage(
  reservation_size_ = size_to_reserve;
  initialized_ = true;
  is_fake_cage_ = true;
-  page_allocator_ = page_allocator;
-  cage_page_allocator_ = std::make_unique<FakeBoundedPageAllocator>(
-      page_allocator_, base_, size_, reservation_size_);
+  virtual_address_space_ =
+      std::make_unique<base::EmulatedVirtualAddressSubspace>(
+          vas, reservation_base_, reservation_size_, size_);
+  cage_page_allocator_ =
+      std::make_unique<base::VirtualAddressSpacePageAllocator>(
+          virtual_address_space_.get());
InitializeConstants(); InitializeConstants();
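To make the control flow of the fake-cage path easier to follow outside the diff, here is the reservation logic above condensed into one loop. The condition guarding the `break` is elided in this view of the diff, so the fits-below-highest-address check shown is an assumption, as is the retry-bound name; everything else mirrors the lines above.

```cpp
// Condensed sketch of the fake-cage reservation loop shown above. The
// condition guarding the `break` is not visible in this diff view, so the
// usability check here is an assumption.
bool ReserveFakeCageBase(v8::VirtualAddressSpace* vas,
                         base::RandomNumberGenerator& rng,
                         Address highest_allowed_address,
                         size_t size_to_reserve, Address* out_base) {
  constexpr int kMaxReservationTries = 16;  // assumed name and bound
  for (int i = 0; i < kMaxReservationTries; i++) {
    Address hint = rng.NextInt64() % highest_allowed_address;
    hint = RoundDown(hint, kVirtualMemoryCageAlignment);
    Address base = vas->AllocatePages(hint, size_to_reserve,
                                      kVirtualMemoryCageAlignment,
                                      PagePermissions::kNoAccess);
    if (!base) return false;  // mirrors `if (!reservation_base_) return false;`
    if (base <= highest_allowed_address) {  // assumed usability check
      *out_base = base;
      return true;
    }
    // Can't use this base, so free the reservation and try again.
    CHECK(vas->FreePages(base, size_to_reserve));
  }
  return false;
}
```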
@@ -418,9 +294,9 @@ void V8VirtualMemoryCage::InitializeConstants() {
 void V8VirtualMemoryCage::TearDown() {
   if (initialized_) {
+    // This destroys the subspace and frees the underlying reservation.
+    virtual_address_space_.reset();
     cage_page_allocator_.reset();
-    CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base_),
-                                     reservation_size_));
     base_ = kNullAddress;
     end_ = kNullAddress;
     size_ = 0;
@@ -428,7 +304,6 @@ void V8VirtualMemoryCage::TearDown() {
     reservation_size_ = 0;
     initialized_ = false;
     is_fake_cage_ = false;
-    page_allocator_ = nullptr;
 #ifdef V8_CAGED_POINTERS
     constants_.Reset();
 #endif
......
@@ -11,8 +11,6 @@
 namespace v8 {
-class PageAllocator;
 namespace internal {
 #ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
@@ -59,7 +57,7 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
   V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
   V8VirtualMemoryCage& operator=(V8VirtualMemoryCage&) = delete;
-  bool Initialize(v8::PageAllocator* page_allocator);
+  bool Initialize(v8::VirtualAddressSpace* vas);
   void Disable() {
     CHECK(!initialized_);
     disabled_ = true;
@@ -84,6 +82,10 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
     return cage_page_allocator_.get();
   }
+  v8::VirtualAddressSpace* virtual_address_space() const {
+    return virtual_address_space_.get();
+  }
   bool Contains(Address addr) const {
     return addr >= base_ && addr < base_ + size_;
   }
@@ -126,13 +128,18 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
   // We allow tests to disable the guard regions around the cage. This is
   // useful for example for tests like the SequentialUnmapperTest which track
   // page allocations and so would incur a large overhead from the guard
   // regions.
-  bool Initialize(v8::PageAllocator* page_allocator, size_t size,
+  // The provided virtual address space must be able to allocate subspaces.
+  // The size must be a multiple of the allocation granularity of the virtual
+  // address space.
+  bool Initialize(v8::VirtualAddressSpace* vas, size_t size,
                   bool use_guard_regions);
   // Used on OSes where reserving virtual memory is too expensive. A fake cage
   // does not reserve all of the virtual memory and so doesn't have the desired
   // security properties.
-  bool InitializeAsFakeCage(v8::PageAllocator* page_allocator, size_t size,
+  // The size and size_to_reserve parameters must be multiples of the
+  // allocation granularity of the virtual address space.
+  bool InitializeAsFakeCage(v8::VirtualAddressSpace* vas, size_t size,
                             size_t size_to_reserve);
   // Initialize the caged pointer constants for this cage. Called by the
@@ -153,9 +160,10 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
   bool disabled_ = false;
   bool is_fake_cage_ = false;
-  // The allocator through which the virtual memory of the cage was allocated.
-  v8::PageAllocator* page_allocator_ = nullptr;
-  // The allocator to allocate pages inside the cage.
+  // The virtual address subspace backing the cage.
+  std::unique_ptr<v8::VirtualAddressSpace> virtual_address_space_;
+  // The page allocator instance for this cage.
   std::unique_ptr<v8::PageAllocator> cage_page_allocator_;
 #ifdef V8_CAGED_POINTERS
......
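For orientation, here is how the reshaped public surface fits together end to end; the unit tests further down exercise the same flow. This is a sketch assuming the code runs inside v8::internal (so GetPlatformVirtualAddressSpace() from allocation.h resolves) and that TearDown() is callable from this context, as the tests suggest.

```cpp
// Usage sketch for the reshaped cage API, mirroring the unit tests later in
// this change. Names are assumed to resolve inside v8::internal.
void UseCage() {
  v8::VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
  V8VirtualMemoryCage cage;
  CHECK(cage.Initialize(vas));
  // Pages inside the cage can now be managed through either interface.
  v8::VirtualAddressSpace* cage_vas = cage.virtual_address_space();
  size_t granularity = cage_vas->allocation_granularity();
  Address page =
      cage_vas->AllocatePages(VirtualAddressSpace::kNoHint, granularity,
                              granularity, PagePermissions::kNoAccess);
  CHECK(cage.Contains(page));
  CHECK(cage_vas->FreePages(page, granularity));
  cage.TearDown();
}
```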
@@ -15,6 +15,7 @@
 #include "src/base/platform/wrappers.h"
 #include "src/base/sanitizer/lsan-page-allocator.h"
 #include "src/base/vector.h"
+#include "src/base/virtual-address-space.h"
 #include "src/flags/flags.h"
 #include "src/init/v8.h"
 #include "src/security/vm-cage.h"
@@ -84,6 +85,11 @@ v8::PageAllocator* GetPlatformPageAllocator() {
   return GetPageAllocatorInitializer()->page_allocator();
 }
+v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace() {
+  static base::LeakyObject<base::VirtualAddressSpace> vas;
+  return vas.get();
+}
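base::LeakyObject gives a function-local singleton that is constructed on first use and intentionally never destroyed, avoiding destruction-order problems at process exit. A minimal sketch of that pattern, with the shape inferred from its usage here rather than from the real helper:

```cpp
// Minimal sketch of the leaky-singleton pattern behind base::LeakyObject;
// the real helper lives in V8's base library and may differ in detail.
#include <new>
#include <utility>

template <typename T>
class LeakyObject {
 public:
  template <typename... Args>
  explicit LeakyObject(Args&&... args) {
    // Construct in place; the destructor is intentionally never run.
    new (&storage_) T(std::forward<Args>(args)...);
  }
  LeakyObject(const LeakyObject&) = delete;
  LeakyObject& operator=(const LeakyObject&) = delete;

  T* get() { return reinterpret_cast<T*>(&storage_); }

 private:
  alignas(T) unsigned char storage_[sizeof(T)];
};
```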
 #ifdef V8_VIRTUAL_MEMORY_CAGE
 v8::PageAllocator* GetVirtualMemoryCagePageAllocator() {
   // TODO(chromium:1218005) remove this code once the cage is no longer
@@ -189,7 +195,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
   DCHECK_EQ(hint, AlignedAddress(hint, alignment));
   DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
   if (FLAG_randomize_all_allocations) {
-    hint = page_allocator->GetRandomMmapAddr();
+    hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
   }
   void* result = nullptr;
   for (int i = 0; i < kAllocationTries; ++i) {
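The one-line fix above matters because of the DCHECK_EQ(hint, AlignedAddress(hint, alignment)) at the top of the function: an unaligned random hint would trip it. AlignedAddress presumably rounds an address down to the given power-of-two alignment; a sketch of that computation:

```cpp
// Sketch of rounding an address down to a power-of-two alignment, matching
// what AlignedAddress is presumably doing to keep the random hint aligned.
#include <cstddef>
#include <cstdint>

void* AlignedAddressSketch(void* address, size_t alignment) {
  // Valid only for power-of-two alignments.
  uintptr_t mask = ~(static_cast<uintptr_t>(alignment) - 1);
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) & mask);
}
// Example: AlignedAddressSketch((void*)0x12345, 0x1000) == (void*)0x12000.
```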
......
@@ -102,6 +102,10 @@ V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
 // Returns the platform page allocator instance. Guaranteed to be a valid
 // pointer.
 V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
+// Returns the platform virtual address space instance. Guaranteed to be a
+// valid pointer.
+V8_EXPORT_PRIVATE v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace();
 #ifdef V8_VIRTUAL_MEMORY_CAGE
 // Returns the virtual memory cage page allocator instance for allocating pages
 // inside the virtual memory cage. Guaranteed to be a valid pointer.
......
@@ -233,7 +233,9 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
   PagePermissionsMap page_permissions_;
 };
-#if !V8_OS_FUCHSIA
+// This test is currently incompatible with the VirtualMemoryCage. Enable it
+// once the VirtualAddressSpace interface is stable.
+#if !V8_OS_FUCHSIA && !V8_VIRTUAL_MEMORY_CAGE
 class SequentialUnmapperTest : public TestWithIsolate {
  public:
   SequentialUnmapperTest() = default;
@@ -362,7 +364,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
   tracking_page_allocator()->CheckIsFree(page->address(), page_size);
 #endif  // V8_COMPRESS_POINTERS
 }
-#endif  // !V8_OS_FUCHSIA
+#endif  // !V8_OS_FUCHSIA && !V8_VIRTUAL_MEMORY_CAGE
 }  // namespace internal
 }  // namespace v8
@@ -4,6 +4,7 @@
 #include <vector>
+#include "src/base/virtual-address-space.h"
 #include "src/security/vm-cage.h"
 #include "test/unittests/test-utils.h"
@@ -13,7 +14,7 @@ namespace v8 {
 namespace internal {
 TEST(VirtualMemoryCageTest, Initialization) {
-  base::PageAllocator page_allocator;
+  base::VirtualAddressSpace vas;
   V8VirtualMemoryCage cage;
@@ -22,7 +23,7 @@ TEST(VirtualMemoryCageTest, Initialization) {
   EXPECT_FALSE(cage.is_fake_cage());
   EXPECT_EQ(cage.size(), 0UL);
-  EXPECT_TRUE(cage.Initialize(&page_allocator));
+  EXPECT_TRUE(cage.Initialize(&vas));
   EXPECT_TRUE(cage.is_initialized());
   EXPECT_NE(cage.base(), 0UL);
@@ -34,11 +35,14 @@ TEST(VirtualMemoryCageTest, Initialization) {
 }
 TEST(VirtualMemoryCageTest, InitializationWithSize) {
-  base::PageAllocator page_allocator;
+  base::VirtualAddressSpace vas;
+  // This test only works if virtual memory subspaces can be allocated.
+  if (!vas.CanAllocateSubspaces()) return;
   V8VirtualMemoryCage cage;
   size_t size = kVirtualMemoryCageMinimumSize;
   const bool use_guard_regions = false;
-  EXPECT_TRUE(cage.Initialize(&page_allocator, size, use_guard_regions));
+  EXPECT_TRUE(cage.Initialize(&vas, size, use_guard_regions));
   EXPECT_TRUE(cage.is_initialized());
   EXPECT_FALSE(cage.is_fake_cage());
@@ -48,14 +52,14 @@ TEST(VirtualMemoryCageTest, InitializationWithSize) {
 }
 TEST(VirtualMemoryCageTest, InitializationAsFakeCage) {
-  base::PageAllocator page_allocator;
+  base::VirtualAddressSpace vas;
   V8VirtualMemoryCage cage;
   // Total size of the fake cage.
   size_t size = kVirtualMemoryCageSize;
   // Size of the virtual memory that is actually reserved at the start of the
   // cage.
-  size_t reserved_size = 2 * page_allocator.AllocatePageSize();
-  EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
+  size_t reserved_size = 2 * vas.allocation_granularity();
+  EXPECT_TRUE(cage.InitializeAsFakeCage(&vas, size, reserved_size));
   EXPECT_TRUE(cage.is_initialized());
   EXPECT_TRUE(cage.is_fake_cage());
@@ -68,9 +72,9 @@ TEST(VirtualMemoryCageTest, InitializationAsFakeCage) {
 }
 TEST(VirtualMemoryCageTest, Contains) {
-  base::PageAllocator page_allocator;
+  base::VirtualAddressSpace vas;
   V8VirtualMemoryCage cage;
-  EXPECT_TRUE(cage.Initialize(&page_allocator));
+  EXPECT_TRUE(cage.Initialize(&vas));
   Address base = cage.base();
   size_t size = cage.size();
@@ -99,29 +103,29 @@ void TestCagePageAllocation(V8VirtualMemoryCage& cage) {
   const size_t kAllocationSizesInPages[] = {1, 1, 2, 3, 5, 8, 13, 21, 34};
   constexpr int kNumAllocations = arraysize(kAllocationSizesInPages);
-  PageAllocator* allocator = cage.page_allocator();
-  size_t page_size = allocator->AllocatePageSize();
-  std::vector<void*> allocations;
+  VirtualAddressSpace* vas = cage.virtual_address_space();
+  size_t allocation_granularity = vas->allocation_granularity();
+  std::vector<Address> allocations;
   for (int i = 0; i < kNumAllocations; i++) {
-    size_t length = page_size * kAllocationSizesInPages[i];
-    size_t alignment = page_size;
-    void* ptr = allocator->AllocatePages(nullptr, length, alignment,
-                                         PageAllocator::kNoAccess);
-    EXPECT_NE(ptr, nullptr);
+    size_t length = allocation_granularity * kAllocationSizesInPages[i];
+    size_t alignment = allocation_granularity;
+    Address ptr = vas->AllocatePages(VirtualAddressSpace::kNoHint, length,
+                                     alignment, PagePermissions::kNoAccess);
+    EXPECT_NE(ptr, kNullAddress);
     EXPECT_TRUE(cage.Contains(ptr));
     allocations.push_back(ptr);
   }
   for (int i = 0; i < kNumAllocations; i++) {
-    size_t length = page_size * kAllocationSizesInPages[i];
-    allocator->FreePages(allocations[i], length);
+    size_t length = allocation_granularity * kAllocationSizesInPages[i];
+    EXPECT_TRUE(vas->FreePages(allocations[i], length));
   }
 }
 TEST(VirtualMemoryCageTest, PageAllocation) {
-  base::PageAllocator page_allocator;
+  base::VirtualAddressSpace vas;
   V8VirtualMemoryCage cage;
-  EXPECT_TRUE(cage.Initialize(&page_allocator));
+  EXPECT_TRUE(cage.Initialize(&vas));
   TestCagePageAllocation(cage);
@@ -129,13 +133,13 @@ TEST(VirtualMemoryCageTest, PageAllocation) {
 }
 TEST(VirtualMemoryCageTest, FakeCagePageAllocation) {
-  base::PageAllocator page_allocator;
+  base::VirtualAddressSpace vas;
   V8VirtualMemoryCage cage;
   size_t size = kVirtualMemoryCageSize;
   // Only reserve two pages so the test will allocate memory inside and outside
   // of the reserved region.
-  size_t reserved_size = 2 * page_allocator.AllocatePageSize();
-  EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
+  size_t reserved_size = 2 * vas.allocation_granularity();
+  EXPECT_TRUE(cage.InitializeAsFakeCage(&vas, size, reserved_size));
   TestCagePageAllocation(cage);
......
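In miniature, the property FakeCagePageAllocation establishes is this: with only two granules actually reserved, allocations that spill past the reservation must still satisfy Contains(). A test-style sketch reusing the names above; access to the private InitializeAsFakeCage is assumed to go through the test's friendship in the real harness.

```cpp
// Test-style sketch of the fake-cage containment property exercised above.
// Assumes the same helpers and names as the tests; not part of this change.
void CheckFakeCageContainment() {
  base::VirtualAddressSpace vas;
  V8VirtualMemoryCage cage;
  size_t reserved_size = 2 * vas.allocation_granularity();
  CHECK(cage.InitializeAsFakeCage(&vas, kVirtualMemoryCageSize, reserved_size));
  VirtualAddressSpace* cage_vas = cage.virtual_address_space();
  // Larger than the reservation, so this lands in the emulated part...
  size_t length = 4 * cage_vas->allocation_granularity();
  Address ptr = cage_vas->AllocatePages(
      VirtualAddressSpace::kNoHint, length, cage_vas->allocation_granularity(),
      PagePermissions::kNoAccess);
  // ...yet must still fall within the cage's address range.
  CHECK(cage.Contains(ptr));
  CHECK(cage_vas->FreePages(ptr, length));
  cage.TearDown();
}
```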