Commit a7cb30b0 authored by Samuel Groß, committed by V8 LUCI CQ

Introduce VirtualAddressSpace interface

This interface is meant to eventually replace the existing
v8::PageAllocator interface. Beyond general refactoring of the
PageAllocator APIs, the new interface now supports the concept of
(contiguous) address space reservations, which previously had to be
implemented through page allocations. These reservations make better
use of the OS primitives available on Fuchsia (VMARs) and Windows
(placeholder mappings) and can be used to back many of the cages and
virtual memory regions that V8 creates.

The new interface is not yet stable and may change at any time without
deprecating the old version first.
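
A rough usage sketch of the new interface (illustrative only; the concrete
root space is assumed to be the implementation added in
src/base/virtual-address-space.h, and sizes are arbitrary):

#include <memory>
#include "include/v8-platform.h"

// Reserve a contiguous 1 GiB region as a subspace and allocate pages in it.
// Sketch only: error handling and sizes are illustrative assumptions.
void Sketch(v8::VirtualAddressSpace* root) {
  const size_t granularity = root->allocation_granularity();
  std::unique_ptr<v8::VirtualAddressSpace> space = root->AllocateSubspace(
      v8::VirtualAddressSpace::kNoHint, size_t{1} << 30, granularity,
      v8::PagePermissions::kReadWrite);
  if (!space) return;
  // Page allocations in the parent space cannot end up inside this subspace.
  v8::VirtualAddressSpace::Address pages = space->AllocatePages(
      v8::VirtualAddressSpace::kNoHint, 16 * granularity, granularity,
      v8::PagePermissions::kReadWrite);
  if (pages != 0) {
    bool ok = space->FreePages(pages, 16 * granularity);
    (void)ok;
  }
}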

Bug: chromium:1218005
Change-Id: I295253c42e04cf311393c5dab9f8c06bd7451ce3
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3301475
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78235}
parent c1278acd
......@@ -541,6 +541,8 @@ filegroup(
"src/base/debug/stack_trace.h",
"src/base/division-by-constant.cc",
"src/base/division-by-constant.h",
"src/base/emulated-virtual-address-subspace.cc",
"src/base/emulated-virtual-address-subspace.h",
"src/base/enum-set.h",
"src/base/export-template.h",
"src/base/file-utils.cc",
......@@ -612,6 +614,10 @@ filegroup(
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
"src/base/vector.h",
"src/base/virtual-address-space-page-allocator.cc",
"src/base/virtual-address-space-page-allocator.h",
"src/base/virtual-address-space.cc",
"src/base/virtual-address-space.h",
"src/base/v8-fallthrough.h",
"src/base/vlq-base64.cc",
"src/base/vlq-base64.h",
......
......@@ -4985,6 +4985,8 @@ v8_component("v8_libbase") {
"src/base/debug/stack_trace.h",
"src/base/division-by-constant.cc",
"src/base/division-by-constant.h",
"src/base/emulated-virtual-address-subspace.cc",
"src/base/emulated-virtual-address-subspace.h",
"src/base/enum-set.h",
"src/base/export-template.h",
"src/base/file-utils.cc",
......@@ -5063,6 +5065,10 @@ v8_component("v8_libbase") {
"src/base/utils/random-number-generator.h",
"src/base/v8-fallthrough.h",
"src/base/vector.h",
"src/base/virtual-address-space-page-allocator.cc",
"src/base/virtual-address-space-page-allocator.h",
"src/base/virtual-address-space.cc",
"src/base/virtual-address-space.h",
"src/base/vlq-base64.cc",
"src/base/vlq-base64.h",
"src/base/vlq.h",
......
......@@ -510,6 +510,213 @@ class PageAllocator {
virtual bool CanAllocateSharedPages() { return false; }
};
/**
* Page permissions.
*/
enum class PagePermissions {
kNoAccess,
kRead,
kReadWrite,
kReadWriteExecute,
kReadExecute,
};
/**
* Class to manage a virtual memory address space.
*
* This class represents a contiguous region of virtual address space in which
* sub-spaces and (private or shared) memory pages can be allocated, freed, and
* modified. This interface is meant to eventually replace the PageAllocator
* interface, and can be used as an alternative in the meantime.
*/
class VirtualAddressSpace {
public:
using Address = uintptr_t;
VirtualAddressSpace(size_t page_size, size_t allocation_granularity,
Address base, size_t size)
: page_size_(page_size),
allocation_granularity_(allocation_granularity),
base_(base),
size_(size) {}
virtual ~VirtualAddressSpace() = default;
/**
* The page size used inside this space. Guaranteed to be a power of two.
* Used as the granularity for all page-related operations except allocation,
* which uses the allocation_granularity(); see below.
*
* \returns the page size in bytes.
*/
size_t page_size() const { return page_size_; }
/**
* The granularity of page allocations and, by extension, of subspace
* allocations. This is guaranteed to be a power of two and a multiple of the
* page_size(). In practice, this is equal to the page size on most OSes, but
* on Windows it is usually 64KB, while the page size is 4KB.
*
* \returns the allocation granularity in bytes.
*/
size_t allocation_granularity() const { return allocation_granularity_; }
/**
* The base address of the address space managed by this instance.
*
* \returns the base address of this address space.
*/
Address base() const { return base_; }
/**
* The size of the address space managed by this instance.
*
* \returns the size of this address space in bytes.
*/
size_t size() const { return size_; }
/**
* Sets the random seed so that GetRandomPageAddress() will generate
* repeatable sequences of random addresses.
*
* \param seed The seed for the PRNG.
*/
virtual void SetRandomSeed(int64_t seed) = 0;
/**
* Returns a random address inside this address space, suitable for page
* allocation hints.
*
* \returns a random address aligned to allocation_granularity().
*/
virtual Address RandomPageAddress() = 0;
/**
* Allocates private memory pages with the given alignment and permissions.
*
* \param hint If nonzero, an attempt is made to place the allocation at the
* given address first. If that fails, the allocation may be placed elsewhere,
* possibly nearby, but that is not guaranteed. Specifying zero for the hint
* always causes this function to choose a random address.
*
* \param size The size of the allocation in bytes. Must be a multiple of the
* allocation_granularity().
*
* \param alignment The alignment of the allocation in bytes. Must be a
* multiple of the allocation_granularity() and should be a power of two.
*
* \param permissions The page permissions of the newly allocated pages.
*
* \returns the start address of the allocated pages on success, zero on
* failure.
*/
static constexpr Address kNoHint = 0;
virtual V8_WARN_UNUSED_RESULT Address
AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions permissions) = 0;
/**
* Frees previously allocated pages.
*
* \param address The start address of the pages to free. This address must
* have been obtained from a call to AllocatePages.
*
* \param size The size in bytes of the region to free. This must match the
* size passed to AllocatePages when the pages were allocated.
*
* \returns true on success, false otherwise.
*/
virtual V8_WARN_UNUSED_RESULT bool FreePages(Address address,
size_t size) = 0;
/**
* Sets permissions of all allocated pages in the given range.
*
* \param address The start address of the range. Must be aligned to
* page_size().
*
* \param size The size in bytes of the range. Must be a multiple
* of page_size().
*
* \param permissions The new permissions for the range.
*
* \returns true on success, false otherwise.
*/
virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions(
Address address, size_t size, PagePermissions permissions) = 0;
/**
* Whether this instance can allocate subspaces or not.
*
* \returns true if subspaces can be allocated, false if not.
*/
virtual bool CanAllocateSubspaces() = 0;
/**
* Allocates a subspace.
*
* The address space of a subspace stays reserved in the parent space for the
* lifetime of the subspace. As such, it is guaranteed that page allocations
* on the parent space cannot end up inside a subspace.
*
* \param hint Hints where the subspace should be allocated. See
* AllocatePages() for more details.
*
* \param size The size in bytes of the subspace. Must be a multiple of the
* allocation_granularity().
*
* \param alignment The alignment of the subspace in bytes. Must be a multiple
* of the allocation_granularity() and should be a power of two.
*
* \param max_permissions The maximum permissions that pages allocated in the
* subspace can obtain.
*
* \returns a new subspace or nullptr on failure.
*/
virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) = 0;
//
// TODO(v8) maybe refactor the methods below before stabilizing the API. For
// example by combining them into some form of page operation method that
// takes a command enum as parameter.
//
/**
* Frees memory in the given [address, address + size) range. address and
* size should be aligned to the page_size(). The next write to this memory
* area brings the memory transparently back. This should be treated as a
* hint to the OS that the pages are no longer needed. It does not guarantee
* that the pages will be discarded immediately or at all.
*
* \returns true on success, false otherwise. Since this method is only a
* hint, a successful invocation does not imply that pages have been removed.
*/
virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address,
size_t size) {
return true;
}
/**
* Decommits any wired memory pages in the given range, allowing the OS to
* reclaim them, and marks the region as inaccessible (kNoAccess). The address
* range stays reserved and can be accessed again later by changing its
* permissions. However, in that case the memory content is guaranteed to be
* zero-initialized again. The memory must have been previously allocated by a
* call to AllocatePages.
*
* \returns true on success, false otherwise.
*/
virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address,
size_t size) = 0;
private:
const size_t page_size_;
const size_t allocation_granularity_;
const Address base_;
const size_t size_;
};
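// A short page-lifecycle sketch for the interface above. Illustrative only:
// `space` is assumed to be some concrete VirtualAddressSpace implementation,
// and error handling is elided.
void PageLifecycleSketch(v8::VirtualAddressSpace* space) {
  using VAS = v8::VirtualAddressSpace;
  const size_t size = 4 * space->allocation_granularity();
  // Allocate initially inaccessible pages at an arbitrary address.
  VAS::Address region =
      space->AllocatePages(VAS::kNoHint, size, space->allocation_granularity(),
                           v8::PagePermissions::kNoAccess);
  if (!region) return;
  // Make the pages usable, then decommit them: the range stays reserved and
  // reads back as zeroes once its permissions are raised again.
  bool ok = space->SetPagePermissions(region, size,
                                      v8::PagePermissions::kReadWrite);
  ok = ok && space->DecommitPages(region, size);
  ok = ok && space->FreePages(region, size);
  (void)ok;
}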
/**
* V8 Allocator used for allocating zone backings.
*/
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/emulated-virtual-address-subspace.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
namespace v8 {
namespace base {
EmulatedVirtualAddressSubspace::EmulatedVirtualAddressSubspace(
VirtualAddressSpace* parent_space, Address base, size_t mapped_size,
size_t total_size)
: VirtualAddressSpace(parent_space->page_size(),
parent_space->allocation_granularity(), base,
total_size),
mapped_size_(mapped_size),
parent_space_(parent_space),
region_allocator_(base, mapped_size, parent_space_->page_size()) {
// For simplicity, we currently require both the mapped and total size to be
// a power of two. This simplifies some things later on, for example, random
// addresses can be generated with a simple bitmask, and will then be inside
// the unmapped space with a probability >= 50% (mapped size == unmapped
// size) or never (mapped size == total size).
DCHECK(base::bits::IsPowerOfTwo(mapped_size));
DCHECK(base::bits::IsPowerOfTwo(total_size));
}
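// Numeric sketch of the power-of-two argument above (sizes are illustrative
// assumptions, not values used by V8):
constexpr uint64_t kExampleTotalSize = uint64_t{1} << 32;       // emulated size
constexpr uint64_t kExampleMappedSize = kExampleTotalSize / 2;  // mapped half
// A random offset drawn as (rng() & (kExampleTotalSize - 1)) is uniform over
// [0, kExampleTotalSize), so it lands in the unmapped upper half with
// probability (kExampleTotalSize - kExampleMappedSize) / kExampleTotalSize,
// i.e. exactly 50% here, and never if the entire space were mapped.
static_assert(kExampleTotalSize - kExampleMappedSize == kExampleMappedSize,
              "in this example the mapped and unmapped halves are equal");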
EmulatedVirtualAddressSubspace::~EmulatedVirtualAddressSubspace() {
CHECK(parent_space_->FreePages(base(), mapped_size_));
}
void EmulatedVirtualAddressSubspace::SetRandomSeed(int64_t seed) {
MutexGuard guard(&mutex_);
rng_.SetSeed(seed);
}
Address EmulatedVirtualAddressSubspace::RandomPageAddress() {
MutexGuard guard(&mutex_);
Address addr = base() + (rng_.NextInt64() % size());
return RoundDown(addr, allocation_granularity());
}
Address EmulatedVirtualAddressSubspace::AllocatePages(
Address hint, size_t size, size_t alignment, PagePermissions permissions) {
if (hint == kNoHint || MappedRegionContains(hint, size)) {
MutexGuard guard(&mutex_);
// Attempt to find a region in the mapped region.
Address address = region_allocator_.AllocateRegion(hint, size, alignment);
if (address != RegionAllocator::kAllocationFailure) {
// Success. Only need to adjust the page permissions.
if (parent_space_->SetPagePermissions(address, size, permissions)) {
return address;
}
// Probably ran out of memory, but still try to allocate in the unmapped
// space.
CHECK_EQ(size, region_allocator_.FreeRegion(address));
}
}
// No luck or hint is outside of the mapped region. Try to allocate pages in
// the unmapped space using page allocation hints instead.
// Somewhat arbitrary size limitation to ensure that the loop below for
// finding a fitting base address hint terminates quickly.
if (size >= (unmapped_size() / 2)) return kNullAddress;
static constexpr int kMaxAttempts = 10;
for (int i = 0; i < kMaxAttempts; i++) {
// If the hint wouldn't result in the entire allocation being inside the
// managed region, simply retry. There is at least a 50% chance of
// getting a usable address due to the size restriction above.
while (!UnmappedRegionContains(hint, size)) {
hint = RandomPageAddress();
}
Address region =
parent_space_->AllocatePages(hint, size, alignment, permissions);
if (region && UnmappedRegionContains(region, size)) {
return region;
} else if (region) {
CHECK(parent_space_->FreePages(region, size));
}
// Retry at a different address.
hint = RandomPageAddress();
}
return kNullAddress;
}
bool EmulatedVirtualAddressSubspace::FreePages(Address address, size_t size) {
if (MappedRegionContains(address, size)) {
MutexGuard guard(&mutex_);
if (region_allocator_.FreeRegion(address) != size) return false;
CHECK(parent_space_->DecommitPages(address, size));
return true;
}
if (!UnmappedRegionContains(address, size)) return false;
return parent_space_->FreePages(address, size);
}
bool EmulatedVirtualAddressSubspace::SetPagePermissions(
Address address, size_t size, PagePermissions permissions) {
DCHECK(Contains(address, size));
return parent_space_->SetPagePermissions(address, size, permissions);
}
bool EmulatedVirtualAddressSubspace::CanAllocateSubspaces() {
// This is not supported, mostly because it's not (yet) needed in practice.
return false;
}
std::unique_ptr<v8::VirtualAddressSpace>
EmulatedVirtualAddressSubspace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) {
UNREACHABLE();
}
bool EmulatedVirtualAddressSubspace::DiscardSystemPages(Address address,
size_t size) {
DCHECK(Contains(address, size));
return parent_space_->DiscardSystemPages(address, size);
}
bool EmulatedVirtualAddressSubspace::DecommitPages(Address address,
size_t size) {
DCHECK(Contains(address, size));
return parent_space_->DecommitPages(address, size);
}
} // namespace base
} // namespace v8
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_
#define V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_
#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/mutex.h"
#include "src/base/region-allocator.h"
#include "src/base/virtual-address-space.h"
namespace v8 {
namespace base {
/**
* Emulates a virtual address subspace.
*
* This class is (optionally) backed by a page allocation and emulates a virtual
* address space that is potentially larger than that mapping. It generally
* first attempts to satisfy page allocation requests from its backing mapping,
* but will also attempt to obtain new page mappings inside the unmapped space
* through page allocation hints if necessary.
*
* Caveat: an emulated subspace violates the invariant that page allocations in
* an address space will never end up inside a child space and so does not
* provide the same security guarantees.
*/
class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final
: public NON_EXPORTED_BASE(::v8::VirtualAddressSpace) {
public:
// Construct an emulated virtual address subspace of the specified total size,
// potentially backed by a page allocation from the parent space. The newly
// created instance takes ownership of the page allocation (if any) and frees
// it during destruction.
EmulatedVirtualAddressSubspace(v8::VirtualAddressSpace* parent_space,
Address base, size_t mapped_size,
size_t total_size);
~EmulatedVirtualAddressSubspace() override;
void SetRandomSeed(int64_t seed) override;
Address RandomPageAddress() override;
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions permissions) override;
bool FreePages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions permissions) override;
bool CanAllocateSubspaces() override;
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
bool DecommitPages(Address address, size_t size) override;
private:
size_t mapped_size() const { return mapped_size_; }
size_t unmapped_size() const { return size() - mapped_size_; }
Address mapped_base() const { return base(); }
Address unmapped_base() const { return base() + mapped_size_; }
bool Contains(Address outer_start, size_t outer_size, Address inner_start,
size_t inner_size) const {
return (inner_start >= outer_start) &&
((inner_start + inner_size) <= (outer_start + outer_size));
}
bool Contains(Address addr, size_t length) const {
return Contains(base(), size(), addr, length);
}
bool MappedRegionContains(Address addr, size_t length) const {
return Contains(mapped_base(), mapped_size(), addr, length);
}
bool UnmappedRegionContains(Address addr, size_t length) const {
return Contains(unmapped_base(), unmapped_size(), addr, length);
}
// Size of the mapped region located at the beginning of this address space.
const size_t mapped_size_;
// Pointer to the parent space from which the backing pages were allocated.
// Must be kept alive by the owner of this instance.
v8::VirtualAddressSpace* parent_space_;
// Mutex guarding the non-threadsafe RegionAllocator and
// RandomNumberGenerator.
Mutex mutex_;
// RegionAllocator to manage the page allocation and divide it into further
// regions as necessary.
RegionAllocator region_allocator_;
// Random number generator for generating random addresses.
RandomNumberGenerator rng_;
};
} // namespace base
} // namespace v8
#endif // V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_
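A construction sketch for this class (sizes, ownership flow, and the helper
name are illustrative assumptions consistent with the class comment above):

#include <memory>
#include "src/base/emulated-virtual-address-subspace.h"

// Reserve a power-of-two backing region from the parent space, then emulate a
// space twice that size on top of it. The subspace frees the backing pages in
// its destructor.
std::unique_ptr<v8::base::EmulatedVirtualAddressSubspace> MakeEmulatedSubspace(
    v8::VirtualAddressSpace* parent) {
  const size_t mapped_size = size_t{1} << 30;  // 1 GiB actually mapped
  const size_t total_size = size_t{1} << 31;   // 2 GiB emulated
  v8::VirtualAddressSpace::Address base = parent->AllocatePages(
      v8::VirtualAddressSpace::kNoHint, mapped_size,
      parent->allocation_granularity(), v8::PagePermissions::kNoAccess);
  if (!base) return nullptr;
  return std::make_unique<v8::base::EmulatedVirtualAddressSubspace>(
      parent, base, mapped_size, total_size);
}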
......@@ -123,6 +123,8 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
class AddressSpaceReservation;
class PageAllocator;
class TimezoneCache;
class VirtualAddressSpace;
class VirtualAddressSubspace;
// ----------------------------------------------------------------------------
// OS
......@@ -312,6 +314,8 @@ class V8_BASE_EXPORT OS {
friend class MemoryMappedFile;
friend class PosixMemoryMappedFile;
friend class v8::base::PageAllocator;
friend class v8::base::VirtualAddressSpace;
friend class v8::base::VirtualAddressSubspace;
static size_t AllocatePageSize();
......
......@@ -236,6 +236,29 @@ RegionAllocator::Address RegionAllocator::AllocateAlignedRegion(
return region->begin();
}
RegionAllocator::Address RegionAllocator::AllocateRegion(Address hint,
size_t size,
size_t alignment) {
DCHECK(IsAligned(alignment, page_size()));
DCHECK(IsAligned(hint, alignment));
if (hint && contains(hint, size)) {
if (AllocateRegionAt(hint, size)) {
return hint;
}
}
Address address;
if (alignment <= page_size()) {
// TODO(chromium:1218005): Consider using randomized version here.
address = AllocateRegion(size);
} else {
address = AllocateAlignedRegion(size, alignment);
}
return address;
}
size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
DCHECK(IsAligned(new_size, page_size_));
......
......@@ -89,6 +89,11 @@ class V8_BASE_EXPORT RegionAllocator final {
// success or kAllocationFailure.
Address AllocateAlignedRegion(size_t size, size_t alignment);
// Attempts to allocate a region of the given size and alignment at the
// specified address but falls back to allocating the region elsewhere if
// necessary.
Address AllocateRegion(Address hint, size_t size, size_t alignment);
// Frees region at given |address|, returns the size of the region.
// There must be a used region starting at given address otherwise nothing
// will be freed and 0 will be returned.
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/virtual-address-space-page-allocator.h"
namespace v8 {
namespace base {
VirtualAddressSpacePageAllocator::VirtualAddressSpacePageAllocator(
v8::VirtualAddressSpace* vas)
: vas_(vas) {}
void* VirtualAddressSpacePageAllocator::AllocatePages(
void* hint, size_t size, size_t alignment,
PageAllocator::Permission access) {
return reinterpret_cast<void*>(
vas_->AllocatePages(reinterpret_cast<Address>(hint), size, alignment,
static_cast<PagePermissions>(access)));
}
bool VirtualAddressSpacePageAllocator::FreePages(void* ptr, size_t size) {
MutexGuard guard(&mutex_);
Address address = reinterpret_cast<Address>(ptr);
// Was this allocation resized previously? If so, use the original size.
auto result = resized_allocations_.find(address);
if (result != resized_allocations_.end()) {
size = result->second;
resized_allocations_.erase(result);
}
return vas_->FreePages(address, size);
}
bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, size_t size,
size_t new_size) {
// The VirtualAddressSpace class doesn't support this method because it can't
// be properly implemented on top of Windows placeholder mappings (they cannot
// be partially freed or resized while being allocated). Instead, we emulate
// this behaviour by decommitting the released pages, which in effect achieves
// exactly what ReleasePages would normally do as well. However, we still need
// to pass the original size to FreePages eventually, so we'll need to keep
// track of that.
DCHECK_LE(new_size, size);
MutexGuard guard(&mutex_);
// Will fail if the allocation was resized previously, which is desired.
Address address = reinterpret_cast<Address>(ptr);
resized_allocations_.insert({address, size});
return vas_->DecommitPages(address + new_size, size - new_size);
}
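// A short usage sketch of the emulation above. Illustrative only: the helper
// name is hypothetical, and `allocator` is assumed to wrap a live
// VirtualAddressSpace.
void ReleaseThenFreeSketch(
    v8::base::VirtualAddressSpacePageAllocator* allocator) {
  const size_t page = allocator->AllocatePageSize();
  void* region = allocator->AllocatePages(nullptr, 4 * page, page,
                                          v8::PageAllocator::kReadWrite);
  if (!region) return;
  // Keep one page usable; the remaining three are decommitted.
  bool released = allocator->ReleasePages(region, 4 * page, page);
  // Freeing with the shrunken size still works: the allocator looks up the
  // recorded original 4-page size before calling VirtualAddressSpace::FreePages.
  bool freed = allocator->FreePages(region, page);
  (void)released;
  (void)freed;
}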
bool VirtualAddressSpacePageAllocator::SetPermissions(
void* address, size_t size, PageAllocator::Permission access) {
return vas_->SetPagePermissions(reinterpret_cast<Address>(address), size,
static_cast<PagePermissions>(access));
}
bool VirtualAddressSpacePageAllocator::DiscardSystemPages(void* address,
size_t size) {
return vas_->DiscardSystemPages(reinterpret_cast<Address>(address), size);
}
bool VirtualAddressSpacePageAllocator::DecommitPages(void* address,
size_t size) {
return vas_->DecommitPages(reinterpret_cast<Address>(address), size);
}
} // namespace base
} // namespace v8
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_
#define V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_
#include <unordered_map>
#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/platform/platform.h"
namespace v8 {
namespace base {
// This class bridges a VirtualAddressSpace, the future memory management API,
// to a PageAllocator, the current API.
class V8_BASE_EXPORT VirtualAddressSpacePageAllocator
: public v8::PageAllocator {
public:
using Address = uintptr_t;
explicit VirtualAddressSpacePageAllocator(v8::VirtualAddressSpace* vas);
VirtualAddressSpacePageAllocator(const VirtualAddressSpacePageAllocator&) =
delete;
VirtualAddressSpacePageAllocator& operator=(
const VirtualAddressSpacePageAllocator&) = delete;
~VirtualAddressSpacePageAllocator() override = default;
size_t AllocatePageSize() override { return vas_->allocation_granularity(); }
size_t CommitPageSize() override { return vas_->page_size(); }
void SetRandomMmapSeed(int64_t seed) override { vas_->SetRandomSeed(seed); }
void* GetRandomMmapAddr() override {
return reinterpret_cast<void*>(vas_->RandomPageAddress());
}
void* AllocatePages(void* hint, size_t size, size_t alignment,
Permission access) override;
bool FreePages(void* address, size_t size) override;
bool ReleasePages(void* address, size_t size, size_t new_size) override;
bool SetPermissions(void* address, size_t size, Permission access) override;
bool DiscardSystemPages(void* address, size_t size) override;
bool DecommitPages(void* address, size_t size) override;
private:
// Clients of this class must keep the VirtualAddressSpace alive during the
// lifetime of this instance.
v8::VirtualAddressSpace* vas_;
// As the VirtualAddressSpace class doesn't support ReleasePages, this map is
// required to keep track of the original size of resized page allocations.
// See the ReleasePages implementation.
std::unordered_map<Address, size_t> resized_allocations_;
// Mutex guarding the above map.
Mutex mutex_;
};
} // namespace base
} // namespace v8
#endif // V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_
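A minimal bridging sketch (the helper name is hypothetical; the caller must
keep the VirtualAddressSpace alive for the allocator's lifetime, as noted in
the class comment above):

#include <memory>
#include "src/base/virtual-address-space-page-allocator.h"

// Wrap a VirtualAddressSpace so that existing PageAllocator-based code keeps
// working: AllocatePageSize() maps to allocation_granularity() and
// CommitPageSize() maps to page_size().
std::unique_ptr<v8::PageAllocator> MakePageAllocatorBridge(
    v8::VirtualAddressSpace* vas) {
  return std::make_unique<v8::base::VirtualAddressSpacePageAllocator>(vas);
}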
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_VIRTUAL_ADDRESS_SPACE_H_
#define V8_BASE_VIRTUAL_ADDRESS_SPACE_H_
#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
#include "src/base/region-allocator.h"
namespace v8 {
namespace base {
using Address = uintptr_t;
constexpr Address kNullAddress = 0;
class VirtualAddressSubspace;
/*
* Common parent class to implement deletion of subspaces.
*/
class VirtualAddressSpaceBase
: public NON_EXPORTED_BASE(::v8::VirtualAddressSpace) {
public:
using VirtualAddressSpace::VirtualAddressSpace;
private:
friend VirtualAddressSubspace;
// Called by a subspace during destruction. Responsible for freeing the
// address space reservation and any other data associated with the subspace
// in the parent space.
virtual bool FreeSubspace(VirtualAddressSubspace* subspace) = 0;
};
/*
* The virtual address space of the current process. Conceptually, there
* should only be one such "root" instance. However, in practice there is no
* issue with having multiple instances as the actual resources are managed by
* the OS kernel.
*/
class V8_BASE_EXPORT VirtualAddressSpace : public VirtualAddressSpaceBase {
public:
VirtualAddressSpace();
~VirtualAddressSpace() override = default;
void SetRandomSeed(int64_t seed) override;
Address RandomPageAddress() override;
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions access) override;
bool FreePages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions access) override;
bool CanAllocateSubspaces() override;
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
bool DecommitPages(Address address, size_t size) override;
private:
bool FreeSubspace(VirtualAddressSubspace* subspace) override;
};
/*
* A subspace of a parent virtual address space. This represents a reserved
* contiguous region of virtual address space in the current process.
*/
class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase {
public:
~VirtualAddressSubspace() override;
void SetRandomSeed(int64_t seed) override;
Address RandomPageAddress() override;
Address AllocatePages(Address hint, size_t size, size_t alignment,
PagePermissions permissions) override;
bool FreePages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions permissions) override;
bool CanAllocateSubspaces() override { return true; }
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
bool DecommitPages(Address address, size_t size) override;
private:
// The VirtualAddressSpace class creates instances of this class when
// allocating subspaces.
friend class v8::base::VirtualAddressSpace;
bool FreeSubspace(VirtualAddressSubspace* subspace) override;
VirtualAddressSubspace(AddressSpaceReservation reservation,
VirtualAddressSpaceBase* parent_space);
// The address space reservation backing this subspace.
AddressSpaceReservation reservation_;
// Mutex guarding the non-threadsafe RegionAllocator and
// RandomNumberGenerator.
Mutex mutex_;
// RegionAllocator to manage the virtual address reservation and divide it
// into further regions as necessary.
RegionAllocator region_allocator_;
// Random number generator for generating random addresses.
RandomNumberGenerator rng_;
// Pointer to the parent space. Must be kept alive by the owner of this
// instance during its lifetime.
VirtualAddressSpaceBase* parent_space_;
};
} // namespace base
} // namespace v8
#endif // V8_BASE_VIRTUAL_ADDRESS_SPACE_H_
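A sketch of using the concrete classes above (illustrative; the function name
is hypothetical, and subspace support depends on the host OS, hence the
CanAllocateSubspaces() check):

#include "src/base/virtual-address-space.h"

void RootSpaceSketch() {
  v8::base::VirtualAddressSpace root;  // process-wide root space
  if (!root.CanAllocateSubspaces()) return;
  auto subspace = root.AllocateSubspace(
      v8::VirtualAddressSpace::kNoHint, size_t{1} << 30,
      root.allocation_granularity(), v8::PagePermissions::kReadWrite);
  // When |subspace| goes out of scope, its destructor releases the underlying
  // reservation via FreeSubspace() on the parent space.
}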
......@@ -221,8 +221,8 @@ void V8::InitializePlatform(v8::Platform* platform) {
bool V8::InitializeVirtualMemoryCage() {
// Platform must have been initialized already.
CHECK(platform_);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
return GetProcessWideVirtualMemoryCage()->Initialize(page_allocator);
v8::VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
return GetProcessWideVirtualMemoryCage()->Initialize(vas);
}
#endif
......
......@@ -11,8 +11,6 @@
namespace v8 {
class PageAllocator;
namespace internal {
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
......@@ -59,7 +57,7 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
V8VirtualMemoryCage& operator=(V8VirtualMemoryCage&) = delete;
bool Initialize(v8::PageAllocator* page_allocator);
bool Initialize(v8::VirtualAddressSpace* vas);
void Disable() {
CHECK(!initialized_);
disabled_ = true;
......@@ -84,6 +82,10 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
return cage_page_allocator_.get();
}
v8::VirtualAddressSpace* virtual_address_space() const {
return virtual_address_space_.get();
}
bool Contains(Address addr) const {
return addr >= base_ && addr < base_ + size_;
}
......@@ -126,13 +128,18 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
// We allow tests to disable the guard regions around the cage. This is useful
// for example for tests like the SequentialUnmapperTest which track page
// allocations and so would incur a large overhead from the guard regions.
bool Initialize(v8::PageAllocator* page_allocator, size_t size,
// The provided virtual address space must be able to allocate subspaces.
// The size must be a multiple of the allocation granularity of the virtual
// address space.
bool Initialize(v8::VirtualAddressSpace* vas, size_t size,
bool use_guard_regions);
// Used on OSes where reserving virtual memory is too expensive. A fake cage
// does not reserve all of the virtual memory and so doesn't have the desired
// security properties.
bool InitializeAsFakeCage(v8::PageAllocator* page_allocator, size_t size,
// The size and size_to_reserve parameters must be multiples of the
// allocation granularity of the virtual address space.
bool InitializeAsFakeCage(v8::VirtualAddressSpace* vas, size_t size,
size_t size_to_reserve);
// Initialize the caged pointer constants for this cage. Called by the
......@@ -153,9 +160,10 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
bool disabled_ = false;
bool is_fake_cage_ = false;
// The allocator through which the virtual memory of the cage was allocated.
v8::PageAllocator* page_allocator_ = nullptr;
// The allocator to allocate pages inside the cage.
// The virtual address subspace backing the cage.
std::unique_ptr<v8::VirtualAddressSpace> virtual_address_space_;
// The page allocator instance for this cage.
std::unique_ptr<v8::PageAllocator> cage_page_allocator_;
#ifdef V8_CAGED_POINTERS
......
......@@ -15,6 +15,7 @@
#include "src/base/platform/wrappers.h"
#include "src/base/sanitizer/lsan-page-allocator.h"
#include "src/base/vector.h"
#include "src/base/virtual-address-space.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/security/vm-cage.h"
......@@ -84,6 +85,11 @@ v8::PageAllocator* GetPlatformPageAllocator() {
return GetPageAllocatorInitializer()->page_allocator();
}
v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace() {
static base::LeakyObject<base::VirtualAddressSpace> vas;
return vas.get();
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
v8::PageAllocator* GetVirtualMemoryCagePageAllocator() {
// TODO(chromium:1218005) remove this code once the cage is no longer
......@@ -189,7 +195,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
DCHECK_EQ(hint, AlignedAddress(hint, alignment));
DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
if (FLAG_randomize_all_allocations) {
hint = page_allocator->GetRandomMmapAddr();
hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
}
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
......
......@@ -102,6 +102,10 @@ V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
// Returns the platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
// Returns the platform virtual address space instance. Guaranteed to be a
// valid pointer.
V8_EXPORT_PRIVATE v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace();
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Returns the virtual memory cage page allocator instance for allocating pages
// inside the virtual memory cage. Guaranteed to be a valid pointer.
......
......@@ -233,7 +233,9 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
PagePermissionsMap page_permissions_;
};
#if !V8_OS_FUCHSIA
// This test is currently incompatible with the VirtualMemoryCage. Enable it
// once the VirtualAddressSpace interface is stable.
#if !V8_OS_FUCHSIA && !V8_VIRTUAL_MEMORY_CAGE
class SequentialUnmapperTest : public TestWithIsolate {
public:
SequentialUnmapperTest() = default;
......@@ -362,7 +364,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
tracking_page_allocator()->CheckIsFree(page->address(), page_size);
#endif // V8_COMPRESS_POINTERS
}
#endif // !V8_OS_FUCHSIA
#endif // !V8_OS_FUCHSIA && !V8_VIRTUAL_MEMORY_CAGE
} // namespace internal
} // namespace v8
......@@ -4,6 +4,7 @@
#include <vector>
#include "src/base/virtual-address-space.h"
#include "src/security/vm-cage.h"
#include "test/unittests/test-utils.h"
......@@ -13,7 +14,7 @@ namespace v8 {
namespace internal {
TEST(VirtualMemoryCageTest, Initialization) {
base::PageAllocator page_allocator;
base::VirtualAddressSpace vas;
V8VirtualMemoryCage cage;
......@@ -22,7 +23,7 @@ TEST(VirtualMemoryCageTest, Initialization) {
EXPECT_FALSE(cage.is_fake_cage());
EXPECT_EQ(cage.size(), 0UL);
EXPECT_TRUE(cage.Initialize(&page_allocator));
EXPECT_TRUE(cage.Initialize(&vas));
EXPECT_TRUE(cage.is_initialized());
EXPECT_NE(cage.base(), 0UL);
......@@ -34,11 +35,14 @@ TEST(VirtualMemoryCageTest, Initialization) {
}
TEST(VirtualMemoryCageTest, InitializationWithSize) {
base::PageAllocator page_allocator;
base::VirtualAddressSpace vas;
// This test only works if virtual memory subspaces can be allocated.
if (!vas.CanAllocateSubspaces()) return;
V8VirtualMemoryCage cage;
size_t size = kVirtualMemoryCageMinimumSize;
const bool use_guard_regions = false;
EXPECT_TRUE(cage.Initialize(&page_allocator, size, use_guard_regions));
EXPECT_TRUE(cage.Initialize(&vas, size, use_guard_regions));
EXPECT_TRUE(cage.is_initialized());
EXPECT_FALSE(cage.is_fake_cage());
......@@ -48,14 +52,14 @@ TEST(VirtualMemoryCageTest, InitializationWithSize) {
}
TEST(VirtualMemoryCageTest, InitializationAsFakeCage) {
base::PageAllocator page_allocator;
base::VirtualAddressSpace vas;
V8VirtualMemoryCage cage;
// Total size of the fake cage.
size_t size = kVirtualMemoryCageSize;
// Size of the virtual memory that is actually reserved at the start of the
// cage.
size_t reserved_size = 2 * page_allocator.AllocatePageSize();
EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
size_t reserved_size = 2 * vas.allocation_granularity();
EXPECT_TRUE(cage.InitializeAsFakeCage(&vas, size, reserved_size));
EXPECT_TRUE(cage.is_initialized());
EXPECT_TRUE(cage.is_fake_cage());
......@@ -68,9 +72,9 @@ TEST(VirtualMemoryCageTest, InitializationAsFakeCage) {
}
TEST(VirtualMemloryCageTest, Contains) {
base::PageAllocator page_allocator;
base::VirtualAddressSpace vas;
V8VirtualMemoryCage cage;
EXPECT_TRUE(cage.Initialize(&page_allocator));
EXPECT_TRUE(cage.Initialize(&vas));
Address base = cage.base();
size_t size = cage.size();
......@@ -99,29 +103,29 @@ void TestCagePageAllocation(V8VirtualMemoryCage& cage) {
const size_t kAllocatinSizesInPages[] = {1, 1, 2, 3, 5, 8, 13, 21, 34};
constexpr int kNumAllocations = arraysize(kAllocatinSizesInPages);
PageAllocator* allocator = cage.page_allocator();
size_t page_size = allocator->AllocatePageSize();
std::vector<void*> allocations;
VirtualAddressSpace* vas = cage.virtual_address_space();
size_t allocation_granularity = vas->allocation_granularity();
std::vector<Address> allocations;
for (int i = 0; i < kNumAllocations; i++) {
size_t length = page_size * kAllocatinSizesInPages[i];
size_t alignment = page_size;
void* ptr = allocator->AllocatePages(nullptr, length, alignment,
PageAllocator::kNoAccess);
EXPECT_NE(ptr, nullptr);
size_t length = allocation_granularity * kAllocatinSizesInPages[i];
size_t alignment = allocation_granularity;
Address ptr = vas->AllocatePages(VirtualAddressSpace::kNoHint, length,
alignment, PagePermissions::kNoAccess);
EXPECT_NE(ptr, kNullAddress);
EXPECT_TRUE(cage.Contains(ptr));
allocations.push_back(ptr);
}
for (int i = 0; i < kNumAllocations; i++) {
size_t length = page_size * kAllocatinSizesInPages[i];
allocator->FreePages(allocations[i], length);
size_t length = allocation_granularity * kAllocatinSizesInPages[i];
EXPECT_TRUE(vas->FreePages(allocations[i], length));
}
}
TEST(VirtualMemoryCageTest, PageAllocation) {
base::PageAllocator page_allocator;
base::VirtualAddressSpace vas;
V8VirtualMemoryCage cage;
EXPECT_TRUE(cage.Initialize(&page_allocator));
EXPECT_TRUE(cage.Initialize(&vas));
TestCagePageAllocation(cage);
......@@ -129,13 +133,13 @@ TEST(VirtualMemoryCageTest, PageAllocation) {
}
TEST(VirtualMemoryCageTest, FakeCagePageAllocation) {
base::PageAllocator page_allocator;
base::VirtualAddressSpace vas;
V8VirtualMemoryCage cage;
size_t size = kVirtualMemoryCageSize;
// Only reserve two pages so the test will allocate memory inside and outside
// of the reserved region.
size_t reserved_size = 2 * page_allocator.AllocatePageSize();
EXPECT_TRUE(cage.InitializeAsFakeCage(&page_allocator, size, reserved_size));
size_t reserved_size = 2 * vas.allocation_granularity();
EXPECT_TRUE(cage.InitializeAsFakeCage(&vas, size, reserved_size));
TestCagePageAllocation(cage);
......