Commit a76e7b4b authored by Samuel Groß, committed by V8 LUCI CQ

[base] Add VirtualAddressSpace::AllocateSharedPages

This API allows allocating shared memory mappings inside a virtual
address space from a platform-specific handle to a shared memory object.
This will make it possible to allocate shared memory inside the sandbox,
for example as backing memory for ArrayBuffers.

Bug: chromium:1218005
Change-Id: I4f1f50baec50734e846496cff78046e4fffe75c5
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3383777
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79123}
parent 2b98251c
......@@ -512,6 +512,57 @@ class PageAllocator {
virtual bool CanAllocateSharedPages() { return false; }
};
// Opaque type representing a handle to a shared memory region.
using PlatformSharedMemoryHandle = intptr_t;
// Sentinel value denoting the absence of a valid handle.
static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1;
// Conversion routines from the platform-dependent shared memory identifiers
// into the opaque PlatformSharedMemoryHandle type. These use the underlying
// types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t)
// to avoid pulling in large OS header files into this header file. Instead,
// the users of these routines are expected to include the respective OS
// headers in addition to this one.
#if V8_OS_MACOSX
// Convert between a shared memory handle and a mach_port_t referencing a memory
// entry object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry(
unsigned int port) {
return static_cast<PlatformSharedMemoryHandle>(port);
}
inline unsigned int MachMemoryEntryFromSharedMemoryHandle(
PlatformSharedMemoryHandle handle) {
return static_cast<unsigned int>(handle);
}
#elif V8_OS_FUCHSIA
// Convert between a shared memory handle and a zx_handle_t to a VMO.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) {
return static_cast<PlatformSharedMemoryHandle>(handle);
}
inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) {
return static_cast<uint32_t>(handle);
}
#elif V8_OS_WIN
// Convert between a shared memory handle and a Windows HANDLE to a file mapping
// object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping(
void* handle) {
return reinterpret_cast<PlatformSharedMemoryHandle>(handle);
}
inline void* FileMappingFromSharedMemoryHandle(
PlatformSharedMemoryHandle handle) {
return reinterpret_cast<void*>(handle);
}
#else
// Convert between a shared memory handle and a file descriptor.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) {
return static_cast<PlatformSharedMemoryHandle>(fd);
}
inline int FileDescriptorFromSharedMemoryHandle(
PlatformSharedMemoryHandle handle) {
return static_cast<int>(handle);
}
#endif
/**
* Possible permissions for memory pages.
*/
......@@ -692,6 +743,43 @@ class VirtualAddressSpace {
virtual V8_WARN_UNUSED_RESULT bool FreeGuardRegion(Address address,
size_t size) = 0;
/**
 * Allocates shared memory pages with the given permissions.
 *
 * \param hint Placement hint. See AllocatePages.
 *
 * \param size The size of the allocation in bytes. Must be a multiple of the
 * allocation_granularity().
 *
 * \param permissions The page permissions of the newly allocated pages.
 *
 * \param handle A platform-specific handle to a shared memory object. See
 * the SharedMemoryHandleFromX routines above for ways to obtain these.
 *
 * \param offset The offset in the shared memory object at which the mapping
 * should start. Must be a multiple of the allocation_granularity().
 *
 * \returns the start address of the allocated pages on success, zero on
 * failure.
 */
virtual V8_WARN_UNUSED_RESULT Address
AllocateSharedPages(Address hint, size_t size, PagePermissions permissions,
PlatformSharedMemoryHandle handle, uint64_t offset) = 0;
/**
 * Frees previously allocated shared pages.
 *
 * \param address The start address of the pages to free. This address must
 * have been obtained from a call to AllocateSharedPages.
 *
 * \param size The size in bytes of the region to free. This must match the
 * size passed to AllocateSharedPages when the pages were allocated.
 *
 * \returns true on success, false otherwise.
 */
virtual V8_WARN_UNUSED_RESULT bool FreeSharedPages(Address address,
size_t size) = 0;
/**
* Whether this instance can allocate subspaces or not.
*
......
......@@ -64,16 +64,16 @@ Address EmulatedVirtualAddressSubspace::AllocatePages(
// No luck or hint is outside of the mapped region. Try to allocate pages in
// the unmapped space using page allocation hints instead.
// Somewhat arbitrary size limitation to ensure that the loop below for
// finding a fitting base address hint terminates quickly.
if (size >= (unmapped_size() / 2)) return kNullAddress;
if (!IsUsableSizeForUnmappedRegion(size)) return kNullAddress;
static constexpr int kMaxAttempts = 10;
for (int i = 0; i < kMaxAttempts; i++) {
// If the hint wouldn't result in the entire allocation being inside the
// managed region, simply retry. There is at least a 50% chance of
// getting a usable address due to the size restriction above.
// If an unmapped region exists, it must cover at least 50% of the whole
// space (unmapped + mapped region). Since we limit the size of allocation
// to 50% of the unmapped region (see IsUsableSizeForUnmappedRegion), a
// random page address has at least a 25% chance of being a usable base. As
// such, this loop should usually terminate quickly.
DCHECK_GE(unmapped_size(), mapped_size());
while (!UnmappedRegionContains(hint, size)) {
hint = RandomPageAddress();
}
......@@ -105,6 +105,39 @@ bool EmulatedVirtualAddressSubspace::FreePages(Address address, size_t size) {
return parent_space_->FreePages(address, size);
}
Address EmulatedVirtualAddressSubspace::AllocateSharedPages(
    Address hint, size_t size, PagePermissions permissions,
    PlatformSharedMemoryHandle handle, uint64_t offset) {
  // Shared pages can only be placed in the unmapped portion of this subspace,
  // so reject sizes that are too large for that region.
  if (!IsUsableSizeForUnmappedRegion(size)) return kNullAddress;

  static constexpr int kMaxAttempts = 10;
  int attempt = 0;
  while (attempt++ < kMaxAttempts) {
    // See AllocatePages() for why this loop usually terminates quickly.
    DCHECK_GE(unmapped_size(), mapped_size());
    while (!UnmappedRegionContains(hint, size)) {
      hint = RandomPageAddress();
    }

    Address result = parent_space_->AllocateSharedPages(
        hint, size, permissions, handle, offset);
    if (UnmappedRegionContains(result, size)) return result;
    // The parent space either failed or placed the mapping outside of our
    // unmapped region. Undo any successful allocation, then retry with a
    // fresh random hint.
    if (result) CHECK(parent_space_->FreeSharedPages(result, size));
    hint = RandomPageAddress();
  }
  return kNullAddress;
}
// Frees shared pages by delegating to the parent space, which performed the
// actual mapping in AllocateSharedPages().
bool EmulatedVirtualAddressSubspace::FreeSharedPages(Address address,
size_t size) {
return parent_space_->FreeSharedPages(address, size);
}
bool EmulatedVirtualAddressSubspace::SetPagePermissions(
Address address, size_t size, PagePermissions permissions) {
DCHECK(Contains(address, size));
......
......@@ -50,6 +50,13 @@ class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final
bool FreePages(Address address, size_t size) override;
Address AllocateSharedPages(Address hint, size_t size,
PagePermissions permissions,
PlatformSharedMemoryHandle handle,
uint64_t offset) override;
bool FreeSharedPages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions permissions) override;
......@@ -92,6 +99,13 @@ class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final
return Contains(unmapped_base(), unmapped_size(), addr, length);
}
// Helper function to define a limit for the size of allocations in the
// unmapped region. This limit makes it possible to estimate the expected
// runtime of some loops in the Allocate methods.
// Returns true if |size| is at most half of the unmapped region, which gives
// a random page address a reasonable chance of being a usable base.
bool IsUsableSizeForUnmappedRegion(size_t size) const {
return size <= (unmapped_size() / 2);
}
// Size of the mapped region located at the beginning of this address space.
const size_t mapped_size_;
......
......@@ -88,31 +88,14 @@ enum class PlacementMode {
kFixed
};
void* AllocateInternal(const zx::vmar& vmar, void* vmar_base, size_t page_size,
void* address, PlacementMode placement, size_t size,
size_t alignment, OS::MemoryPermission access) {
void* MapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
void* address, const zx::vmo& vmo, uint64_t offset,
PlacementMode placement, size_t size, size_t alignment,
OS::MemoryPermission access) {
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % alignment);
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr);
zx::vmo vmo;
if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
vmo.set_property(ZX_PROP_NAME, kVirtualMemoryName,
strlen(kVirtualMemoryName));
// Always call zx_vmo_replace_as_executable() in case the memory will need
// to be marked as executable in the future.
// TOOD(https://crbug.com/v8/8899): Only call this when we know that the
// region will need to be marked as executable in the future.
zx::unowned_resource vmex(g_vmex_resource);
if (vmo.replace_as_executable(*vmex, &vmo) != ZX_OK) {
return nullptr;
}
zx_vm_option_t options = GetProtectionFromMemoryPermission(access);
zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment);
......@@ -147,8 +130,32 @@ void* AllocateInternal(const zx::vmar& vmar, void* vmar_base, size_t page_size,
return reinterpret_cast<void*>(result);
}
bool FreeInternal(const zx::vmar& vmar, size_t page_size, void* address,
const size_t size) {
// Creates a new VMO of |size| bytes, names it for diagnostics, makes it
// potentially executable, and maps it into |vmar|. Returns the mapped
// address or nullptr on failure.
void* CreateAndMapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
void* address, PlacementMode placement, size_t size,
size_t alignment, OS::MemoryPermission access) {
zx::vmo vmo;
if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
vmo.set_property(ZX_PROP_NAME, kVirtualMemoryName,
strlen(kVirtualMemoryName));
// Always call zx_vmo_replace_as_executable() in case the memory will need
// to be marked as executable in the future.
// TODO(https://crbug.com/v8/8899): Only call this when we know that the
// region will need to be marked as executable in the future.
zx::unowned_resource vmex(g_vmex_resource);
if (vmo.replace_as_executable(*vmex, &vmo) != ZX_OK) {
return nullptr;
}
// Offset 0: the VMO was created above exclusively for this mapping.
return MapVmo(vmar, vmar_base, page_size, address, vmo, 0, placement, size,
alignment, access);
}
// Unmaps a page-aligned region previously mapped via MapVmo().
bool UnmapVmo(const zx::vmar& vmar, size_t page_size, void* address,
size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
DCHECK_EQ(0, size % page_size);
return vmar.unmap(reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
......@@ -241,15 +248,31 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
PlacementMode placement =
address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
return AllocateInternal(*zx::vmar::root_self(), g_root_vmar_base,
return CreateAndMapVmo(*zx::vmar::root_self(), g_root_vmar_base,
AllocatePageSize(), address, placement, size,
alignment, access);
}
// static
bool OS::Free(void* address, const size_t size) {
return FreeInternal(*zx::vmar::root_self(), AllocatePageSize(), address,
size);
bool OS::Free(void* address, size_t size) {
return UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size);
}
// static
// Maps the shared memory object behind |handle| (a zx_handle_t to a VMO on
// Fuchsia) into the root VMAR at |offset|, preferring |address| as a hint.
void* OS::AllocateShared(void* address, size_t size,
OS::MemoryPermission access,
PlatformSharedMemoryHandle handle, uint64_t offset) {
PlacementMode placement =
address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
return MapVmo(*zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(),
address, *vmo, offset, placement, size, AllocatePageSize(),
access);
}
// static
// Counterpart to AllocateShared(): removes the shared mapping.
bool OS::FreeShared(void* address, size_t size) {
return UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size);
}
// static
......@@ -303,6 +326,22 @@ bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
return vmar.destroy() == ZX_OK;
}
// static
// Creates a VMO of |size| bytes for use as shared memory in tests.
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
zx::vmo vmo;
if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
return kInvalidSharedMemoryHandle;
}
// Release ownership: the returned handle now owns the VMO and must be
// cleaned up via DestroySharedMemoryHandle().
return SharedMemoryHandleFromVMO(vmo.release());
}
// static
void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
DCHECK_NE(kInvalidSharedMemoryHandle, handle);
zx_handle_t vmo = VMOFromSharedMemoryHandle(handle);
zx_handle_close(vmo);
}
// static
bool OS::HasLazyCommits() { return true; }
......@@ -366,7 +405,7 @@ bool AddressSpaceReservation::FreeSubReservation(
bool AddressSpaceReservation::Allocate(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
void* allocation = AllocateInternal(
void* allocation = CreateAndMapVmo(
*zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address,
PlacementMode::kFixed, size, OS::AllocatePageSize(), access);
DCHECK(!allocation || allocation == address);
......@@ -375,7 +414,24 @@ bool AddressSpaceReservation::Allocate(void* address, size_t size,
bool AddressSpaceReservation::Free(void* address, size_t size) {
DCHECK(Contains(address, size));
return FreeInternal(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
size);
}
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
OS::MemoryPermission access,
PlatformSharedMemoryHandle handle,
uint64_t offset) {
DCHECK(Contains(address, size));
zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
return MapVmo(*zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(),
address, *vmo, offset, PlacementMode::kFixed, size,
OS::AllocatePageSize(), access);
}
bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
DCHECK(Contains(address, size));
return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
size);
}
......
......@@ -5,20 +5,18 @@
// Platform-specific code for MacOS goes here. For the POSIX-compatible
// parts, the implementation is in platform-posix.cc.
#include <dlfcn.h>
#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <sys/mman.h>
#include <unistd.h>
#include <AvailabilityMacros.h>
#include <dlfcn.h>
#include <errno.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_map.h>
#include <mach/vm_statistics.h>
#include <pthread.h>
#include <semaphore.h>
......@@ -26,10 +24,12 @@
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <cmath>
......@@ -99,6 +99,93 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
return {};
}
namespace {
// Translates a V8 memory permission into the corresponding mach VM
// protection bits.
vm_prot_t GetVMProtFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
case OS::MemoryPermission::kNoAccessWillJitLater:
return VM_PROT_NONE;
case OS::MemoryPermission::kRead:
return VM_PROT_READ;
case OS::MemoryPermission::kReadWrite:
return VM_PROT_READ | VM_PROT_WRITE;
case OS::MemoryPermission::kReadWriteExecute:
return VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
case OS::MemoryPermission::kReadExecute:
return VM_PROT_READ | VM_PROT_EXECUTE;
}
UNREACHABLE();
}
// Thin wrapper around mach_vm_map() that uses the same protection for the
// current and maximum page protections and disables inheritance by child
// tasks (VM_INHERIT_NONE).
kern_return_t mach_vm_map_wrapper(mach_vm_address_t* address,
mach_vm_size_t size, int flags,
mach_port_t port,
memory_object_offset_t offset,
vm_prot_t prot) {
vm_prot_t current_prot = prot;
vm_prot_t maximum_prot = current_prot;
return mach_vm_map(mach_task_self(), address, size, 0, flags, port, offset,
FALSE, current_prot, maximum_prot, VM_INHERIT_NONE);
}
} // namespace
// static
// Creates a mach memory entry (a named VM object) of |size| bytes for tests.
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
mach_vm_size_t vm_size = size;
mach_port_t port;
kern_return_t kr = mach_make_memory_entry_64(
mach_task_self(), &vm_size, 0,
MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port,
MACH_PORT_NULL);
if (kr != KERN_SUCCESS) return kInvalidSharedMemoryHandle;
return SharedMemoryHandleFromMachMemoryEntry(port);
}
// static
// Drops the port right obtained in CreateSharedMemoryHandleForTesting().
void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
DCHECK_NE(kInvalidSharedMemoryHandle, handle);
mach_port_t port = MachMemoryEntryFromSharedMemoryHandle(handle);
CHECK_EQ(KERN_SUCCESS, mach_port_deallocate(mach_task_self(), port));
}
// static
// Maps the memory entry behind |handle| at |offset|. First attempts a fixed
// mapping at |hint|, then falls back to letting the kernel pick an address.
void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
PlatformSharedMemoryHandle handle, uint64_t offset) {
DCHECK_EQ(0, size % AllocatePageSize());
mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(hint);
vm_prot_t prot = GetVMProtFromMemoryPermission(access);
mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
kern_return_t kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED,
shared_mem_port, offset, prot);
if (kr != KERN_SUCCESS) {
// Retry without hint.
kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_ANYWHERE, shared_mem_port,
offset, prot);
}
if (kr != KERN_SUCCESS) return nullptr;
return reinterpret_cast<void*>(addr);
}
// Maps shared memory at the fixed |address| inside this reservation.
// VM_FLAGS_OVERWRITE replaces the existing mapping at that address in place.
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
OS::MemoryPermission access,
PlatformSharedMemoryHandle handle,
uint64_t offset) {
DCHECK(Contains(address, size));
vm_prot_t prot = GetVMProtFromMemoryPermission(access);
mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(address);
mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
kern_return_t kr =
mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
shared_mem_port, offset, prot);
return kr == KERN_SUCCESS;
}
// static
Stack::StackSlot Stack::GetStackStart() {
return pthread_get_stackaddr_np(pthread_self());
......
......@@ -12,15 +12,15 @@
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
#include <pthread_np.h> // for pthread_set_name_np
#endif
#include <fcntl.h>
#include <sched.h> // for sched_yield
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/sysctl.h> // for sysctl
......@@ -428,12 +428,32 @@ void* OS::AllocateShared(size_t size, MemoryPermission access) {
}
// static
bool OS::Free(void* address, const size_t size) {
bool OS::Free(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
DCHECK_EQ(0, size % AllocatePageSize());
return munmap(address, size) == 0;
}
// macOS specific implementation in platform-macos.cc.
#if !defined(V8_OS_MACOSX)
// static
void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
                         PlatformSharedMemoryHandle handle, uint64_t offset) {
  // Maps the shared memory object behind |handle| (a file descriptor on
  // POSIX platforms) at |offset|, preferring |hint| as the base address.
  DCHECK_EQ(0, size % AllocatePageSize());
  const int prot = GetProtectionFromMemoryPermission(access);
  const int fd = FileDescriptorFromSharedMemoryHandle(handle);
  void* mapping = mmap(hint, size, prot, MAP_SHARED, fd, offset);
  return mapping == MAP_FAILED ? nullptr : mapping;
}
#endif  // !defined(V8_OS_MACOSX)
// static
// Unmaps a shared mapping previously established via AllocateShared().
bool OS::FreeShared(void* address, size_t size) {
DCHECK_EQ(0, size % AllocatePageSize());
return munmap(address, size) == 0;
}
// static
bool OS::Release(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
......@@ -552,6 +572,30 @@ bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
return Free(reservation.base(), reservation.size());
}
// macOS specific implementation in platform-macos.cc.
#if !defined(V8_OS_MACOSX)
// static
// Creates a POSIX shared memory object of |size| bytes for tests. Only
// supported on (non-Android) Linux here.
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
#if V8_OS_LINUX && !V8_OS_ANDROID
const char* shm_name = "/V8_SharedMemoryForTesting";
// NOTE(review): S_IREAD/S_IWRITE are obsolete aliases of S_IRUSR/S_IWUSR.
int fd = shm_open(shm_name, O_RDWR | O_CREAT, S_IREAD | S_IWRITE);
if (fd == -1) return kInvalidSharedMemoryHandle;
CHECK_EQ(0, ftruncate(fd, size));
// Unlink the name right away: the object stays alive through |fd| and goes
// away automatically once the descriptor is closed.
CHECK_EQ(0, shm_unlink(shm_name));
return SharedMemoryHandleFromFileDescriptor(fd);
#else
// Not supported on this platform.
return kInvalidSharedMemoryHandle;
#endif
}
// static
void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
DCHECK_NE(kInvalidSharedMemoryHandle, handle);
int fd = FileDescriptorFromSharedMemoryHandle(handle);
CHECK_EQ(0, close(fd));
}
#endif // !defined(V8_OS_MACOSX)
// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
......@@ -906,6 +950,26 @@ bool AddressSpaceReservation::Free(void* address, size_t size) {
return OS::DecommitPages(address, size);
}
// macOS specific implementation in platform-macos.cc.
#if !defined(V8_OS_MACOSX)
// Maps the shared memory object behind |handle| at the fixed |address|
// inside this reservation (MAP_FIXED replaces the existing mapping).
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
OS::MemoryPermission access,
PlatformSharedMemoryHandle handle,
uint64_t offset) {
DCHECK(Contains(address, size));
int prot = GetProtectionFromMemoryPermission(access);
int fd = FileDescriptorFromSharedMemoryHandle(handle);
return mmap(address, size, prot, MAP_SHARED | MAP_FIXED, fd, offset) !=
MAP_FAILED;
}
#endif // !defined(V8_OS_MACOSX)
bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
DCHECK(Contains(address, size));
// Replace the shared mapping with an inaccessible private anonymous mapping
// instead of unmapping, so the address range stays reserved.
return mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
-1, 0) == address;
}
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
......
......@@ -724,7 +724,14 @@ void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
typedef PVOID (*VirtualAlloc2_t)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
MEM_EXTENDED_PARAMETER*, ULONG);
VirtualAlloc2_t VirtualAlloc2;
VirtualAlloc2_t VirtualAlloc2 = nullptr;
typedef PVOID (*MapViewOfFile3_t)(HANDLE, HANDLE, PVOID, ULONG64, SIZE_T, ULONG,
ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
MapViewOfFile3_t MapViewOfFile3 = nullptr;
typedef PVOID (*UnmapViewOfFile2_t)(HANDLE, PVOID, ULONG);
UnmapViewOfFile2_t UnmapViewOfFile2 = nullptr;
void OS::EnsureWin32MemoryAPILoaded() {
static bool loaded = false;
......@@ -732,6 +739,12 @@ void OS::EnsureWin32MemoryAPILoaded() {
VirtualAlloc2 = (VirtualAlloc2_t)GetProcAddress(
GetModuleHandle(L"kernelbase.dll"), "VirtualAlloc2");
MapViewOfFile3 = (MapViewOfFile3_t)GetProcAddress(
GetModuleHandle(L"kernelbase.dll"), "MapViewOfFile3");
UnmapViewOfFile2 = (UnmapViewOfFile2_t)GetProcAddress(
GetModuleHandle(L"kernelbase.dll"), "UnmapViewOfFile2");
loaded = true;
}
}
......@@ -815,6 +828,22 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
// Desired access parameter for MapViewOfFile
DWORD GetFileViewAccessFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
case OS::MemoryPermission::kNoAccessWillJitLater:
case OS::MemoryPermission::kRead:
// kNoAccess and kNoAccessWillJitLater are mapped to read-only view
// access here.
return FILE_MAP_READ;
case OS::MemoryPermission::kReadWrite:
return FILE_MAP_READ | FILE_MAP_WRITE;
default:
// Execute access is not supported
break;
}
UNREACHABLE();
}
void* VirtualAllocWrapper(void* address, size_t size, DWORD flags,
DWORD protect) {
if (VirtualAlloc2) {
......@@ -897,13 +926,41 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
}
// static
bool OS::Free(void* address, const size_t size) {
bool OS::Free(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
DCHECK_EQ(0, size % AllocatePageSize());
USE(size);
return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
// static
// Maps a view of the file mapping object behind |handle| at |offset|.
// First tries to honor |hint| via MapViewOfFileEx, then lets the OS pick.
void* OS::AllocateShared(void* hint, size_t size, MemoryPermission permission,
PlatformSharedMemoryHandle handle, uint64_t offset) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(hint) % AllocatePageSize());
DCHECK_EQ(0, size % AllocatePageSize());
DCHECK_EQ(0, offset % AllocatePageSize());
// The 64-bit offset is passed as two 32-bit halves.
DWORD off_hi = static_cast<DWORD>(offset >> 32);
DWORD off_lo = static_cast<DWORD>(offset);
DWORD access = GetFileViewAccessFromMemoryPermission(permission);
HANDLE file_mapping = FileMappingFromSharedMemoryHandle(handle);
void* result =
MapViewOfFileEx(file_mapping, access, off_hi, off_lo, size, hint);
if (!result) {
// Retry without hint.
result = MapViewOfFile(file_mapping, access, off_hi, off_lo, size);
}
return result;
}
// static
// |size| is unused: UnmapViewOfFile always removes the entire view.
bool OS::FreeShared(void* address, size_t size) {
return UnmapViewOfFile(address);
}
// static
bool OS::Release(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
......@@ -965,7 +1022,10 @@ bool OS::DecommitPages(void* address, size_t size) {
}
// static
bool OS::CanReserveAddressSpace() { return VirtualAlloc2 != nullptr; }
bool OS::CanReserveAddressSpace() {
return VirtualAlloc2 != nullptr && MapViewOfFile3 != nullptr &&
UnmapViewOfFile2 != nullptr;
}
// static
Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
......@@ -993,6 +1053,21 @@ bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
return OS::Free(reservation.base(), reservation.size());
}
// static
// Creates an anonymous (pagefile-backed) file mapping of |size| bytes for
// use as shared memory in tests.
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
  // CreateFileMapping takes the size as two 32-bit halves. Passing 0 as the
  // high half would silently truncate sizes >= 4GB on 64-bit builds.
  DWORD size_hi = static_cast<DWORD>(static_cast<uint64_t>(size) >> 32);
  DWORD size_lo = static_cast<DWORD>(size);
  HANDLE handle = CreateFileMapping(INVALID_HANDLE_VALUE, nullptr,
                                    PAGE_READWRITE, size_hi, size_lo, nullptr);
  if (!handle) return kInvalidSharedMemoryHandle;
  return SharedMemoryHandleFromFileMapping(handle);
}
// static
// Closes the file mapping object wrapped in |handle|.
void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
DCHECK_NE(kInvalidSharedMemoryHandle, handle);
HANDLE file_mapping = FileMappingFromSharedMemoryHandle(handle);
CHECK(CloseHandle(file_mapping));
}
// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
......@@ -1147,7 +1222,7 @@ bool AddressSpaceReservation::Allocate(void* address, size_t size,
? MEM_RESERVE | MEM_REPLACE_PLACEHOLDER
: MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER;
DWORD protect = GetProtectionFromMemoryPermission(access);
return VirtualAlloc2(nullptr, address, size, flags, protect, NULL, 0);
return VirtualAlloc2(nullptr, address, size, flags, protect, nullptr, 0);
}
bool AddressSpaceReservation::Free(void* address, size_t size) {
......@@ -1155,6 +1230,26 @@ bool AddressSpaceReservation::Free(void* address, size_t size) {
return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}
// Maps shared memory into a placeholder region previously reserved at
// |address| (MEM_REPLACE_PLACEHOLDER requires an existing placeholder).
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
OS::MemoryPermission access,
PlatformSharedMemoryHandle handle,
uint64_t offset) {
DCHECK(Contains(address, size));
// Availability of MapViewOfFile3 is checked in OS::CanReserveAddressSpace().
CHECK(MapViewOfFile3);
DWORD protect = GetProtectionFromMemoryPermission(access);
HANDLE file_mapping = FileMappingFromSharedMemoryHandle(handle);
return MapViewOfFile3(file_mapping, nullptr, address, offset, size,
MEM_REPLACE_PLACEHOLDER, protect, nullptr, 0);
}
bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
DCHECK(Contains(address, size));
CHECK(UnmapViewOfFile2);
// MEM_PRESERVE_PLACEHOLDER keeps the address range reserved after unmap.
return UnmapViewOfFile2(nullptr, address, MEM_PRESERVE_PLACEHOLDER);
}
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
......
......@@ -26,6 +26,7 @@
#include <string>
#include <vector>
#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
......@@ -208,6 +209,11 @@ class V8_BASE_EXPORT OS {
kNoAccessWillJitLater
};
// Helpers to create shared memory objects. Currently only used for testing.
static PlatformSharedMemoryHandle CreateSharedMemoryHandleForTesting(
size_t size);
static void DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle);
static bool HasLazyCommits();
// Sleep for a specified time interval.
......@@ -335,7 +341,13 @@ class V8_BASE_EXPORT OS {
void* new_address,
size_t size);
V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
V8_WARN_UNUSED_RESULT static bool Free(void* address, size_t size);
V8_WARN_UNUSED_RESULT static void* AllocateShared(
void* address, size_t size, OS::MemoryPermission access,
PlatformSharedMemoryHandle handle, uint64_t offset);
V8_WARN_UNUSED_RESULT static bool FreeShared(void* address, size_t size);
V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
......@@ -405,6 +417,13 @@ class V8_BASE_EXPORT AddressSpaceReservation {
V8_WARN_UNUSED_RESULT bool Free(void* address, size_t size);
V8_WARN_UNUSED_RESULT bool AllocateShared(void* address, size_t size,
OS::MemoryPermission access,
PlatformSharedMemoryHandle handle,
uint64_t offset);
V8_WARN_UNUSED_RESULT bool FreeShared(void* address, size_t size);
V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
OS::MemoryPermission access);
......
......@@ -28,7 +28,7 @@ Address LsanVirtualAddressSpace::AllocatePages(Address hint, size_t size,
PagePermissions permissions) {
Address result = vas_->AllocatePages(hint, size, alignment, permissions);
#if defined(LEAK_SANITIZER)
if (result != 0) {
if (result) {
__lsan_register_root_region(reinterpret_cast<void*>(result), size);
}
#endif // defined(LEAK_SANITIZER)
......@@ -45,6 +45,29 @@ bool LsanVirtualAddressSpace::FreePages(Address address, size_t size) {
return result;
}
Address LsanVirtualAddressSpace::AllocateSharedPages(
Address hint, size_t size, PagePermissions permissions,
PlatformSharedMemoryHandle handle, uint64_t offset) {
// Delegate to the wrapped space, then register the new region with
// LeakSanitizer so pointers stored in it are treated as roots.
Address result =
vas_->AllocateSharedPages(hint, size, permissions, handle, offset);
#if defined(LEAK_SANITIZER)
if (result) {
__lsan_register_root_region(reinterpret_cast<void*>(result), size);
}
#endif // defined(LEAK_SANITIZER)
return result;
}
bool LsanVirtualAddressSpace::FreeSharedPages(Address address, size_t size) {
// Unregister from LeakSanitizer only if the underlying free succeeded.
bool result = vas_->FreeSharedPages(address, size);
#if defined(LEAK_SANITIZER)
if (result) {
__lsan_unregister_root_region(reinterpret_cast<void*>(address), size);
}
#endif // defined(LEAK_SANITIZER)
return result;
}
std::unique_ptr<VirtualAddressSpace> LsanVirtualAddressSpace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_page_permissions) {
......
......@@ -35,6 +35,13 @@ class V8_BASE_EXPORT LsanVirtualAddressSpace final
bool FreePages(Address address, size_t size) override;
Address AllocateSharedPages(Address hint, size_t size,
PagePermissions permissions,
PlatformSharedMemoryHandle handle,
uint64_t offset) override;
bool FreeSharedPages(Address address, size_t size) override;
bool SetPagePermissions(Address address, size_t size,
PagePermissions permissions) override {
return vas_->SetPagePermissions(address, size, permissions);
......
......@@ -125,6 +125,25 @@ bool VirtualAddressSpace::CanAllocateSubspaces() {
return OS::CanReserveAddressSpace();
}
Address VirtualAddressSpace::AllocateSharedPages(
    Address hint, size_t size, PagePermissions permissions,
    PlatformSharedMemoryHandle handle, uint64_t offset) {
  // Validate alignment requirements, then forward to the OS layer.
  DCHECK(IsAligned(hint, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));
  DCHECK(IsAligned(offset, allocation_granularity()));

  void* result = OS::AllocateShared(
      reinterpret_cast<void*>(hint), size,
      static_cast<OS::MemoryPermission>(permissions), handle, offset);
  return reinterpret_cast<Address>(result);
}
bool VirtualAddressSpace::FreeSharedPages(Address address, size_t size) {
  // Both the mapping address and its size must be granularity-aligned.
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));
  void* const mapping = reinterpret_cast<void*>(address);
  return OS::FreeShared(mapping, size);
}
std::unique_ptr<v8::VirtualAddressSpace> VirtualAddressSpace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_page_permissions) {
......@@ -272,6 +291,44 @@ bool VirtualAddressSubspace::FreeGuardRegion(Address address, size_t size) {
return region_allocator_.FreeRegion(address) == size;
}
// Maps shared memory (identified by |handle| at |offset|) into this subspace.
// Returns kNullAddress if no free region is available or the OS-level mapping
// fails.
Address VirtualAddressSubspace::AllocateSharedPages(
    Address hint, size_t size, PagePermissions permissions,
    PlatformSharedMemoryHandle handle, uint64_t offset) {
  // Hint, size, and shared-memory offset must all be multiples of the
  // allocation granularity.
  DCHECK(IsAligned(hint, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));
  DCHECK(IsAligned(offset, allocation_granularity()));
  MutexGuard guard(&mutex_);
  // First reserve a free region inside this subspace's region allocator...
  Address address =
      region_allocator_.AllocateRegion(hint, size, allocation_granularity());
  if (address == RegionAllocator::kAllocationFailure) return kNullAddress;
  // ...then map the shared memory into that region through the underlying
  // reservation. If the OS mapping fails, roll back the region reservation.
  if (!reservation_.AllocateShared(
          reinterpret_cast<void*>(address), size,
          static_cast<OS::MemoryPermission>(permissions), handle, offset)) {
    CHECK_EQ(size, region_allocator_.FreeRegion(address));
    return kNullAddress;
  }
  return address;
}
// Unmaps a shared-memory mapping previously created by AllocateSharedPages.
// Returns false if [address, address + size) does not correspond to exactly
// one region tracked by the region allocator.
bool VirtualAddressSubspace::FreeSharedPages(Address address, size_t size) {
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));
  MutexGuard guard(&mutex_);
  // Reject sizes that do not match the region that was originally allocated.
  if (region_allocator_.CheckRegion(address) != size) return false;
  // The order here is important: on Windows, the allocation first has to be
  // freed to a placeholder before the placeholder can be merged (during the
  // merge_callback) with any surrounding placeholder mappings.
  CHECK(reservation_.FreeShared(reinterpret_cast<void*>(address), size));
  CHECK_EQ(size, region_allocator_.FreeRegion(address));
  return true;
}
std::unique_ptr<v8::VirtualAddressSpace>
VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size,
size_t alignment,
......
......@@ -68,6 +68,13 @@ class V8_BASE_EXPORT VirtualAddressSpace : public VirtualAddressSpaceBase {
bool FreeGuardRegion(Address address, size_t size) override;
Address AllocateSharedPages(Address hint, size_t size,
PagePermissions permissions,
PlatformSharedMemoryHandle handle,
uint64_t offset) override;
bool FreeSharedPages(Address address, size_t size) override;
bool CanAllocateSubspaces() override;
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
......@@ -106,6 +113,13 @@ class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase {
bool FreeGuardRegion(Address address, size_t size) override;
Address AllocateSharedPages(Address hint, size_t size,
PagePermissions permissions,
PlatformSharedMemoryHandle handle,
uint64_t offset) override;
bool FreeSharedPages(Address address, size_t size) override;
bool CanAllocateSubspaces() override { return true; }
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
......
......@@ -98,6 +98,34 @@ void TestParentSpaceCannotAllocateInChildSpace(v8::VirtualAddressSpace* parent,
}
}
// Maps the same shared memory object twice into |space| and verifies that a
// write through one mapping is observable through the other.
void TestSharedPageAllocation(v8::VirtualAddressSpace* space) {
  const size_t size = 2 * space->allocation_granularity();
  PlatformSharedMemoryHandle handle =
      OS::CreateSharedMemoryHandleForTesting(size);
  // Shared memory handles may be unavailable on this platform; skip then.
  if (handle == kInvalidSharedMemoryHandle) return;
  Address first_mapping =
      space->AllocateSharedPages(VirtualAddressSpace::kNoHint, size,
                                 PagePermissions::kReadWrite, handle, 0);
  ASSERT_NE(kNullAddress, first_mapping);
  Address second_mapping =
      space->AllocateSharedPages(VirtualAddressSpace::kNoHint, size,
                                 PagePermissions::kReadWrite, handle, 0);
  ASSERT_NE(kNullAddress, second_mapping);
  ASSERT_NE(first_mapping, second_mapping);
  int* const window1 = reinterpret_cast<int*>(first_mapping);
  int* const window2 = reinterpret_cast<int*>(second_mapping);
  const int value = 0x42;
  // Fresh shared memory should read as zero; a store through the first view
  // must become visible through the second.
  EXPECT_EQ(0, *window2);
  *window1 = value;
  EXPECT_EQ(value, *window2);
  EXPECT_TRUE(space->FreeSharedPages(first_mapping, size));
  EXPECT_TRUE(space->FreeSharedPages(second_mapping, size));
  OS::DestroySharedMemoryHandle(handle);
}
TEST(VirtualAddressSpaceTest, TestPagePermissionSubsets) {
const PagePermissions kNoAccess = PagePermissions::kNoAccess;
const PagePermissions kRead = PagePermissions::kRead;
......@@ -142,6 +170,7 @@ TEST(VirtualAddressSpaceTest, TestRootSpace) {
TestRandomPageAddressGeneration(&rootspace);
TestBasicPageAllocation(&rootspace);
TestPageAllocationAlignment(&rootspace);
TestSharedPageAllocation(&rootspace);
}
TEST(VirtualAddressSpaceTest, TestSubspace) {
......@@ -164,6 +193,7 @@ TEST(VirtualAddressSpaceTest, TestSubspace) {
TestBasicPageAllocation(subspace.get());
TestPageAllocationAlignment(subspace.get());
TestParentSpaceCannotAllocateInChildSpace(&rootspace, subspace.get());
TestSharedPageAllocation(subspace.get());
// Test sub-subspaces
if (!subspace->CanAllocateSubspaces()) return;
......@@ -180,6 +210,7 @@ TEST(VirtualAddressSpaceTest, TestSubspace) {
TestBasicPageAllocation(subsubspace.get());
TestPageAllocationAlignment(subsubspace.get());
TestParentSpaceCannotAllocateInChildSpace(subspace.get(), subsubspace.get());
TestSharedPageAllocation(subsubspace.get());
}
TEST(VirtualAddressSpaceTest, TestEmulatedSubspace) {
......@@ -227,6 +258,7 @@ TEST(VirtualAddressSpaceTest, TestEmulatedSubspace) {
TestPageAllocationAlignment(&subspace);
// An emulated subspace does *not* guarantee that the parent space cannot
// allocate pages in it, so no TestParentSpaceCannotAllocateInChildSpace.
TestSharedPageAllocation(&subspace);
}
} // namespace base
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment