Commit afcd2191 authored by Samuel Groß, committed by V8 LUCI CQ

[platform] Introduce AddressSpaceReservation API

This low-level API implements virtual address space reservations on the
supported platforms. An AddressSpaceReservation supports similar
functionality as the global page management APIs in the OS class but
operates inside a continuous region of previously reserved virtual
address space. A reservation is backed by regular mmap mappings on
Posix, by placeholder mappings on Windows, and by VMARs on Fuchsia.

Bug: chromium:1218005
Change-Id: I99bc6bcbc26eb4aa3b54a31c671c9e06e92c471b
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3270540
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78130}
parent 76059a86
This diff is collapsed.
......@@ -467,6 +467,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return ret == 0;
}
// static
bool OS::DiscardSystemPages(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
......@@ -495,6 +496,7 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return ret == 0;
}
// static
bool OS::DecommitPages(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
......@@ -509,6 +511,30 @@ bool OS::DecommitPages(void* address, size_t size) {
return ptr == address;
}
// static
bool OS::CanReserveAddressSpace() {
  // Reservations are backed by regular mmap mappings on POSIX, which are
  // always available, so this unconditionally succeeds.
  return true;
}
// static
Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
    void* hint, size_t size, size_t alignment,
    MemoryPermission max_permission) {
  // On POSIX, address space reservations are backed by private memory
  // mappings. If the region may eventually hold executable code, create the
  // inaccessible mapping with the JIT-aware permission so platforms that
  // distinguish the two (e.g. macOS with MAP_JIT) set it up correctly.
  const bool will_jit =
      max_permission == MemoryPermission::kReadWriteExecute;
  const MemoryPermission permission =
      will_jit ? MemoryPermission::kNoAccessWillJitLater
               : MemoryPermission::kNoAccess;

  void* base = Allocate(hint, size, alignment, permission);
  if (base == nullptr) return {};
  return AddressSpaceReservation(base, size);
}
// static
bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
  // A POSIX reservation is just an ordinary mapping, so releasing it is a
  // plain Free() of the whole reserved region.
  void* base = reservation.base();
  const size_t region_size = reservation.size();
  return Free(base, region_size);
}
// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
......@@ -823,6 +849,57 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
strncpy(dest, src, n);
}
// ----------------------------------------------------------------------------
// POSIX Address space reservation support.
//
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// Creates a sub-reservation covering [address, address + size) inside this
// reservation. On POSIX no OS-level work is needed: the parent's mapping
// already covers the region, so the child simply aliases a sub-range of it.
// |max_permission| is unused in this implementation.
Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
    void* address, size_t size, OS::MemoryPermission max_permission) {
  DCHECK(Contains(address, size));
  DCHECK_EQ(0, size % OS::AllocatePageSize());
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % OS::AllocatePageSize());
  return AddressSpaceReservation(address, size);
}
// Releases a sub-reservation previously returned by CreateSubReservation().
// The sub-reservation shares the parent's mapping, so there is no separate
// OS resource to release; always succeeds.
bool AddressSpaceReservation::FreeSubReservation(
    AddressSpaceReservation reservation) {
  // Nothing to do.
  // Pages allocated inside the reservation must've already been freed.
  return true;
}
bool AddressSpaceReservation::Allocate(void* address, size_t size,
OS::MemoryPermission access) {
// The region is already mmap'ed, so it just has to be made accessible now.
DCHECK(Contains(address, size));
return OS::SetPermissions(address, size, access);
}
// Frees pages inside the reservation. The pages are decommitted rather than
// unmapped so the address range itself stays reserved for future Allocate()
// calls.
bool AddressSpaceReservation::Free(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return OS::DecommitPages(address, size);
}
// Changes the permissions of pages inside the reservation; forwards directly
// to the global OS API after checking that the region is in bounds.
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
                                             OS::MemoryPermission access) {
  DCHECK(Contains(address, size));
  return OS::SetPermissions(address, size, access);
}
// Discards the system pages backing the region (their contents may be lost);
// forwards to the global OS API after a bounds check.
bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return OS::DiscardSystemPages(address, size);
}
// Decommits pages inside the reservation, returning backing memory to the
// system while keeping the address range reserved; forwards to the OS API.
bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return OS::DecommitPages(address, size);
}
#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// ----------------------------------------------------------------------------
// POSIX thread support.
......
......@@ -722,6 +722,20 @@ void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
g_hard_abort = hard_abort;
}
// Signature of the VirtualAlloc2 API, which is only present on newer Windows
// versions and is therefore resolved dynamically from kernelbase.dll.
typedef PVOID (*VirtualAlloc2_t)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
                                 MEM_EXTENDED_PARAMETER*, ULONG);

// Function pointer populated by OS::EnsureWin32MemoryAPILoaded(); remains
// nullptr when the API is unavailable on this system.
VirtualAlloc2_t VirtualAlloc2;
void OS::EnsureWin32MemoryAPILoaded() {
static bool loaded = false;
if (!loaded) {
VirtualAlloc2 = (VirtualAlloc2_t)GetProcAddress(
GetModuleHandle(L"kernelbase.dll"), "VirtualAlloc2");
loaded = true;
}
}
// static
size_t OS::AllocatePageSize() {
static size_t allocate_alignment = 0;
......@@ -801,6 +815,14 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
// Dispatches to VirtualAlloc2 when it was resolved at startup, and falls
// back to plain VirtualAlloc on systems where it is unavailable.
void* VirtualAllocWrapper(void* hint, size_t size, DWORD flags, DWORD protect) {
  if (VirtualAlloc2 == nullptr) {
    return VirtualAlloc(hint, size, flags, protect);
  }
  return VirtualAlloc2(nullptr, hint, size, flags, protect, NULL, 0);
}
uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
void* hint) {
LPVOID base = nullptr;
......@@ -816,32 +838,18 @@ uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
if (use_aslr && protect != PAGE_READWRITE) {
// For executable or reserved pages try to randomize the allocation address.
base = VirtualAlloc(hint, size, flags, protect);
base = VirtualAllocWrapper(hint, size, flags, protect);
}
// On failure, let the OS find an address to use.
if (base == nullptr) {
base = VirtualAlloc(nullptr, size, flags, protect);
base = VirtualAllocWrapper(nullptr, size, flags, protect);
}
return reinterpret_cast<uint8_t*>(base);
}
} // namespace
// static
void* OS::Allocate(void* hint, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
DCHECK_LE(page_size, alignment);
hint = AlignedAddress(hint, alignment);
DWORD flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
: MEM_RESERVE | MEM_COMMIT;
DWORD protect = GetProtectionFromMemoryPermission(access);
void* AllocateInternal(void* hint, size_t size, size_t alignment,
size_t page_size, DWORD flags, DWORD protect) {
// First, try an exact size aligned allocation.
uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
......@@ -852,7 +860,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
if (base == aligned_base) return reinterpret_cast<void*>(base);
// Otherwise, free it and try a larger allocation.
CHECK(Free(base, size));
CHECK(VirtualFree(base, 0, MEM_RELEASE));
// Clear the hint. It's unlikely we can allocate at this address.
hint = nullptr;
......@@ -868,11 +876,11 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
// Try to trim the allocation by freeing the padded allocation and then
// calling VirtualAlloc at the aligned base.
CHECK(Free(base, padded_size));
CHECK(VirtualFree(base, 0, MEM_RELEASE));
aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
base = reinterpret_cast<uint8_t*>(
VirtualAlloc(aligned_base, size, flags, protect));
VirtualAllocWrapper(aligned_base, size, flags, protect));
// We might not get the reduced allocation due to a race. In that case,
// base will be nullptr.
if (base != nullptr) break;
......@@ -881,6 +889,25 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
return reinterpret_cast<void*>(base);
}
} // namespace
// static
void* OS::Allocate(void* hint, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
DCHECK_LE(page_size, alignment);
hint = AlignedAddress(hint, alignment);
DWORD flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
: MEM_RESERVE | MEM_COMMIT;
DWORD protect = GetProtectionFromMemoryPermission(access);
return AllocateInternal(hint, size, alignment, page_size, flags, protect);
}
// static
bool OS::Free(void* address, const size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
......@@ -904,7 +931,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
DWORD protect = GetProtectionFromMemoryPermission(access);
return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
return VirtualAllocWrapper(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
......@@ -929,7 +956,7 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
}
// DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
// failure.
void* ptr = VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE);
void* ptr = VirtualAllocWrapper(address, size, MEM_RESET, PAGE_READWRITE);
CHECK(ptr);
return ptr;
}
......@@ -949,6 +976,35 @@ bool OS::DecommitPages(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
// Address space reservations on Windows are backed by placeholder mappings,
// which require VirtualAlloc2 (resolved in EnsureWin32MemoryAPILoaded).
bool OS::CanReserveAddressSpace() { return VirtualAlloc2 != nullptr; }
// static
// Reserves a |size|-byte, |alignment|-aligned region of address space near
// |hint|. Callers must check CanReserveAddressSpace() first; |max_permission|
// is unused in this implementation.
Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
    void* hint, size_t size, size_t alignment,
    MemoryPermission max_permission) {
  CHECK(CanReserveAddressSpace());
  size_t page_size = AllocatePageSize();
  DCHECK_EQ(0, size % page_size);
  DCHECK_EQ(0, alignment % page_size);
  DCHECK_LE(page_size, alignment);
  hint = AlignedAddress(hint, alignment);
  // On Windows, address space reservations are backed by placeholder mappings.
  void* reservation =
      AllocateInternal(hint, size, alignment, page_size,
                       MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS);
  if (!reservation) return {};
  return AddressSpaceReservation(reservation, size);
}
// static
// Releases an entire reservation (its backing placeholder mapping) by
// freeing the whole reserved region.
bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
  return OS::Free(reservation.base(), reservation.size());
}
// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
......@@ -1068,6 +1124,64 @@ Win32MemoryMappedFile::~Win32MemoryMappedFile() {
CloseHandle(file_);
}
// Creates a sub-reservation covering [address, address + size). The caller is
// expected to have already split off a matching placeholder (via
// SplitPlaceholder), so no OS-level work is needed here. |max_permission| is
// unused in this implementation.
Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
    void* address, size_t size, OS::MemoryPermission max_permission) {
  // Nothing to do, the sub reservation must already have been split by now.
  DCHECK(Contains(address, size));
  DCHECK_EQ(0, size % OS::AllocatePageSize());
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % OS::AllocatePageSize());
  return AddressSpaceReservation(address, size);
}
// Releases a sub-reservation previously returned by CreateSubReservation().
// Merging the underlying placeholders back (MergePlaceholders) is the
// caller's responsibility, so this always succeeds without OS-level work.
bool AddressSpaceReservation::FreeSubReservation(
    AddressSpaceReservation reservation) {
  // Nothing to do.
  // Pages allocated inside the reservation must've already been freed.
  return true;
}
// Splits the placeholder mapping so that [address, address + size) becomes a
// separate placeholder, which can then be individually replaced by Allocate()
// or recombined via MergePlaceholders().
bool AddressSpaceReservation::SplitPlaceholder(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}
// Coalesces multiple adjacent placeholders covering [address, address + size)
// back into a single placeholder mapping.
bool AddressSpaceReservation::MergePlaceholders(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return VirtualFree(address, size, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
}
bool AddressSpaceReservation::Allocate(void* address, size_t size,
OS::MemoryPermission access) {
DCHECK(Contains(address, size));
CHECK(VirtualAlloc2);
DWORD flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE | MEM_REPLACE_PLACEHOLDER
: MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER;
DWORD protect = GetProtectionFromMemoryPermission(access);
return VirtualAlloc2(nullptr, address, size, flags, protect, NULL, 0);
}
// Frees a region inside the reservation, turning its mapping back into a
// placeholder so the address range stays reserved.
bool AddressSpaceReservation::Free(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}
// Changes the permissions of pages inside the reservation; forwards directly
// to the global OS API after checking that the region is in bounds.
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
                                             OS::MemoryPermission access) {
  DCHECK(Contains(address, size));
  return OS::SetPermissions(address, size, access);
}
// Discards the system pages backing the region (their contents may be lost);
// forwards to the global OS API after a bounds check.
bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return OS::DiscardSystemPages(address, size);
}
// Decommits pages inside the reservation while keeping the address range
// reserved; forwards to the global OS API after a bounds check.
bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return OS::DecommitPages(address, size);
}
// The following code loads functions defined in DbhHelp.h and TlHelp32.h
// dynamically. This is to avoid being depending on dbghelp.dll and
......
......@@ -29,6 +29,7 @@
#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
......@@ -36,6 +37,10 @@
#include "src/base/qnx-math.h"
#endif
#if V8_OS_FUCHSIA
#include <zircon/types.h>
#endif // V8_OS_FUCHSIA
#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
......@@ -115,6 +120,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#endif // V8_NO_FAST_TLS
class AddressSpaceReservation;
class PageAllocator;
class TimezoneCache;
......@@ -132,6 +138,17 @@ class V8_BASE_EXPORT OS {
// - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
static void Initialize(bool hard_abort, const char* const gc_fake_mmap);
#if V8_OS_WIN
// On Windows, ensure the newer memory API is loaded if available. This
// includes function like VirtualAlloc2 and MapViewOfFile3.
// TODO(chromium:1218005) this should probably happen as part of Initialize,
// but that is currently invoked too late, after the virtual memory cage
// is initialized. However, eventually the virtual memory cage initialization
// will happen as part of V8::Initialize, at which point this function can
// probably be merged into OS::Initialize.
static void EnsureWin32MemoryAPILoaded();
#endif
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// strive for high-precision timer resolution, preferable
......@@ -291,6 +308,7 @@ class V8_BASE_EXPORT OS {
private:
// These classes use the private memory management API below.
friend class AddressSpaceReservation;
friend class MemoryMappedFile;
friend class PosixMemoryMappedFile;
friend class v8::base::PageAllocator;
......@@ -326,6 +344,15 @@ class V8_BASE_EXPORT OS {
V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size);
V8_WARN_UNUSED_RESULT static bool CanReserveAddressSpace();
V8_WARN_UNUSED_RESULT static Optional<AddressSpaceReservation>
CreateAddressSpaceReservation(void* hint, size_t size, size_t alignment,
MemoryPermission max_permission);
V8_WARN_UNUSED_RESULT static bool FreeAddressSpaceReservation(
AddressSpaceReservation reservation);
static const int msPerSecond = 1000;
#if V8_OS_POSIX
......@@ -347,6 +374,73 @@ inline void EnsureConsoleOutput() {
#endif // (defined(_WIN32) || defined(_WIN64))
}
// ----------------------------------------------------------------------------
// AddressSpaceReservation
//
// This class provides the same memory management functions as OS but operates
// inside a previously reserved contiguous region of virtual address space.
class V8_BASE_EXPORT AddressSpaceReservation {
 public:
  using Address = uintptr_t;

  // Base address and total size of the reserved region.
  void* base() const { return base_; }
  size_t size() const { return size_; }

  // Returns true iff [region_addr, region_addr + region_size) lies entirely
  // within this reservation.
  bool Contains(void* region_addr, size_t region_size) const {
    Address base = reinterpret_cast<Address>(base_);
    Address region_base = reinterpret_cast<Address>(region_addr);
    return (region_base >= base) &&
           ((region_base + region_size) <= (base + size_));
  }

  // Makes a region inside the reservation usable with the given permissions.
  V8_WARN_UNUSED_RESULT bool Allocate(void* address, size_t size,
                                      OS::MemoryPermission access);

  // Frees a region inside the reservation; the address range stays reserved.
  V8_WARN_UNUSED_RESULT bool Free(void* address, size_t size);

  V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
                                            OS::MemoryPermission access);

  V8_WARN_UNUSED_RESULT bool DiscardSystemPages(void* address, size_t size);

  V8_WARN_UNUSED_RESULT bool DecommitPages(void* address, size_t size);

  // Creates/destroys a reservation covering a sub-range of this one.
  V8_WARN_UNUSED_RESULT Optional<AddressSpaceReservation> CreateSubReservation(
      void* address, size_t size, OS::MemoryPermission max_permission);

  V8_WARN_UNUSED_RESULT static bool FreeSubReservation(
      AddressSpaceReservation reservation);

#if V8_OS_WIN
  // On Windows, the placeholder mappings backing address space reservations
  // need to be split and merged as page allocations can only replace an entire
  // placeholder mapping, not parts of it. This must be done by the users of
  // this API as it requires a RegionAllocator (or equivalent) to keep track of
  // sub-regions and decide when to split and when to coalesce multiple free
  // regions into a single one.
  V8_WARN_UNUSED_RESULT bool SplitPlaceholder(void* address, size_t size);
  V8_WARN_UNUSED_RESULT bool MergePlaceholders(void* address, size_t size);
#endif  // V8_OS_WIN

 private:
  // Only OS may construct reservations (via CreateAddressSpaceReservation).
  friend class OS;

#if V8_OS_FUCHSIA
  AddressSpaceReservation(void* base, size_t size, zx_handle_t vmar)
      : base_(base), size_(size), vmar_(vmar) {}
#else
  AddressSpaceReservation(void* base, size_t size) : base_(base), size_(size) {}
#endif  // V8_OS_FUCHSIA

  void* base_ = nullptr;
  size_t size_ = 0;

#if V8_OS_FUCHSIA
  // On Fuchsia, address space reservations are backed by VMARs.
  zx_handle_t vmar_ = ZX_HANDLE_INVALID;
#endif  // V8_OS_FUCHSIA
};
// ----------------------------------------------------------------------------
// Thread
//
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment