Commit 0481b23e authored by Bill Budge, committed by Commit Bot

[Memory] Rewrite platform OS Commit / Uncommit in terms of permissions.

- Eliminates CommitRegion and UncommitRegion methods, replacing them with
  calls to SetPermissions.
- Makes a similar change to the API of VirtualMemory.
- This changes system calls from mmap to mprotect on most POSIX platforms.

Bug: chromium:756050
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: Ib10f8293c9398c6c1e729cd7d686b7c97e6a5d75
Reviewed-on: https://chromium-review.googlesource.com/769679
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Bill Budge <bbudge@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49552}
parent 8036c41f
......@@ -138,21 +138,12 @@ void VirtualMemory::Reset() {
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size) {
bool VirtualMemory::SetPermissions(void* address, size_t size,
base::OS::MemoryPermission access) {
CHECK(InVM(address, size));
return base::OS::CommitRegion(address, size);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
CHECK(InVM(address, size));
return base::OS::UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
size_t page_size = base::OS::CommitPageSize();
CHECK(InVM(address, page_size));
bool result = base::OS::SetPermissions(address, page_size,
base::OS::MemoryPermission::kNoAccess);
bool result = base::OS::SetPermissions(address, size, access);
DCHECK(result);
USE(result);
return result;
}
......
......@@ -128,14 +128,10 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// than the requested size.
size_t size() const { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address, size_t size);
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
// Creates a single guard page at the given address.
bool Guard(void* address);
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Returns true on success, otherwise false.
bool SetPermissions(void* address, size_t size,
base::OS::MemoryPermission access);
// Releases the memory after |free_start|. Returns the bytes released.
size_t ReleasePartial(void* free_start);
......
......@@ -29,10 +29,6 @@ namespace base {
namespace {
// The memory allocation implementation is taken from platform-win32.cc.
// The mmap-based memory allocation implementation as it is used on most posix
// platforms does not work well because Cygwin does not support MAP_FIXED.
// This causes OS::CommitRegion to not always commit the memory region
// specified.
DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
......@@ -156,16 +152,6 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
// Commits pages in a previously reserved region so they are backed by real
// memory, made accessible read/write. Returns true on success.
// (Removed by this CL in favor of OS::SetPermissions.)
bool OS::CommitRegion(void* address, size_t size) {
return VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr;
}
// static
// Decommits pages in the region, returning the backing memory to the OS while
// keeping the address range reserved. Returns true on success.
// (Removed by this CL in favor of OS::SetPermissions with kNoAccess.)
bool OS::UncommitRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
......
......@@ -109,20 +109,6 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
prot) == ZX_OK;
}
// static
// Fuchsia: "commits" the region by granting read/write permissions via
// zx_vmar_protect on the root VMAR. Returns true on ZX_OK.
// (Removed by this CL in favor of OS::SetPermissions.)
bool OS::CommitRegion(void* address, size_t size) {
return zx_vmar_protect(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(address), size,
ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE) == ZX_OK;
}
// static
// Fuchsia: "uncommits" the region by dropping all permissions via
// zx_vmar_protect; the mapping itself stays in place. Returns true on ZX_OK.
// (Removed by this CL in favor of OS::SetPermissions with kNoAccess.)
bool OS::UncommitRegion(void* address, size_t size) {
return zx_vmar_protect(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(address), size,
0 /*no permissions*/) == ZX_OK;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return zx_vmar_unmap(zx_vmar_root_self(),
......
......@@ -283,37 +283,6 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return mprotect(address, size, prot) == 0;
}
// static
// POSIX: commits the region with read/write access. On most platforms this
// remaps the range with MAP_FIXED over the existing reservation; on AIX,
// where re-mmapping a fixed range is not reliable, mprotect is used instead.
// Returns true on success.
// (Removed by this CL; SetPermissions now uses mprotect everywhere.)
bool OS::CommitRegion(void* address, size_t size) {
#if !V8_OS_AIX
// MAP_FIXED atomically replaces the reservation's PROT_NONE mapping with a
// fresh read/write anonymous mapping at the same address.
if (MAP_FAILED == mmap(address, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
#else
// AIX: flip protections in place rather than re-mapping.
if (mprotect(address, size, PROT_READ | PROT_WRITE) == -1) return false;
#endif  // !V8_OS_AIX
return true;
}
// static
// POSIX: uncommits the region by remapping it as inaccessible (PROT_NONE),
// releasing the backing pages while keeping the address range reserved.
// AIX again uses mprotect in place of a fixed re-mmap. Returns true on
// success.
// (Removed by this CL; SetPermissions with kNoAccess replaces it.)
bool OS::UncommitRegion(void* address, size_t size) {
#if !V8_OS_AIX
int map_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
#if !V8_OS_FREEBSD && !V8_OS_QNX
// Don't charge the uncommitted range against commit accounting where the
// flag is available (not on FreeBSD/QNX).
map_flags |= MAP_NORESERVE;
#endif // !V8_OS_FREEBSD && !V8_OS_QNX
#if V8_OS_QNX
// QNX: defer physical backing until the pages are touched.
map_flags |= MAP_LAZY;
#endif // V8_OS_QNX
return mmap(address, size, PROT_NONE, map_flags, kMmapFd, kMmapFdOffset) !=
MAP_FAILED;
#else // V8_OS_AIX
return mprotect(address, size, PROT_NONE) != -1;
#endif // V8_OS_AIX
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
......
......@@ -834,16 +834,6 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
// Win32: commits pages in a reserved region with read/write protection.
// Returns true on success.
// (Removed by this CL in favor of OS::SetPermissions.)
bool OS::CommitRegion(void* address, size_t size) {
return VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr;
}
// static
// Win32: decommits pages, releasing physical backing but keeping the address
// range reserved. Returns true on success.
// (Removed by this CL in favor of OS::SetPermissions with kNoAccess.)
bool OS::UncommitRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
......
......@@ -192,10 +192,6 @@ class V8_BASE_EXPORT OS {
V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
MemoryPermission access);
V8_WARN_UNUSED_RESULT static bool CommitRegion(void* address, size_t size);
V8_WARN_UNUSED_RESULT static bool UncommitRegion(void* address, size_t size);
// Release part of a reserved address range.
V8_WARN_UNUSED_RESULT static bool ReleasePartialRegion(void* address,
size_t size);
......
......@@ -131,7 +131,9 @@ bool CodeRange::SetUp(size_t requested) {
// On some platforms, specifically Win64, we need to reserve some pages at
// the beginning of an executable space.
if (reserved_area > 0) {
if (!reservation.Commit(base, reserved_area)) return false;
if (!reservation.SetPermissions(base, reserved_area,
base::OS::MemoryPermission::kReadWrite))
return false;
base += reserved_area;
}
......@@ -196,17 +198,16 @@ bool CodeRange::GetNextAllocationBlock(size_t requested) {
Address CodeRange::AllocateRawMemory(const size_t requested_size,
const size_t commit_size,
size_t* allocated) {
// request_size includes guards while committed_size does not. Make sure
// callers know about the invariant.
CHECK_LE(commit_size,
requested_size - 2 * MemoryAllocator::CodePageGuardSize());
// requested_size includes the header and two guard regions, while commit_size
// only includes the header.
DCHECK_LE(commit_size,
requested_size - 2 * MemoryAllocator::CodePageGuardSize());
FreeBlock current;
if (!ReserveBlock(requested_size, &current)) {
*allocated = 0;
return nullptr;
}
*allocated = current.size;
DCHECK(*allocated <= current.size);
DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
&virtual_memory_, current.start, commit_size, *allocated)) {
......@@ -225,7 +226,8 @@ bool CodeRange::CommitRawMemory(Address start, size_t length) {
bool CodeRange::UncommitRawMemory(Address start, size_t length) {
return virtual_memory_.Uncommit(start, length);
return virtual_memory_.SetPermissions(start, length,
base::OS::MemoryPermission::kNoAccess);
}
......@@ -233,7 +235,8 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.emplace_back(address, length);
virtual_memory_.Uncommit(address, length);
virtual_memory_.SetPermissions(address, length,
base::OS::MemoryPermission::kNoAccess);
}
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
......@@ -414,7 +417,8 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
if (!base::OS::CommitRegion(base, size)) {
if (!base::OS::SetPermissions(base, size,
base::OS::MemoryPermission::kReadWrite)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
......@@ -475,7 +479,8 @@ Address MemoryAllocator::AllocateAlignedMemory(
base = nullptr;
}
} else {
if (reservation.Commit(base, commit_size)) {
if (reservation.SetPermissions(base, commit_size,
base::OS::MemoryPermission::kReadWrite)) {
UpdateAllocatedSpaceLimits(base, base + commit_size);
} else {
base = nullptr;
......@@ -777,15 +782,14 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
//
if (executable == EXECUTABLE) {
chunk_size = ::RoundUp(CodePageAreaStartOffset() + reserve_area_size,
GetCommitPageSize()) +
CodePageGuardSize();
chunk_size = ::RoundUp(
CodePageAreaStartOffset() + reserve_area_size + CodePageGuardSize(),
GetCommitPageSize());
// Size of header (not executable) plus area (executable).
size_t commit_size = ::RoundUp(
CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
// Allocate executable memory either from code range or from the OS.
#ifdef V8_TARGET_ARCH_MIPS64
// Use code range only for large object space on mips64 to keep address
// range within 256-MB memory region.
......@@ -965,11 +969,14 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
chunk->size_ -= bytes_to_free;
chunk->area_end_ = new_area_end;
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
// Add guard page at the end.
size_t page_size = GetCommitPageSize();
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(chunk->area_end_) %
static_cast<uintptr_t>(GetCommitPageSize()));
static_cast<uintptr_t>(page_size));
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + CodePageGuardSize());
reservation->Guard(chunk->area_end_);
reservation->SetPermissions(chunk->area_end_, page_size,
base::OS::MemoryPermission::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page and releasing
// partially starting at |start_free| will also release the potentially
......@@ -1125,7 +1132,9 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
if (!base::OS::UncommitRegion(start, size)) return false;
if (!base::OS::SetPermissions(start, size,
base::OS::MemoryPermission::kNoAccess))
return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
......@@ -1178,27 +1187,40 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
size_t commit_size,
size_t reserved_size) {
// Commit page header (not executable).
Address header = start;
size_t header_size = CodePageGuardStartOffset();
if (vm->Commit(header, header_size)) {
// Create guard page after the header.
if (vm->Guard(start + CodePageGuardStartOffset())) {
// Commit page body (executable).
Address body = start + CodePageAreaStartOffset();
size_t body_size = commit_size - CodePageGuardStartOffset();
if (vm->Commit(body, body_size)) {
// Create guard page before the end.
if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
commit_size -
CodePageGuardStartOffset());
const size_t page_size = GetCommitPageSize();
// All addresses and sizes must be aligned to the commit page size.
DCHECK(IsAddressAligned(start, page_size));
DCHECK_EQ(0, commit_size % page_size);
DCHECK_EQ(0, reserved_size % page_size);
const size_t guard_size = CodePageGuardSize();
const size_t pre_guard_offset = CodePageGuardStartOffset();
const size_t code_area_offset = CodePageAreaStartOffset();
// reserved_size includes two guard regions, commit_size does not.
DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
const Address pre_guard_page = start + pre_guard_offset;
const Address code_area = start + code_area_offset;
const Address post_guard_page = start + reserved_size - guard_size;
// Commit the non-executable header, from start to pre-code guard page.
if (vm->SetPermissions(start, pre_guard_offset,
base::OS::MemoryPermission::kReadWrite)) {
// Create the pre-code guard page, following the header.
if (vm->SetPermissions(pre_guard_page, page_size,
base::OS::MemoryPermission::kNoAccess)) {
// Commit the executable code body.
if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
base::OS::MemoryPermission::kReadWrite)) {
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
base::OS::MemoryPermission::kNoAccess)) {
UpdateAllocatedSpaceLimits(start, code_area + commit_size);
return true;
}
vm->Uncommit(body, body_size);
vm->SetPermissions(code_area, commit_size,
base::OS::MemoryPermission::kNoAccess);
}
}
vm->Uncommit(header, header_size);
vm->SetPermissions(start, pre_guard_offset,
base::OS::MemoryPermission::kNoAccess);
}
return false;
}
......
......@@ -56,8 +56,9 @@ void StoreBuffer::SetUp() {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask);
}
if (!reservation.Commit(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize * kStoreBuffers)) {
if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize * kStoreBuffers,
base::OS::MemoryPermission::kReadWrite)) {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
current_ = 0;
......
......@@ -664,9 +664,8 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
return false;
}
// TODO(v8:7105) Enable W^X instead of setting W|X permissions below.
bool ret = base::OS::CommitRegion(start, size) &&
base::OS::SetPermissions(
start, size, base::OS::MemoryPermission::kReadWriteExecute);
bool ret = base::OS::SetPermissions(
start, size, base::OS::MemoryPermission::kReadWriteExecute);
if (!ret) {
// Highly unlikely.
remaining_uncommitted_.Increment(size);
......
......@@ -228,6 +228,7 @@ TEST(CodeRange) {
// kMaxRegularHeapObjectSize.
size_t requested = (kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
requested = RoundUp(requested, MemoryAllocator::GetCommitPageSize());
size_t allocated = 0;
// The request size has to be at least 2 code guard pages larger than the
......
......@@ -41,11 +41,13 @@ TEST(OSReserveMemory) {
CHECK_NE(0, page_size);
CHECK_NOT_NULL(mem_addr);
size_t commit_size = OS::CommitPageSize();
CHECK(OS::CommitRegion(mem_addr, commit_size));
CHECK(OS::SetPermissions(mem_addr, commit_size,
OS::MemoryPermission::kReadWrite));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[KB - 1] = 2;
CHECK(OS::UncommitRegion(mem_addr, commit_size));
CHECK(OS::SetPermissions(mem_addr, commit_size,
OS::MemoryPermission::kNoAccess));
CHECK(OS::Free(mem_addr, kAllocationSize));
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment