Commit b73ee334 authored by Bill Budge, committed by Commit Bot

[Memory] Eliminate OS::ReleaseRegion.

- Eliminates OS::ReleaseRegion, replacing with calls to OS::Free.
- Adds bool return value to OS::Free.
- Cleans up types of flags, protection on Windows and Cygwin.

Bug: chromium:756050
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: I6a642374e33876966a5552fb0cdf552dc6d79aaa
Reviewed-on: https://chromium-review.googlesource.com/762345
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49305}
parent 4d3bc552
......@@ -129,9 +129,7 @@ VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = base::OS::ReleaseRegion(address(), size());
DCHECK(result);
USE(result);
Release();
}
}
......@@ -185,7 +183,7 @@ void VirtualMemory::Release() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
bool result = base::OS::ReleaseRegion(address, size);
bool result = base::OS::Free(address, size);
USE(result);
DCHECK(result);
}
......
......@@ -509,7 +509,9 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
base::OS::ReleaseRegion(data, length);
bool result = base::OS::Free(data, length);
DCHECK(result);
USE(result);
return;
}
}
......
......@@ -28,13 +28,13 @@ namespace base {
namespace {
// The VirtualMemory implementation is taken from platform-win32.cc.
// The memory allocation implementation is taken from platform-win32.cc.
// The mmap-based virtual memory implementation as it is used on most posix
// platforms does not work well because Cygwin does not support MAP_FIXED.
// This causes VirtualMemory::Commit to not always commit the memory region
// This causes OS::CommitRegion to not always commit the memory region
// specified.
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
return PAGE_NOACCESS;
......@@ -46,17 +46,17 @@ int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
void* hint) {
void* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
void* hint) {
LPVOID base = nullptr;
if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
if (protect == PAGE_EXECUTE_READWRITE || protect == PAGE_NOACCESS) {
// For executable pages, try to randomize the allocation address.
base = VirtualAlloc(hint, size, action, protection);
base = VirtualAlloc(hint, size, flags, protect);
}
// After three attempts give up and let the OS find an address to use.
if (base == nullptr) base = VirtualAlloc(nullptr, size, action, protection);
// If that fails, let the OS find an address to use.
if (base == nullptr) base = VirtualAlloc(nullptr, size, flags, protect);
return base;
}
......@@ -92,6 +92,7 @@ double CygwinTimezoneCache::LocalTimeOffset() {
(loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
......@@ -101,12 +102,12 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
int flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
: MEM_RESERVE | MEM_COMMIT;
int prot = GetProtectionFromMemoryPermission(access);
DWORD flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
: MEM_RESERVE | MEM_COMMIT;
DWORD protect = GetProtectionFromMemoryPermission(access);
void* base = RandomizedVirtualAlloc(request_size, flags, prot, address);
void* base = RandomizedVirtualAlloc(request_size, flags, protect, address);
if (base == nullptr) return nullptr;
uint8_t* aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
......@@ -117,7 +118,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// base. Retry logic is needed since we may lose the memory due to a race.
Free(base, request_size);
if (resize_attempts == kMaxResizeAttempts) return nullptr;
base = RandomizedVirtualAlloc(size, flags, prot, aligned_base);
base = RandomizedVirtualAlloc(size, flags, protect, aligned_base);
if (base == nullptr) return nullptr;
aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
resize_attempts++;
......@@ -126,19 +127,15 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
return static_cast<void*>(aligned_base);
}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): VirtualFree has a return value which is ignored here.
VirtualFree(address, 0, MEM_RELEASE);
USE(size);
// static
bool OS::Free(void* address, const size_t size) {
return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (nullptr == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
return true;
DWORD protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
......@@ -146,11 +143,6 @@ bool OS::UncommitRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
  // With MEM_RELEASE, VirtualFree requires a size of 0 and frees the entire
  // reservation starting at |address|; |size| is therefore not passed on.
  const bool released = (VirtualFree(address, 0, MEM_RELEASE) != 0);
  return released;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
......
......@@ -73,6 +73,12 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
return static_cast<void*>(aligned_base);
}
// static
bool OS::Free(void* address, size_t size) {
  // Unmap the range from the root VMAR; success iff the kernel reports ZX_OK.
  const uintptr_t base = reinterpret_cast<uintptr_t>(address);
  const auto status = zx_vmar_unmap(zx_vmar_root_self(), base, size);
  return status == ZX_OK;
}
// static
void OS::Guard(void* address, size_t size) {
CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(),
......@@ -96,12 +102,6 @@ bool OS::UncommitRegion(void* address, size_t size) {
0 /*no permissions*/) == ZX_OK;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
  // Releasing a reserved region is a plain unmap of the whole range.
  const uintptr_t base = reinterpret_cast<uintptr_t>(address);
  return ZX_OK == zx_vmar_unmap(zx_vmar_root_self(), base, size);
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return zx_vmar_unmap(zx_vmar_root_self(),
......
......@@ -230,6 +230,7 @@ void* OS::GetRandomMmapAddr() {
// TODO(bbudge) Move Cygwin and Fuschia stuff into platform-specific files.
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
......@@ -261,16 +262,12 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
DCHECK_EQ(size, request_size);
return static_cast<void*>(aligned_base);
}
#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
#if !V8_OS_CYGWIN
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(address, size);
USE(result);
DCHECK_EQ(0, result);
// static
bool OS::Free(void* address, const size_t size) {
  // munmap returns 0 on success; surface failure to the caller rather than
  // ignoring it.
  const int rc = munmap(address, size);
  return rc == 0;
}
#endif // !V8_OS_CYGWIN
#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
void OS::SetReadAndExecutable(void* address, const size_t size) {
#if V8_OS_CYGWIN
......@@ -348,11 +345,6 @@ bool OS::UncommitRegion(void* address, size_t size) {
#endif // V8_OS_AIX
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
  // On POSIX, releasing a reserved region is an munmap of the full range.
  return 0 == munmap(address, size);
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
......
......@@ -732,7 +732,7 @@ void* OS::GetRandomMmapAddr() {
namespace {
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
return PAGE_NOACCESS;
......@@ -744,8 +744,8 @@ int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
void* hint) {
void* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
void* hint) {
LPVOID base = NULL;
static BOOL use_aslr = -1;
#ifdef V8_HOST_ARCH_32_BIT
......@@ -758,19 +758,20 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
#endif
if (use_aslr &&
(protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS)) {
(protect == PAGE_EXECUTE_READWRITE || protect == PAGE_NOACCESS)) {
// For executable or reserved pages try to randomize the allocation address.
base = VirtualAlloc(hint, size, action, protection);
base = VirtualAlloc(hint, size, flags, protect);
}
// On failure, let the OS find an address to use.
if (base == NULL) base = VirtualAlloc(nullptr, size, action, protection);
if (base == NULL) base = VirtualAlloc(nullptr, size, flags, protect);
return base;
}
} // namespace
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
......@@ -783,9 +784,9 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
int flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
: MEM_RESERVE | MEM_COMMIT;
int prot = GetProtectionFromMemoryPermission(access);
int protect = GetProtectionFromMemoryPermission(access);
void* base = RandomizedVirtualAlloc(request_size, flags, prot, address);
void* base = RandomizedVirtualAlloc(request_size, flags, protect, address);
if (base == nullptr) return nullptr;
uint8_t* aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
......@@ -796,7 +797,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// base. Retry logic is needed since we may lose the memory due to a race.
Free(base, request_size);
if (resize_attempts == kMaxResizeAttempts) return nullptr;
base = RandomizedVirtualAlloc(size, flags, prot, aligned_base);
base = RandomizedVirtualAlloc(size, flags, protect, aligned_base);
if (base == nullptr) return nullptr;
aligned_base = RoundUp(static_cast<uint8_t*>(base), alignment);
resize_attempts++;
......@@ -805,10 +806,9 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
return static_cast<void*>(aligned_base);
}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): VirtualFree has a return value which is ignored here.
VirtualFree(address, 0, MEM_RELEASE);
USE(size);
// static
bool OS::Free(void* address, const size_t size) {
return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
void OS::SetReadAndExecutable(void* address, const size_t size) {
......@@ -840,11 +840,8 @@ void OS::SetReadWriteAndExecutable(void* address, const size_t size) {
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
return true;
DWORD protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
......@@ -852,11 +849,6 @@ bool OS::UncommitRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
  // MEM_RELEASE frees the whole reservation at |address|; the Win32 API
  // mandates a size argument of 0 in this mode, so |size| goes unused.
  const bool ok = (VirtualFree(address, 0, MEM_RELEASE) != 0);
  return ok;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
......
......@@ -179,7 +179,7 @@ class V8_BASE_EXPORT OS {
MemoryPermission access);
// Frees memory allocated by a call to Allocate.
static void Free(void* address, const size_t size);
static bool Free(void* address, const size_t size);
// Mark a region of memory executable and readable but not writable.
static void SetReadAndExecutable(void* address, const size_t size);
......@@ -198,8 +198,6 @@ class V8_BASE_EXPORT OS {
static bool UncommitRegion(void* address, size_t size);
static bool ReleaseRegion(void* address, size_t size);
// Release part of a reserved address range.
static bool ReleasePartialRegion(void* address, size_t size);
......
......@@ -142,7 +142,9 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void Free(void* data, size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) {
base::OS::ReleaseRegion(data, length);
bool result = base::OS::Free(data, length);
DCHECK(result);
USE(result);
return;
}
#endif
......
......@@ -443,7 +443,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
bool result = base::OS::ReleaseRegion(base, size);
bool result = base::OS::Free(base, size);
USE(result);
DCHECK(result);
}
......
......@@ -24,7 +24,7 @@ TEST(OSReserveMemory) {
int* addr = static_cast<int*>(mem_addr);
addr[KB - 1] = 2;
CHECK(OS::UncommitRegion(mem_addr, commit_size));
OS::ReleaseRegion(mem_addr, page_size);
CHECK(OS::Free(mem_addr, page_size));
}
#ifdef V8_CC_GNU
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment