Commit 32f30f63 authored by Eric Holk, committed by Commit Bot

[platform] check return values from memory operations

This change adds DCHECKs for calls such as mprotect, as well as marking some of
the memory allocation and deallocation routines as V8_MUST_USE_RESULT. This
additional checking gives us more useful information for failure in the presence
of, for example, address space exhaustion.

Bug: 
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: I5bc76c1da6160262d3d556fea49d284ddd4e02c5
Reviewed-on: https://chromium-review.googlesource.com/721267
Commit-Queue: Eric Holk <eholk@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49164}
parent 2e0a5bb5
......@@ -122,9 +122,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = base::OS::ReleaseRegion(address(), size());
DCHECK(result);
USE(result);
CHECK(base::OS::ReleaseRegion(address(), size()));
}
}
......@@ -145,8 +143,7 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
bool VirtualMemory::Guard(void* address) {
CHECK(InVM(address, base::OS::CommitPageSize()));
base::OS::Guard(address, base::OS::CommitPageSize());
return true;
return base::OS::Guard(address, base::OS::CommitPageSize());
}
size_t VirtualMemory::ReleasePartial(void* free_start) {
......@@ -163,9 +160,7 @@ size_t VirtualMemory::ReleasePartial(void* free_start) {
__lsan_unregister_root_region(address_, size_);
__lsan_register_root_region(address_, size_ - free_size);
#endif
const bool result = base::OS::ReleasePartialRegion(free_start, free_size);
USE(result);
DCHECK(result);
CHECK(base::OS::ReleasePartialRegion(free_start, free_size));
size_ -= free_size;
return free_size;
}
......@@ -178,9 +173,7 @@ void VirtualMemory::Release() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
bool result = base::OS::ReleaseRegion(address, size);
USE(result);
DCHECK(result);
CHECK(base::OS::ReleaseRegion(address, size));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
......
......@@ -504,7 +504,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
base::OS::ReleaseRegion(data, length);
CHECK(base::OS::ReleaseRegion(data, length));
return;
}
}
......@@ -515,7 +515,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
v8::ArrayBuffer::Allocator::Protection protection) {
switch (protection) {
case v8::ArrayBuffer::Allocator::Protection::kNoAccess: {
base::OS::Guard(data, length);
CHECK(base::OS::Guard(data, length));
return;
}
case v8::ArrayBuffer::Allocator::Protection::kReadWrite: {
......
......@@ -137,10 +137,7 @@ size_t OS::AllocatePageSize() {
return static_cast<size_t>(sysconf(_SC_PAGESIZE));
}
size_t OS::CommitPageSize() {
static size_t page_size = getpagesize();
return page_size;
}
size_t OS::CommitPageSize() { return sysconf(_SC_PAGESIZE); }
void* OS::GetRandomMmapAddr() {
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
......@@ -240,12 +237,12 @@ void OS::SetReadAndExecutable(void* address, const size_t size) {
// Create guard pages.
#if !V8_OS_FUCHSIA
void OS::Guard(void* address, const size_t size) {
bool OS::Guard(void* address, const size_t size) {
#if V8_OS_CYGWIN
DWORD oldprotect;
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
return VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
#else
mprotect(address, size, PROT_NONE);
return 0 == mprotect(address, size, PROT_NONE);
#endif
}
#endif // !V8_OS_FUCHSIA
......
......@@ -793,28 +793,31 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): VirtualFree has a return value which is ignored here.
VirtualFree(address, 0, MEM_RELEASE);
DCHECK(IsPageAlignedRange(address, size));
CHECK(VirtualFree(address, 0, MEM_RELEASE));
USE(size);
}
void OS::SetReadAndExecutable(void* address, const size_t size) {
DCHECK(IsPageAlignedRange(address, size));
DWORD old_protect;
CHECK_NE(NULL,
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect));
CHECK(VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect));
}
void OS::Guard(void* address, const size_t size) {
DWORD oldprotect;
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
bool OS::Guard(void* address, const size_t size) {
DCHECK(IsPageAlignedRange(address, size));
// Windows requires guard pages to be committed, so we VirtualAlloc to both
// commit and change the protection.
return VirtualAlloc(address, size, MEM_COMMIT, PAGE_NOACCESS) == address;
}
void OS::SetReadAndWritable(void* address, const size_t size, bool commit) {
DCHECK(IsPageAlignedRange(address, size));
if (commit) {
CHECK(VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE));
CHECK_NOT_NULL(VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE));
} else {
DWORD oldprotect;
CHECK_NE(NULL, VirtualProtect(address, size, PAGE_READWRITE, &oldprotect));
CHECK(VirtualProtect(address, size, PAGE_READWRITE, &oldprotect));
}
}
......@@ -843,9 +846,7 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
}
uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
DCHECK(result);
CHECK(ReleaseRegion(address, request_size));
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != nullptr) {
request_size = size;
......@@ -860,31 +861,33 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
}
*allocated = request_size;
return static_cast<void*>(address);
return address;
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
DCHECK_NOT_NULL(address);
DCHECK(IsPageAlignedRange(address, size));
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
return true;
return VirtualAlloc(address, size, MEM_COMMIT, prot) == address;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
DCHECK(IsPageAlignedRange(address, size));
return VirtualFree(address, size, MEM_DECOMMIT);
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return VirtualFree(address, 0, MEM_RELEASE) != 0;
DCHECK(IsPageAlignedRange(address, size));
return VirtualFree(address, 0, MEM_RELEASE);
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
DCHECK(IsPageAlignedRange(address, size));
return VirtualFree(address, size, MEM_DECOMMIT);
}
// static
......
......@@ -179,11 +179,19 @@ class V8_BASE_EXPORT OS {
// Frees memory allocated by a call to Allocate.
static void Free(void* address, const size_t size);
static inline bool IsPageAligned(void* address) {
return reinterpret_cast<uintptr_t>(address) % CommitPageSize() == 0;
}
static inline bool IsPageAlignedRange(void* address, size_t length) {
return IsPageAligned(address) &&
IsPageAligned(static_cast<char*>(address) + length);
}
// Mark a region of memory executable and readable but not writable.
static void SetReadAndExecutable(void* address, const size_t size);
// Assign memory as a guard page so that access will cause an exception.
static void Guard(void* address, const size_t size);
static V8_WARN_UNUSED_RESULT bool Guard(void* address, const size_t size);
// Make a region of memory non-executable but readable and writable.
static void SetReadAndWritable(void* address, const size_t size, bool commit);
......@@ -197,14 +205,16 @@ class V8_BASE_EXPORT OS {
static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated);
static bool CommitRegion(void* address, size_t size, bool is_executable);
static V8_WARN_UNUSED_RESULT bool CommitRegion(void* address, size_t size,
bool is_executable);
static bool UncommitRegion(void* address, size_t size);
static V8_WARN_UNUSED_RESULT bool UncommitRegion(void* address, size_t size);
static bool ReleaseRegion(void* address, size_t size);
static V8_WARN_UNUSED_RESULT bool ReleaseRegion(void* address, size_t size);
// Release part of a reserved address range.
static bool ReleasePartialRegion(void* address, size_t size);
static V8_WARN_UNUSED_RESULT bool ReleasePartialRegion(void* address,
size_t size);
static bool HasLazyCommits();
......
......@@ -142,7 +142,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void Free(void* data, size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) {
base::OS::ReleaseRegion(data, length);
CHECK(base::OS::ReleaseRegion(data, length));
return;
}
#endif
......@@ -162,7 +162,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void* VirtualMemoryAllocate(size_t length) {
void* data = base::OS::ReserveRegion(length, nullptr);
if (data && !base::OS::CommitRegion(data, length, false)) {
base::OS::ReleaseRegion(data, length);
CHECK(base::OS::ReleaseRegion(data, length));
return nullptr;
}
#if defined(LEAK_SANITIZER)
......
......@@ -443,9 +443,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
bool result = base::OS::ReleaseRegion(base, size);
USE(result);
DCHECK(result);
CHECK(base::OS::ReleaseRegion(base, size));
}
}
......
......@@ -228,6 +228,7 @@ TEST(CodeRange) {
// kMaxRegularHeapObjectSize.
size_t requested = (kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
requested = RoundUp(requested, MemoryAllocator::CodePageGuardSize());
size_t allocated = 0;
// The request size has to be at least 2 code guard pages larger than the
......
......@@ -24,7 +24,7 @@ TEST(OSReserveMemory) {
int* addr = static_cast<int*>(mem_addr);
addr[KB - 1] = 2;
CHECK(OS::UncommitRegion(mem_addr, block_size));
OS::ReleaseRegion(mem_addr, mem_size);
CHECK(OS::ReleaseRegion(mem_addr, mem_size));
}
#ifdef V8_CC_GNU
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment