Commit 0c3910f8 authored by Eric Holk, committed by Commit Bot

Revert "[platform] check return values from memory operations"

This reverts commit 32f30f63.

Reason for revert: broken Fuchsia build, https://logs.chromium.org/v/?s=chromium%2Fbb%2Fclient.v8%2FV8_Fuchsia%2F460%2F%2B%2Frecipes%2Fsteps%2Fcompile%2F0%2Fstdout

Original change's description:
> [platform] check return values from memory operations
> 
> This change adds DCHECKs for calls such as mprotect, as well as marking some of
> the memory allocation and deallocation routines as V8_MUST_USE_RESULT. This
> additional checking gives us more useful information for failure in the presence
> of, for example, address space exhaustion.
> 
> Bug: 
> Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
> Change-Id: I5bc76c1da6160262d3d556fea49d284ddd4e02c5
> Reviewed-on: https://chromium-review.googlesource.com/721267
> Commit-Queue: Eric Holk <eholk@chromium.org>
> Reviewed-by: Hannes Payer <hpayer@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#49164}

TBR=hpayer@chromium.org,mlippautz@google.com,eholk@chromium.org

Change-Id: Ie4b57b45c801dcce7884645f50ff74f833de6dc4
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/756137
Reviewed-by: Eric Holk <eholk@chromium.org>
Commit-Queue: Eric Holk <eholk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49165}
parent 32f30f63
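
For context on the pattern this revert removes: the original change made failures of low-level memory operations (mprotect, VirtualFree, and friends) abort loudly via CHECK, while the restored code asserts only in debug builds via DCHECK and silences unused-variable warnings with USE. Below is a minimal standalone POSIX sketch of the two styles; the CHECK, DCHECK, and USE macros here are simplified stand-ins for illustration, not V8's actual definitions.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>
#include <unistd.h>

// Simplified stand-ins for V8's macros (illustration only).
#define CHECK(cond)                                      \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #cond); \
      std::abort();                                      \
    }                                                    \
  } while (false)
#define DCHECK(cond) assert(cond)
#define USE(expr) ((void)(expr))

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* region = mmap(nullptr, page, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  CHECK(region != MAP_FAILED);

  // Style introduced by the reverted commit: abort in all builds if the
  // protection change fails (e.g. under address space exhaustion).
  CHECK(0 == mprotect(region, page, PROT_NONE));

  // Style restored by this revert: assert only in debug builds, and keep the
  // variable referenced in release builds so it does not trigger warnings.
  bool released = (munmap(region, page) == 0);
  DCHECK(released);
  USE(released);
  return 0;
}

The behavioral difference matters in release builds: with CHECK a failing mprotect terminates the process immediately with a useful message, whereas with DCHECK the failure is silent outside debug builds.
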
@@ -122,7 +122,9 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    CHECK(base::OS::ReleaseRegion(address(), size()));
+    bool result = base::OS::ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
   }
 }
@@ -143,7 +145,8 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
 bool VirtualMemory::Guard(void* address) {
   CHECK(InVM(address, base::OS::CommitPageSize()));
-  return base::OS::Guard(address, base::OS::CommitPageSize());
+  base::OS::Guard(address, base::OS::CommitPageSize());
+  return true;
 }
 size_t VirtualMemory::ReleasePartial(void* free_start) {
@@ -160,7 +163,9 @@ size_t VirtualMemory::ReleasePartial(void* free_start) {
   __lsan_unregister_root_region(address_, size_);
   __lsan_register_root_region(address_, size_ - free_size);
 #endif
-  CHECK(base::OS::ReleasePartialRegion(free_start, free_size));
+  const bool result = base::OS::ReleasePartialRegion(free_start, free_size);
+  USE(result);
+  DCHECK(result);
   size_ -= free_size;
   return free_size;
 }
@@ -173,7 +178,9 @@ void VirtualMemory::Release() {
   size_t size = size_;
   CHECK(InVM(address, size));
   Reset();
-  CHECK(base::OS::ReleaseRegion(address, size));
+  bool result = base::OS::ReleaseRegion(address, size);
+  USE(result);
+  DCHECK(result);
 }
 void VirtualMemory::TakeControl(VirtualMemory* from) {
@@ -504,7 +504,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
         return Free(data, length);
       }
       case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
-        CHECK(base::OS::ReleaseRegion(data, length));
+        base::OS::ReleaseRegion(data, length);
         return;
       }
     }
@@ -515,7 +515,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
                      v8::ArrayBuffer::Allocator::Protection protection) {
     switch (protection) {
       case v8::ArrayBuffer::Allocator::Protection::kNoAccess: {
-        CHECK(base::OS::Guard(data, length));
+        base::OS::Guard(data, length);
         return;
       }
       case v8::ArrayBuffer::Allocator::Protection::kReadWrite: {
@@ -137,7 +137,10 @@ size_t OS::AllocatePageSize() {
   return static_cast<size_t>(sysconf(_SC_PAGESIZE));
 }
-size_t OS::CommitPageSize() { return sysconf(_SC_PAGESIZE); }
+size_t OS::CommitPageSize() {
+  static size_t page_size = getpagesize();
+  return page_size;
+}
 void* OS::GetRandomMmapAddr() {
 #if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
@@ -237,12 +240,12 @@ void OS::SetReadAndExecutable(void* address, const size_t size) {
 // Create guard pages.
 #if !V8_OS_FUCHSIA
-bool OS::Guard(void* address, const size_t size) {
+void OS::Guard(void* address, const size_t size) {
 #if V8_OS_CYGWIN
   DWORD oldprotect;
-  return VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
 #else
-  return 0 == mprotect(address, size, PROT_NONE);
+  mprotect(address, size, PROT_NONE);
 #endif
 }
 #endif  // !V8_OS_FUCHSIA
@@ -793,31 +793,28 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
 }
 void OS::Free(void* address, const size_t size) {
-  DCHECK(IsPageAlignedRange(address, size));
-  CHECK(VirtualFree(address, 0, MEM_RELEASE));
+  // TODO(1240712): VirtualFree has a return value which is ignored here.
+  VirtualFree(address, 0, MEM_RELEASE);
   USE(size);
 }
 void OS::SetReadAndExecutable(void* address, const size_t size) {
-  DCHECK(IsPageAlignedRange(address, size));
   DWORD old_protect;
-  CHECK(VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect));
+  CHECK_NE(NULL,
+           VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect));
 }
-bool OS::Guard(void* address, const size_t size) {
-  DCHECK(IsPageAlignedRange(address, size));
-  // Windows requires guard pages to be committed, so we VirtualAlloc to both
-  // commit and change the protection.
-  return VirtualAlloc(address, size, MEM_COMMIT, PAGE_NOACCESS) == address;
+void OS::Guard(void* address, const size_t size) {
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
 }
 void OS::SetReadAndWritable(void* address, const size_t size, bool commit) {
-  DCHECK(IsPageAlignedRange(address, size));
   if (commit) {
-    CHECK_NOT_NULL(VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE));
+    CHECK(VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE));
   } else {
     DWORD oldprotect;
-    CHECK(VirtualProtect(address, size, PAGE_READWRITE, &oldprotect));
+    CHECK_NE(NULL, VirtualProtect(address, size, PAGE_READWRITE, &oldprotect));
   }
 }
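
The comment deleted in the hunk above records a Windows-specific detail: protection cannot be changed on a page that is only reserved, so the reverted change created no-access guard pages by committing with PAGE_NOACCESS in a single VirtualAlloc call. A standalone sketch of that idea follows (illustrative only, not V8 code):

#include <windows.h>
#include <cstdio>

int main() {
  SYSTEM_INFO info;
  GetSystemInfo(&info);
  const SIZE_T page = info.dwPageSize;

  // Reserve two pages, then commit only the first one as read/write.
  void* base = VirtualAlloc(nullptr, 2 * page, MEM_RESERVE, PAGE_NOACCESS);
  if (base == nullptr) return 1;
  if (VirtualAlloc(base, page, MEM_COMMIT, PAGE_READWRITE) == nullptr) return 1;

  // Make the second page a no-access guard page. It must be committed and
  // protected in one step: VirtualProtect alone fails on uncommitted pages.
  void* guard = static_cast<char*>(base) + page;
  if (VirtualAlloc(guard, page, MEM_COMMIT, PAGE_NOACCESS) != guard) return 1;

  std::puts("guard page established");
  VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}
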
@@ -846,7 +843,9 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
   }
   uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
   // Try reducing the size by freeing and then reallocating a specific area.
-  CHECK(ReleaseRegion(address, request_size));
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  DCHECK(result);
   address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
   if (address != nullptr) {
     request_size = size;
@@ -861,33 +860,31 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
   }
   *allocated = request_size;
-  return address;
+  return static_cast<void*>(address);
 }
 // static
 bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
-  DCHECK_NOT_NULL(address);
-  DCHECK(IsPageAlignedRange(address, size));
   int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  return VirtualAlloc(address, size, MEM_COMMIT, prot) == address;
+  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+  return true;
 }
 // static
 bool OS::UncommitRegion(void* address, size_t size) {
-  DCHECK(IsPageAlignedRange(address, size));
-  return VirtualFree(address, size, MEM_DECOMMIT);
+  return VirtualFree(address, size, MEM_DECOMMIT) != 0;
 }
 // static
 bool OS::ReleaseRegion(void* address, size_t size) {
-  DCHECK(IsPageAlignedRange(address, size));
-  return VirtualFree(address, 0, MEM_RELEASE);
+  return VirtualFree(address, 0, MEM_RELEASE) != 0;
 }
 // static
 bool OS::ReleasePartialRegion(void* address, size_t size) {
-  DCHECK(IsPageAlignedRange(address, size));
-  return VirtualFree(address, size, MEM_DECOMMIT);
+  return VirtualFree(address, size, MEM_DECOMMIT) != 0;
 }
 // static
@@ -179,19 +179,11 @@ class V8_BASE_EXPORT OS {
   // Frees memory allocated by a call to Allocate.
   static void Free(void* address, const size_t size);
-  static inline bool IsPageAligned(void* address) {
-    return reinterpret_cast<uintptr_t>(address) % CommitPageSize() == 0;
-  }
-  static inline bool IsPageAlignedRange(void* address, size_t length) {
-    return IsPageAligned(address) &&
-           IsPageAligned(static_cast<char*>(address) + length);
-  }
   // Mark a region of memory executable and readable but not writable.
   static void SetReadAndExecutable(void* address, const size_t size);
   // Assign memory as a guard page so that access will cause an exception.
-  static V8_WARN_UNUSED_RESULT bool Guard(void* address, const size_t size);
+  static void Guard(void* address, const size_t size);
   // Make a region of memory non-executable but readable and writable.
   static void SetReadAndWritable(void* address, const size_t size, bool commit);
@@ -205,16 +197,14 @@ class V8_BASE_EXPORT OS {
   static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                                     size_t* allocated);
-  static V8_WARN_UNUSED_RESULT bool CommitRegion(void* address, size_t size,
-                                                 bool is_executable);
+  static bool CommitRegion(void* address, size_t size, bool is_executable);
-  static V8_WARN_UNUSED_RESULT bool UncommitRegion(void* address, size_t size);
+  static bool UncommitRegion(void* address, size_t size);
-  static V8_WARN_UNUSED_RESULT bool ReleaseRegion(void* address, size_t size);
+  static bool ReleaseRegion(void* address, size_t size);
   // Release part of a reserved address range.
-  static V8_WARN_UNUSED_RESULT bool ReleasePartialRegion(void* address,
-                                                          size_t size);
+  static bool ReleasePartialRegion(void* address, size_t size);
   static bool HasLazyCommits();
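
The header hunk above also drops the V8_WARN_UNUSED_RESULT annotations that the reverted change had added to the region-management declarations. As a rough illustration of what such an annotation provides (the macro name and definition below are assumptions for the sketch, not V8's), a caller that silently discards the return value gets a compiler warning:

#include <cstddef>

#if defined(__GNUC__) || defined(__clang__)
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define WARN_UNUSED_RESULT
#endif

// Stand-in for a release routine whose result should not be ignored.
WARN_UNUSED_RESULT bool ReleaseRegionExample(void* address, size_t size) {
  (void)address;
  (void)size;
  return true;
}

int main() {
  char buffer[64];
  ReleaseRegionExample(buffer, sizeof(buffer));     // warning: result ignored
  bool ok = ReleaseRegionExample(buffer, sizeof(buffer));
  (void)ok;                                         // result handled, no warning
  return 0;
}
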
@@ -142,7 +142,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
   void Free(void* data, size_t length) override {
 #if USE_VM
     if (RoundToPageSize(&length)) {
-      CHECK(base::OS::ReleaseRegion(data, length));
+      base::OS::ReleaseRegion(data, length);
       return;
     }
 #endif
@@ -162,7 +162,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
   void* VirtualMemoryAllocate(size_t length) {
     void* data = base::OS::ReserveRegion(length, nullptr);
     if (data && !base::OS::CommitRegion(data, length, false)) {
-      CHECK(base::OS::ReleaseRegion(data, length));
+      base::OS::ReleaseRegion(data, length);
       return nullptr;
     }
 #if defined(LEAK_SANITIZER)
@@ -443,7 +443,9 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
     code_range()->FreeRawMemory(base, size);
   } else {
     DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
-    CHECK(base::OS::ReleaseRegion(base, size));
+    bool result = base::OS::ReleaseRegion(base, size);
+    USE(result);
+    DCHECK(result);
   }
 }
@@ -228,7 +228,6 @@ TEST(CodeRange) {
   // kMaxRegularHeapObjectSize.
   size_t requested = (kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
                      Pseudorandom() % 5000 + 1;
-  requested = RoundUp(requested, MemoryAllocator::CodePageGuardSize());
   size_t allocated = 0;
   // The request size has to be at least 2 code guard pages larger than the
@@ -24,7 +24,7 @@ TEST(OSReserveMemory) {
   int* addr = static_cast<int*>(mem_addr);
   addr[KB - 1] = 2;
   CHECK(OS::UncommitRegion(mem_addr, block_size));
-  CHECK(OS::ReleaseRegion(mem_addr, mem_size));
+  OS::ReleaseRegion(mem_addr, mem_size);
 }
 #ifdef V8_CC_GNU