Commit 1a0423b5 authored by vegorov@chromium.org

Fix leakage of virtual address space on the Linux platform.

Ensure that unmap return values are checked in debug mode.

R=erik.corry@gmail.com
BUG=v8:1701

Review URL: http://codereview.chromium.org/8060052

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9480 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent af20990e
@@ -477,7 +477,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
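The pattern in this and the following hunks is the same throughout: every raw munmap call is routed through OS::Free so the result can be checked in debug builds, per the commit message. A minimal sketch of what such a wrapper looks like (assumed shape with a hypothetical name, not the literal V8 source):

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Release a mapping and verify success in debug builds only.
static void FreePages(void* address, size_t size) {
  int result = munmap(address, size);
  (void) result;        // keep release builds free of unused-variable warnings
  assert(result == 0);  // the debug-mode check the commit message refers to
}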
@@ -559,7 +559,7 @@ void OS::SignalCodeMovingGC() {
   void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
                     fileno(f), 0);
   ASSERT(addr != MAP_FAILED);
-  munmap(addr, size);
+  OS::Free(addr, size);
   fclose(f);
 }
@@ -621,21 +621,31 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
                            kMmapFd,
                            kMmapFdOffset);
   if (reservation == MAP_FAILED) return;
   Address base = static_cast<Address>(reservation);
   Address aligned_base = RoundUp(base, alignment);
-  ASSERT(base <= aligned_base);
+  ASSERT_LE(base, aligned_base);
   // Unmap extra memory reserved before and after the desired block.
-  size_t bytes_prior = static_cast<size_t>(aligned_base - base);
-  if (bytes_prior > 0) {
-    munmap(base, bytes_prior);
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
   }
-  if (static_cast<size_t>(aligned_base - base) < request_size - size) {
-    munmap(aligned_base + size, request_size - size - bytes_prior);
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
   }
+  ASSERT(aligned_size == request_size);
   address_ = static_cast<void*>(aligned_base);
-  size_ = size;
+  size_ = aligned_size;
 }
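Both POSIX constructors use the same over-reserve-and-trim idiom: reserve size plus alignment bytes of PROT_NONE address space, round the base up to the requested alignment, then return the unused prefix and suffix to the OS while keeping request_size in sync with what actually stays mapped. A self-contained sketch of the idiom, assuming Linux mmap semantics and that size and alignment are power-of-two multiples of the page size:

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

// Reserve `size` bytes whose base address is `alignment`-aligned.
void* ReserveAligned(size_t size, size_t alignment) {
  size_t request_size = size + alignment;
  void* reservation = mmap(NULL, request_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (reservation == MAP_FAILED) return NULL;

  uintptr_t base = reinterpret_cast<uintptr_t>(reservation);
  uintptr_t aligned_base = (base + alignment - 1) & ~(alignment - 1);

  if (aligned_base != base) {
    // Trim the misaligned prefix and shrink the bookkeeping to match.
    munmap(reservation, aligned_base - base);
    request_size -= aligned_base - base;
  }
  if (request_size > size) {
    // Trim everything past the aligned block we actually want to keep.
    munmap(reinterpret_cast<void*>(aligned_base + size), request_size - size);
  }
  return reinterpret_cast<void*>(aligned_base);
}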
@@ -228,7 +228,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
@@ -353,21 +353,31 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
                            kMmapFd,
                            kMmapFdOffset);
   if (reservation == MAP_FAILED) return;
   Address base = static_cast<Address>(reservation);
   Address aligned_base = RoundUp(base, alignment);
-  ASSERT(base <= aligned_base);
+  ASSERT_LE(base, aligned_base);
   // Unmap extra memory reserved before and after the desired block.
-  size_t bytes_prior = static_cast<size_t>(aligned_base - base);
-  if (bytes_prior > 0) {
-    munmap(base, bytes_prior);
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
   }
-  if (static_cast<size_t>(aligned_base - base) < request_size - size) {
-    munmap(aligned_base + size, request_size - size - bytes_prior);
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
   }
+  ASSERT(aligned_size == request_size);
   address_ = static_cast<void*>(aligned_base);
-  size_ = size;
+  size_ = aligned_size;
 }
@@ -245,7 +245,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
@@ -342,7 +342,8 @@ VirtualMemory::VirtualMemory(size_t size) {
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    OS::Free(address(), size());
+    address_ = MAP_FAILED;
   }
 }
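Storing MAP_FAILED back into address_ suggests this platform file uses MAP_FAILED as its "nothing reserved" sentinel, so the IsReserved() guard above presumably reduces to a comparison like the following (assumed shape, not confirmed by the diff):

// Assumed companion predicate for the MAP_FAILED sentinel.
bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}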
@@ -1413,7 +1413,9 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
   if (address == NULL) return;
   Address base = RoundUp(static_cast<Address>(address), alignment);
   // Try reducing the size by freeing and then reallocating a specific area.
-  ReleaseRegion(address, request_size);
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  ASSERT(result);
   address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
   if (address != NULL) {
     request_size = size;
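On Windows the alignment trick takes a different shape: VirtualFree cannot release part of a reservation, so the code releases the whole over-sized block and immediately re-reserves just the aligned subrange, asserting only that the release succeeded (the re-reservation can legitimately fail if another thread claims the range in between). A sketch of the idiom with a hypothetical helper name:

#include <windows.h>
#include <stdint.h>

// Release-then-re-reserve: the second VirtualAlloc may return NULL if
// another thread grabs the range between the two calls; callers retry.
void* ReserveAlignedWin(size_t size, size_t alignment) {
  void* address = VirtualAlloc(NULL, size + alignment,
                               MEM_RESERVE, PAGE_NOACCESS);
  if (address == NULL) return NULL;

  uintptr_t base = reinterpret_cast<uintptr_t>(address);
  uintptr_t aligned_base = (base + alignment - 1) & ~(alignment - 1);

  // Releasing a reservation frees all of it; partial trims are impossible.
  if (!VirtualFree(address, 0, MEM_RELEASE)) return NULL;
  return VirtualAlloc(reinterpret_cast<void*>(aligned_base), size,
                      MEM_RESERVE, PAGE_NOACCESS);
}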
@@ -351,12 +351,14 @@ class VirtualMemory {
   void Release() {
     ASSERT(IsReserved());
-    // Notice: Order is somportant here. The VirtualMemory object might live
+    // Notice: Order is important here. The VirtualMemory object might live
     // inside the allocated region.
     void* address = address_;
     size_t size = size_;
     Reset();
-    ReleaseRegion(address, size);
+    bool result = ReleaseRegion(address, size);
+    USE(result);
+    ASSERT(result);
   }
   // Assign control of the reserved region to a different VirtualMemory object.
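The ordering comment is easy to miss: a VirtualMemory object may itself be allocated inside the region it manages, so its members can vanish the instant the region is released. Copying the fields to locals and calling Reset() before ReleaseRegion() keeps the method from touching freed memory. A contrived illustration with hypothetical names:

#include <sys/mman.h>
#include <cstddef>

// The bookkeeping struct lives at the start of the mapping it describes.
struct SelfHostedRegion {
  void* address_;
  size_t size_;
};

void ReleaseSelfHosted(SelfHostedRegion* region) {
  void* address = region->address_;  // copy fields out while still mapped
  size_t size = region->size_;
  region->address_ = NULL;           // reset before the unmap...
  region->size_ = 0;
  munmap(address, size);             // ...after this, `region` may be gone
}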
@@ -341,7 +341,9 @@ void MemoryAllocator::FreeMemory(Address base,
     isolate_->code_range()->FreeRawMemory(base, size);
   } else {
     ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
-    VirtualMemory::ReleaseRegion(base, size);
+    bool result = VirtualMemory::ReleaseRegion(base, size);
+    USE(result);
+    ASSERT(result);
   }
 }
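The bool result / USE(result) / ASSERT(result) triple recurs at every call site above because ASSERT compiles away in release builds, which would otherwise leave result unused and trip compiler warnings. A minimal stand-in for the two helpers (assumed to mirror V8's, which may differ in detail):

#include <cassert>

// No-op consumer: marks a value as deliberately used so release builds
// (where ASSERT vanishes) emit no unused-variable warnings.
template <typename T>
static inline void USE(T) { }

// Debug-only assertion in the style the diff relies on.
#ifdef DEBUG
#define ASSERT(condition) assert(condition)
#else
#define ASSERT(condition) ((void) 0)
#endif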
@@ -35,8 +35,6 @@ bugs: FAIL
 regress/regress-1119: FAIL
 ##############################################################################
-# NewGC: http://code.google.com/p/v8/issues/detail?id=1701
-array-join: SKIP
 # NewGC: BUG(1719) slow to collect arrays over several contexts.
 regress/regress-524: SKIP