Commit 4fc43530 authored by Michael Hablich, committed by Commit Bot

Revert "[Memory] Move VirtualMemory out of base:: platform."

This reverts commit 4dd293d9.

Reason for revert: Blocks roll: https://chromium-review.googlesource.com/c/chromium/src/+/669785

Original change's description:
> [Memory] Move VirtualMemory out of base:: platform.
> 
> - Moves base::VirtualMemory to v8::internal::VirtualMemory.
> - Makes VirtualMemory platform-independent by moving internals to new
>   OS:: static methods, for each platform.
> 
> This will make it easier to delegate memory management in VirtualMemory
> to V8::Platform, so that embedders like Blink can override it. We can't
> depend on V8::Platform in base/platform.
> 
> Bug: chromium:756050
> Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
> Change-Id: Iadfe230b6850bd917727a373f277afded9883adf
> Reviewed-on: https://chromium-review.googlesource.com/653214
> Commit-Queue: Bill Budge <bbudge@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#48048}

TBR=bbudge@chromium.org,ulan@chromium.org,hpayer@chromium.org,mlippautz@chromium.org,scottmg@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: chromium:756050
Change-Id: Ice2618ef72950e1b64c31434a239c626aa5e5970
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/670843
Reviewed-by: Michael Hablich <hablich@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Hablich <hablich@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48062}
parent 75877ddb
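Note: the retry pattern visible in the first hunk below is the behavior being restored. AllocVirtualMemory tries to reserve address space once and, on failure, calls V8::GetCurrentPlatform()->OnCriticalMemoryPressure() so the embedder can shed memory before a single retry. A minimal standalone sketch of that shape (TryReserve and OnCriticalMemoryPressure are hypothetical stand-ins, not code from this CL):

    #include <sys/mman.h>
    #include <cstddef>

    // Hypothetical stand-in: an embedder would drop caches here.
    static void OnCriticalMemoryPressure() {}

    // Hypothetical stand-in for the reservation primitive.
    static void* TryReserve(size_t size, void* hint) {
      void* p = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return p == MAP_FAILED ? nullptr : p;
    }

    // Reserve address space; on failure give the embedder one chance to free
    // memory, then retry once -- the same shape as AllocVirtualMemory below.
    void* ReserveWithRetry(size_t size, void* hint) {
      if (void* region = TryReserve(size, hint)) return region;
      OnCriticalMemoryPressure();
      return TryReserve(size, hint);  // second and final attempt
    }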
@@ -99,106 +99,29 @@ void AlignedFree(void *ptr) {
 #endif
 }
 
-VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(base::OS::ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(nullptr), size_(0) {
-  address_ = base::OS::ReserveAlignedRegion(size, alignment, hint, &size_);
-}
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = base::OS::ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = nullptr;
-  size_ = 0;
-}
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  CHECK(InVM(address, size));
-  return base::OS::CommitRegion(address, size, is_executable);
-}
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  CHECK(InVM(address, size));
-  return base::OS::UncommitRegion(address, size);
-}
-
-bool VirtualMemory::Guard(void* address) {
-  CHECK(InVM(address, base::OS::CommitPageSize()));
-  base::OS::Guard(address, base::OS::CommitPageSize());
-  return true;
-}
-
-size_t VirtualMemory::ReleasePartial(void* free_start) {
-  DCHECK(IsReserved());
-  // Notice: Order is important here. The VirtualMemory object might live
-  // inside the allocated region.
-  const size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
-                               reinterpret_cast<size_t>(address_));
-  CHECK(InVM(free_start, size));
-  DCHECK_LT(address_, free_start);
-  DCHECK_LT(free_start, reinterpret_cast<void*>(
-                            reinterpret_cast<size_t>(address_) + size_));
-  const bool result =
-      base::OS::ReleasePartialRegion(address_, size_, free_start, size);
-  USE(result);
-  DCHECK(result);
-  size_ -= size;
-  return size;
-}
-
-void VirtualMemory::Release() {
-  DCHECK(IsReserved());
-  // Notice: Order is important here. The VirtualMemory object might live
-  // inside the allocated region.
-  void* address = address_;
-  size_t size = size_;
-  CHECK(InVM(address, size));
-  Reset();
-  bool result = base::OS::ReleaseRegion(address, size);
-  USE(result);
-  DCHECK(result);
-}
-
-void VirtualMemory::TakeControl(VirtualMemory* from) {
-  DCHECK(!IsReserved());
-  address_ = from->address_;
-  size_ = from->size_;
-  from->Reset();
-}
-
-bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
-  VirtualMemory first_try(size, hint);
+bool AllocVirtualMemory(size_t size, void* hint, base::VirtualMemory* result) {
+  base::VirtualMemory first_try(size, hint);
   if (first_try.IsReserved()) {
     result->TakeControl(&first_try);
     return true;
   }
 
   V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
-  VirtualMemory second_try(size, hint);
+  base::VirtualMemory second_try(size, hint);
   result->TakeControl(&second_try);
   return result->IsReserved();
 }
 
 bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
-                               VirtualMemory* result) {
-  VirtualMemory first_try(size, alignment, hint);
+                               base::VirtualMemory* result) {
+  base::VirtualMemory first_try(size, alignment, hint);
   if (first_try.IsReserved()) {
     result->TakeControl(&first_try);
     return true;
   }
 
   V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
-  VirtualMemory second_try(size, alignment, hint);
+  base::VirtualMemory second_try(size, alignment, hint);
   result->TakeControl(&second_try);
   return result->IsReserved();
 }
...
@@ -76,88 +76,9 @@ class FreeStoreAllocationPolicy {
 void* AlignedAlloc(size_t size, size_t alignment);
 void AlignedFree(void *ptr);
 
-// Represents and controls an area of reserved memory.
-class V8_EXPORT_PRIVATE VirtualMemory {
- public:
-  // Empty VirtualMemory object, controlling no reserved memory.
-  VirtualMemory();
-
-  // Reserves virtual memory with size.
-  explicit VirtualMemory(size_t size, void* hint);
-
-  // Reserves virtual memory containing an area of the given size that
-  // is aligned per alignment. This may not be at the position returned
-  // by address().
-  VirtualMemory(size_t size, size_t alignment, void* hint);
-
-  // Construct a virtual memory by assigning it some already mapped address
-  // and size.
-  VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
-
-  // Releases the reserved memory, if any, controlled by this VirtualMemory
-  // object.
-  ~VirtualMemory();
-
-  // Returns whether the memory has been reserved.
-  bool IsReserved() const { return address_ != nullptr; }
-
-  // Initialize or resets an embedded VirtualMemory object.
-  void Reset();
-
-  // Returns the start address of the reserved memory.
-  // If the memory was reserved with an alignment, this address is not
-  // necessarily aligned. The user might need to round it up to a multiple of
-  // the alignment to get the start of the aligned block.
-  void* address() const {
-    DCHECK(IsReserved());
-    return address_;
-  }
-
-  void* end() const {
-    DCHECK(IsReserved());
-    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
-                                   size_);
-  }
-
-  // Returns the size of the reserved memory. The returned value is only
-  // meaningful when IsReserved() returns true.
-  // If the memory was reserved with an alignment, this size may be larger
-  // than the requested size.
-  size_t size() const { return size_; }
-
-  // Commits real memory. Returns whether the operation succeeded.
-  bool Commit(void* address, size_t size, bool is_executable);
-
-  // Uncommit real memory. Returns whether the operation succeeded.
-  bool Uncommit(void* address, size_t size);
-
-  // Creates a single guard page at the given address.
-  bool Guard(void* address);
-
-  // Releases the memory after |free_start|. Returns the bytes released.
-  size_t ReleasePartial(void* free_start);
-
-  void Release();
-
-  // Assign control of the reserved region to a different VirtualMemory object.
-  // The old object is no longer functional (IsReserved() returns false).
-  void TakeControl(VirtualMemory* from);
-
-  bool InVM(void* address, size_t size) {
-    return (reinterpret_cast<uintptr_t>(address_) <=
-            reinterpret_cast<uintptr_t>(address)) &&
-           ((reinterpret_cast<uintptr_t>(address_) + size_) >=
-            (reinterpret_cast<uintptr_t>(address) + size));
-  }
-
- private:
-  void* address_;  // Start address of the virtual memory.
-  size_t size_;    // Size of the virtual memory.
-};
-
-bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
+bool AllocVirtualMemory(size_t size, void* hint, base::VirtualMemory* result);
 bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
-                               VirtualMemory* result);
+                               base::VirtualMemory* result);
 
 }  // namespace internal
 }  // namespace v8
...
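Note: a usage sketch against the class declared in the removed hunk above (a V8-internal API, shown for orientation only; assumes a 4 KB commit page size — not code from this CL):

    // Reserve 1 MB, commit the first page read/write, then release everything.
    // (v8::internal::VirtualMemory is the pre-revert spelling of the type.)
    v8::internal::VirtualMemory vm(size_t{1} << 20, nullptr /* hint */);
    if (vm.IsReserved()) {
      void* first_page = vm.address();
      if (vm.Commit(first_page, 4096, false /* not executable */)) {
        // ... use the committed page ...
        vm.Uncommit(first_page, 4096);
      }
      vm.Release();  // otherwise the destructor releases the region
    }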
@@ -485,7 +485,8 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
   virtual void Free(void* data, size_t) { free(data); }
 
   virtual void* Reserve(size_t length) {
-    return base::OS::ReserveRegion(length, base::OS::GetRandomMmapAddr());
+    return base::VirtualMemory::ReserveRegion(length,
+                                              base::OS::GetRandomMmapAddr());
   }
 
   virtual void Free(void* data, size_t length,
@@ -495,7 +496,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
       return Free(data, length);
     }
     case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
-      base::OS::ReleaseRegion(data, length);
+      base::VirtualMemory::ReleaseRegion(data, length);
      return;
     }
   }
...
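Note: beneath the d8 hunk above, Reserve() hands out PROT_NONE address space and the kReservation flavor of Free() unmaps it. A self-contained POSIX sketch of that reserve/release pair (stand-ins for the base::VirtualMemory static helpers, not code from this CL):

    #include <sys/mman.h>
    #include <cstddef>

    // Reserve address space only: pages are mapped PROT_NONE and become
    // usable only after a later mprotect/mmap makes them accessible.
    void* ReserveRegion(size_t size) {
      void* p = mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return p == MAP_FAILED ? nullptr : p;
    }

    // Release a reservation created by ReserveRegion().
    bool ReleaseRegion(void* base, size_t size) { return munmap(base, size) == 0; }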
@@ -65,46 +65,87 @@ double AIXTimezoneCache::LocalTimeOffset() {
 
 TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
 
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
-                     kMmapFdOffset);
+  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 
   if (mbase == MAP_FAILED) return NULL;
   *allocated = msize;
   return mbase;
 }
 
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
-  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
-                      kMmapFd, kMmapFdOffset);
-  if (result == MAP_FAILED) return nullptr;
+static unsigned StringToLong(char* buffer) {
+  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
+}
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  static const int MAP_LENGTH = 1024;
+  int fd = open("/proc/self/maps", O_RDONLY);
+  if (fd < 0) return result;
+  while (true) {
+    char addr_buffer[11];
+    addr_buffer[0] = '0';
+    addr_buffer[1] = 'x';
+    addr_buffer[10] = 0;
+    ssize_t rc = read(fd, addr_buffer + 2, 8);
+    if (rc < 8) break;
+    unsigned start = StringToLong(addr_buffer);
+    rc = read(fd, addr_buffer + 2, 1);
+    if (rc < 1) break;
+    if (addr_buffer[2] != '-') break;
+    rc = read(fd, addr_buffer + 2, 8);
+    if (rc < 8) break;
+    unsigned end = StringToLong(addr_buffer);
+    char buffer[MAP_LENGTH];
+    int bytes_read = -1;
+    do {
+      bytes_read++;
+      if (bytes_read >= MAP_LENGTH - 1) break;
+      rc = read(fd, buffer + bytes_read, 1);
+      if (rc < 1) break;
+    } while (buffer[bytes_read] != '\n');
+    buffer[bytes_read] = 0;
+    // Ignore mappings that are not executable.
+    if (buffer[3] != 'x') continue;
+    char* start_of_path = index(buffer, '/');
+    // There may be no filename in this line. Skip to next.
+    if (start_of_path == NULL) continue;
+    buffer[bytes_read] = 0;
+    result.push_back(SharedLibraryAddress(start_of_path, start, end));
+  }
+  close(fd);
   return result;
 }
 
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                               size_t* allocated) {
+void OS::SignalCodeMovingGC() {}
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
+
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+    : address_(ReserveRegion(size, hint)), size_(size) {}
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+    : address_(NULL), size_(0) {
   DCHECK((alignment % OS::AllocateAlignment()) == 0);
   hint = AlignedAddress(hint, alignment);
   size_t request_size =
       RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* result = ReserveRegion(request_size, hint);
-  if (result == nullptr) {
-    *allocated = 0;
-    return nullptr;
-  }
+  void* reservation = mmap(hint, request_size, PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd, kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
 
-  uint8_t* base = static_cast<uint8_t*>(result);
+  uint8_t* base = static_cast<uint8_t*>(reservation);
   uint8_t* aligned_base = RoundUp(base, alignment);
   DCHECK_LE(base, aligned_base);
@@ -128,84 +169,72 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-  *allocated = aligned_size;
-  return static_cast<void*>(aligned_base);
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
 }
 
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-
-  if (mprotect(address, size, prot) == -1) return false;
-
-  return true;
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
 }
 
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
-  return mprotect(address, size, PROT_NONE) != -1;
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
 }
 
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
-                              size_t free_size) {
-  return munmap(free_start, free_size) == 0;
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
 }
 
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
-  return munmap(address, size) == 0;
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
 }
 
-// static
-bool OS::HasLazyCommits() { return true; }
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
 
-static unsigned StringToLong(char* buffer) {
-  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
+                      kMmapFd, kMmapFdOffset);
+  if (result == MAP_FAILED) return NULL;
+  return result;
 }
 
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  static const int MAP_LENGTH = 1024;
-  int fd = open("/proc/self/maps", O_RDONLY);
-  if (fd < 0) return result;
-  while (true) {
-    char addr_buffer[11];
-    addr_buffer[0] = '0';
-    addr_buffer[1] = 'x';
-    addr_buffer[10] = 0;
-    ssize_t rc = read(fd, addr_buffer + 2, 8);
-    if (rc < 8) break;
-    unsigned start = StringToLong(addr_buffer);
-    rc = read(fd, addr_buffer + 2, 1);
-    if (rc < 1) break;
-    if (addr_buffer[2] != '-') break;
-    rc = read(fd, addr_buffer + 2, 8);
-    if (rc < 8) break;
-    unsigned end = StringToLong(addr_buffer);
-    char buffer[MAP_LENGTH];
-    int bytes_read = -1;
-    do {
-      bytes_read++;
-      if (bytes_read >= MAP_LENGTH - 1) break;
-      rc = read(fd, buffer + bytes_read, 1);
-      if (rc < 1) break;
-    } while (buffer[bytes_read] != '\n');
-    buffer[bytes_read] = 0;
-    // Ignore mappings that are not executable.
-    if (buffer[3] != 'x') continue;
-    char* start_of_path = index(buffer, '/');
-    // There may be no filename in this line. Skip to next.
-    if (start_of_path == NULL) continue;
-    buffer[bytes_read] = 0;
-    result.push_back(SharedLibraryAddress(start_of_path, start, end));
-  }
-  close(fd);
-  return result;
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (mprotect(base, size, prot) == -1) return false;
+  return true;
 }
 
-void OS::SignalCodeMovingGC() {}
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mprotect(base, size, PROT_NONE) != -1;
+}
+
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+                                         void* free_start, size_t free_size) {
+  return munmap(free_start, free_size) == 0;
+}
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+bool VirtualMemory::HasLazyCommits() { return true; }
 
 }  // namespace base
 }  // namespace v8
...
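Note: the AIX port above commits reserved pages with mprotect(), while the Linux/BSD ports further down remap them with mmap(MAP_FIXED). A self-contained sketch of the mprotect flavor (assumes the range was reserved PROT_NONE, as in ReserveRegion above; not code from this CL):

    #include <sys/mman.h>
    #include <cstddef>

    // Make an already-reserved PROT_NONE range accessible ("commit" it).
    bool CommitWithMprotect(void* base, size_t size, bool is_executable) {
      int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
      return mprotect(base, size, prot) == 0;
    }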
@@ -26,31 +26,6 @@
 namespace v8 {
 namespace base {
 
-namespace {
-
-// The VirtualMemory implementation is taken from platform-win32.cc.
-// The mmap-based virtual memory implementation as it is used on most posix
-// platforms does not work well because Cygwin does not support MAP_FIXED.
-// This causes VirtualMemory::Commit to not always commit the memory region
-// specified.
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
-                                    void* hint) {
-  LPVOID base = NULL;
-
-  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
-    // For exectutable pages try and randomize the allocation address
-    base = VirtualAlloc(hint, size, action, protection);
-  }
-
-  // After three attempts give up and let the OS find an address to use.
-  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
-  return base;
-}
-
-}  // namespace
-
 class CygwinTimezoneCache : public PosixTimezoneCache {
   const char* LocalTimezone(double time) override;
@@ -90,75 +65,6 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
   return mbase;
 }
 
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
-  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                               size_t* allocated) {
-  hint = AlignedAddress(hint, alignment);
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  size_t request_size =
-      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* address = ReserveRegion(request_size, hint);
-  if (address == NULL) {
-    *allocated = 0;
-    return nullptr;
-  }
-  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
-  // Try reducing the size by freeing and then reallocating a specific area.
-  bool result = ReleaseRegion(address, request_size);
-  USE(result);
-  DCHECK(result);
-  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
-  if (address != nullptr) {
-    request_size = size;
-    DCHECK(base == static_cast<uint8_t*>(address));
-  } else {
-    // Resizing failed, just go with a bigger area.
-    address = ReserveRegion(request_size, hint);
-    if (address == nullptr) {
-      *allocated = 0;
-      return nullptr;
-    }
-  }
-  *allocated = request_size;
-  return static_cast<void*>(address);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
-    return false;
-  }
-  return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
-  return VirtualFree(address, size, MEM_DECOMMIT) != 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
-                              size_t free_size) {
-  return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
-  return VirtualFree(address, 0, MEM_RELEASE) != 0;
-}
-
-// static
-bool OS::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddresses> result;
@@ -225,5 +131,124 @@ void OS::SignalCodeMovingGC() {
   // Nothing to do on Cygwin.
 }
 
+// The VirtualMemory implementation is taken from platform-win32.cc.
+// The mmap-based virtual memory implementation as it is used on most posix
+// platforms does not work well because Cygwin does not support MAP_FIXED.
+// This causes VirtualMemory::Commit to not always commit the memory region
+// specified.
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
+                                    void* hint) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For exectutable pages try and randomize the allocation address
+    base = VirtualAlloc(hint, size, action, protection);
+  }
+
+  // After three attempts give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+    : address_(ReserveRegion(size, hint)), size_(size) {}
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+    : address_(NULL), size_(0) {
+  hint = AlignedAddress(hint, alignment);
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size, hint);
+  if (address == NULL) return;
+  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  DCHECK(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != NULL) {
+    request_size = size;
+    DCHECK(base == static_cast<uint8_t*>(address));
+  } else {
+    // Resizing failed, just go with a bigger area.
+    address = ReserveRegion(request_size, hint);
+    if (address == NULL) return;
+  }
+  address_ = address;
+  size_ = request_size;
+}
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address_, size_);
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  DCHECK(IsReserved());
+  return UncommitRegion(address, size);
+}
+
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
+}
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+  return true;
+}
+
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_NOACCESS)) {
+    return false;
+  }
+  return true;
+}
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
 }  // namespace base
 }  // namespace v8
...
@@ -40,46 +40,91 @@ TimezoneCache* OS::CreateTimezoneCache() {
   return new PosixDefaultTimezoneCache();
 }
 
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase =
-      mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
+  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
 
   if (mbase == MAP_FAILED) return NULL;
   *allocated = msize;
   return mbase;
 }
 
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
-  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
-                      kMmapFdOffset);
-  if (result == MAP_FAILED) return NULL;
+static unsigned StringToLong(char* buffer) {
+  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
+}
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  static const int MAP_LENGTH = 1024;
+  int fd = open("/proc/self/maps", O_RDONLY);
+  if (fd < 0) return result;
+  while (true) {
+    char addr_buffer[11];
+    addr_buffer[0] = '0';
+    addr_buffer[1] = 'x';
+    addr_buffer[10] = 0;
+    ssize_t bytes_read = read(fd, addr_buffer + 2, 8);
+    if (bytes_read < 8) break;
+    unsigned start = StringToLong(addr_buffer);
+    bytes_read = read(fd, addr_buffer + 2, 1);
+    if (bytes_read < 1) break;
+    if (addr_buffer[2] != '-') break;
+    bytes_read = read(fd, addr_buffer + 2, 8);
+    if (bytes_read < 8) break;
+    unsigned end = StringToLong(addr_buffer);
+    char buffer[MAP_LENGTH];
+    bytes_read = -1;
+    do {
+      bytes_read++;
+      if (bytes_read >= MAP_LENGTH - 1)
+        break;
+      bytes_read = read(fd, buffer + bytes_read, 1);
+      if (bytes_read < 1) break;
+    } while (buffer[bytes_read] != '\n');
+    buffer[bytes_read] = 0;
+    // Ignore mappings that are not executable.
+    if (buffer[3] != 'x') continue;
+    char* start_of_path = index(buffer, '/');
+    // There may be no filename in this line. Skip to next.
+    if (start_of_path == NULL) continue;
+    buffer[bytes_read] = 0;
+    result.push_back(SharedLibraryAddress(start_of_path, start, end));
+  }
+  close(fd);
   return result;
 }
 
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                               size_t* allocated) {
+void OS::SignalCodeMovingGC() {
+}
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+    : address_(ReserveRegion(size, hint)), size_(size) {}
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+    : address_(NULL), size_(0) {
   hint = AlignedAddress(hint, alignment);
   DCHECK((alignment % OS::AllocateAlignment()) == 0);
   size_t request_size = RoundUp(size + alignment,
                                 static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* result = ReserveRegion(request_size, hint);
-  if (result == nullptr) {
-    *allocated = 0;
-    return nullptr;
-  }
+  void* reservation = mmap(hint, request_size, PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
 
-  uint8_t* base = static_cast<uint8_t*>(result);
+  uint8_t* base = static_cast<uint8_t*>(reservation);
   uint8_t* aligned_base = RoundUp(base, alignment);
   DCHECK_LE(base, aligned_base);
@@ -101,89 +146,87 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
   DCHECK(aligned_size == request_size);
 
-  *allocated = aligned_size;
-  return static_cast<void*>(aligned_base);
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
 }
 
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
 }
 
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
 }
 
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
-                              size_t free_size) {
-  return munmap(free_start, free_size) == 0;
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
 }
 
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
-  return munmap(address, size) == 0;
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
 }
 
-// static
-bool OS::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
 }
 
-static unsigned StringToLong(char* buffer) {
-  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
+                      kMmapFdOffset);
+  if (result == MAP_FAILED) return NULL;
+  return result;
 }
 
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  static const int MAP_LENGTH = 1024;
-  int fd = open("/proc/self/maps", O_RDONLY);
-  if (fd < 0) return result;
-  while (true) {
-    char addr_buffer[11];
-    addr_buffer[0] = '0';
-    addr_buffer[1] = 'x';
-    addr_buffer[10] = 0;
-    ssize_t bytes_read = read(fd, addr_buffer + 2, 8);
-    if (bytes_read < 8) break;
-    unsigned start = StringToLong(addr_buffer);
-    bytes_read = read(fd, addr_buffer + 2, 1);
-    if (bytes_read < 1) break;
-    if (addr_buffer[2] != '-') break;
-    bytes_read = read(fd, addr_buffer + 2, 8);
-    if (bytes_read < 8) break;
-    unsigned end = StringToLong(addr_buffer);
-    char buffer[MAP_LENGTH];
-    bytes_read = -1;
-    do {
-      bytes_read++;
-      if (bytes_read >= MAP_LENGTH - 1) break;
-      bytes_read = read(fd, buffer + bytes_read, 1);
-      if (bytes_read < 1) break;
-    } while (buffer[bytes_read] != '\n');
-    buffer[bytes_read] = 0;
-    // Ignore mappings that are not executable.
-    if (buffer[3] != 'x') continue;
-    char* start_of_path = index(buffer, '/');
-    // There may be no filename in this line. Skip to next.
-    if (start_of_path == NULL) continue;
-    buffer[bytes_read] = 0;
-    result.push_back(SharedLibraryAddress(start_of_path, start, end));
-  }
-  close(fd);
-  return result;
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
 }
 
-void OS::SignalCodeMovingGC() {}
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+                                         void* free_start, size_t free_size) {
+  return munmap(free_start, free_size) == 0;
+}
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
 
 }  // namespace base
 }  // namespace v8
...
@@ -17,35 +17,28 @@ TimezoneCache* OS::CreateTimezoneCache() {
   return new PosixDefaultTimezoneCache();
 }
 
-// static
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
   return nullptr;
 }
 
-// static
-bool OS::Guard(void* address, size_t size) {
-  return mx_vmar_protect(mx_vmar_root_self(),
-                         reinterpret_cast<uintptr_t>(address), size,
-                         0 /*no permissions*/) == MX_OK;
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
+  return std::vector<SharedLibraryAddress>();
 }
 
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
-  mx_handle_t vmo;
-  if (mx_vmo_create(size, 0, &vmo) != MX_OK) return nullptr;
-  uintptr_t result;
-  mx_status_t status = mx_vmar_map(mx_vmar_root_self(), 0, vmo, 0, size,
-                                   0 /*no permissions*/, &result);
-  mx_handle_close(vmo);
-  if (status != MX_OK) return nullptr;
-  return reinterpret_cast<void*>(result);
+void OS::SignalCodeMovingGC() {
+  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
 }
 
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                               size_t* allocated) {
+VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
+
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+    : address_(ReserveRegion(size, hint)), size_(size) {}
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+    : address_(nullptr), size_(0) {
   DCHECK((alignment % OS::AllocateAlignment()) == 0);
   hint = AlignedAddress(hint, alignment);
   size_t request_size =
@@ -62,10 +55,7 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
   // Either the vmo is now referenced by the vmar, or we failed and are bailing,
   // so close the vmo either way.
   mx_handle_close(vmo);
-  if (status != MX_OK) {
-    *allocated = 0;
-    return nullptr;
-  }
+  if (status != MX_OK) return;
 
   uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
   uint8_t* aligned_base = RoundUp(base, alignment);
@@ -92,54 +82,83 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
   DCHECK(aligned_size == request_size);
 
-  *allocated = aligned_size;
-  return static_cast<void*>(aligned_base);
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
 }
 
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+void VirtualMemory::Reset() {
+  address_ = nullptr;
+  size_ = 0;
+}
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  CHECK(InVM(address, size));
+  return CommitRegion(address, size, is_executable);
+}
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+bool VirtualMemory::Guard(void* address) {
+  return mx_vmar_protect(mx_vmar_root_self(),
+                         reinterpret_cast<uintptr_t>(address),
+                         OS::CommitPageSize(), 0 /*no permissions*/) == MX_OK;
+}
+
+// static
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+  mx_handle_t vmo;
+  if (mx_vmo_create(size, 0, &vmo) != MX_OK) return nullptr;
+  uintptr_t result;
+  mx_status_t status = mx_vmar_map(mx_vmar_root_self(), 0, vmo, 0, size,
+                                   0 /*no permissions*/, &result);
+  mx_handle_close(vmo);
+  if (status != MX_OK) return nullptr;
+  return reinterpret_cast<void*>(result);
+}
+
 // static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
   uint32_t prot = MX_VM_FLAG_PERM_READ | MX_VM_FLAG_PERM_WRITE |
                   (is_executable ? MX_VM_FLAG_PERM_EXECUTE : 0);
-  return mx_vmar_protect(mx_vmar_root_self(),
-                         reinterpret_cast<uintptr_t>(address), size,
-                         prot) == MX_OK;
+  return mx_vmar_protect(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
+                         size, prot) == MX_OK;
 }
 
 // static
-bool OS::UncommitRegion(void* address, size_t size) {
-  return mx_vmar_protect(mx_vmar_root_self(),
-                         reinterpret_cast<uintptr_t>(address), size,
-                         0 /*no permissions*/) == MX_OK;
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mx_vmar_protect(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
+                         size, 0 /*no permissions*/) == MX_OK;
 }
 
 // static
-bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
-                              size_t free_size) {
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+                                         void* free_start, size_t free_size) {
   return mx_vmar_unmap(mx_vmar_root_self(),
                        reinterpret_cast<uintptr_t>(free_start),
                        free_size) == MX_OK;
 }
 
 // static
-bool OS::ReleaseRegion(void* address, size_t size) {
-  return mx_vmar_unmap(mx_vmar_root_self(),
-                       reinterpret_cast<uintptr_t>(address), size) == MX_OK;
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return mx_vmar_unmap(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
+                       size) == MX_OK;
 }
 
 // static
-bool OS::HasLazyCommits() {
+bool VirtualMemory::HasLazyCommits() {
   // TODO(scottmg): Port, https://crbug.com/731217.
   return false;
 }
 
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
-  return std::vector<SharedLibraryAddress>();
-}
-
-void OS::SignalCodeMovingGC() {
-  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
-}
-
 }  // namespace base
 }  // namespace v8
...
@@ -97,120 +97,16 @@ TimezoneCache* OS::CreateTimezoneCache() {
   return new PosixDefaultTimezoneCache();
 }
 
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, AllocateAlignment());
   int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
-                     kMmapFdOffset);
+  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 
   if (mbase == MAP_FAILED) return NULL;
   *allocated = msize;
   return mbase;
 }
 
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
-  void* result =
-      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-           kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-#if defined(LEAK_SANITIZER)
-  __lsan_register_root_region(result, size);
-#endif
-  return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                               size_t* allocated) {
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  hint = AlignedAddress(hint, alignment);
-  size_t request_size =
-      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation =
-      mmap(hint, request_size, PROT_NONE,
-           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
-  if (reservation == MAP_FAILED) {
-    *allocated = 0;
-    return nullptr;
-  }
-
-  uint8_t* base = static_cast<uint8_t*>(reservation);
-  uint8_t* aligned_base = RoundUp(base, alignment);
-  DCHECK_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  DCHECK_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  DCHECK(aligned_size == request_size);
-
-#if defined(LEAK_SANITIZER)
-  __lsan_register_root_region(static_cast<void*>(aligned_base), aligned_size);
-#endif
-
-  *allocated = aligned_size;
-  return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
-                              size_t free_size) {
-#if defined(LEAK_SANITIZER)
-  __lsan_unregister_root_region(address, size);
-  __lsan_register_root_region(address, size - free_size);
-#endif
-  return munmap(free_start, free_size) == 0;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
-#if defined(LEAK_SANITIZER)
-  __lsan_unregister_root_region(address, size);
-#endif
-  return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return true; }
-
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddress> result;
   // This function assumes that the layout of the file is as follows:
@@ -294,5 +190,131 @@ void OS::SignalCodeMovingGC() {
   fclose(f);
 }
 
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
+
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+    : address_(ReserveRegion(size, hint)), size_(size) {}
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  hint = AlignedAddress(hint, alignment);
+  size_t request_size =
+      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation =
+      mmap(hint, request_size, PROT_NONE,
+           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+#if defined(LEAK_SANITIZER)
+  __lsan_register_root_region(address_, size_);
+#endif
+}
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  CHECK(InVM(address, size));
+  return CommitRegion(address, size, is_executable);
+}
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  CHECK(InVM(address, size));
+  return UncommitRegion(address, size);
+}
+
+bool VirtualMemory::Guard(void* address) {
+  CHECK(InVM(address, OS::CommitPageSize()));
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+  void* result =
+      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+           kMmapFd, kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+#if defined(LEAK_SANITIZER)
+  __lsan_register_root_region(result, size);
+#endif
+  return result;
+}
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base, size, prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+                                         void* free_start, size_t free_size) {
+#if defined(LEAK_SANITIZER)
+  __lsan_unregister_root_region(base, size);
+  __lsan_register_root_region(base, size - free_size);
+#endif
+  return munmap(free_start, free_size) == 0;
+}
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+#if defined(LEAK_SANITIZER)
+  __lsan_unregister_root_region(base, size);
+#endif
+  return munmap(base, size) == 0;
+}
+
+bool VirtualMemory::HasLazyCommits() { return true; }
+
 }  // namespace base
 }  // namespace v8
...
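Note: the aligned constructor restored above over-reserves size + alignment bytes, rounds the base up, and unmaps the unused prefix and suffix. A small self-contained sketch of that arithmetic (pure computation, no mmap; the example base address is arbitrary):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t alignment = 0x10000;               // want 64 KB alignment
      const uintptr_t size = 0x10000;                    // want 64 KB of memory
      const uintptr_t request = size + alignment;        // so reserve 128 KB
      const uintptr_t base = 0x7f0000012000;             // wherever mmap landed
      const uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
      const uintptr_t prefix = aligned - base;           // unmapped before block
      const uintptr_t suffix = request - prefix - size;  // unmapped after block
      std::printf("aligned=%#lx prefix=%#lx suffix=%#lx\n",
                  static_cast<unsigned long>(aligned),
                  static_cast<unsigned long>(prefix),
                  static_cast<unsigned long>(suffix));
      return 0;
    }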
@@ -51,7 +51,6 @@ namespace base {
 static const int kMmapFd = VM_MAKE_TAG(255);
 static const off_t kMmapFdOffset = 0;
 
-// static
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, getpagesize());
@@ -63,31 +62,58 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
   return mbase;
 }
 
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
-  void* result =
-      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-           kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return nullptr;
-
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  unsigned int images_count = _dyld_image_count();
+  for (unsigned int i = 0; i < images_count; ++i) {
+    const mach_header* header = _dyld_get_image_header(i);
+    if (header == NULL) continue;
+#if V8_HOST_ARCH_X64
+    uint64_t size;
+    char* code_ptr = getsectdatafromheader_64(
+        reinterpret_cast<const mach_header_64*>(header),
+        SEG_TEXT,
+        SECT_TEXT,
+        &size);
+#else
+    unsigned int size;
+    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+#endif
+    if (code_ptr == NULL) continue;
+    const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
+    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
+    result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
+                                          start + size, slide));
+  }
   return result;
 }
 
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
-                               size_t* allocated) {
+void OS::SignalCodeMovingGC() {
+}
+
+TimezoneCache* OS::CreateTimezoneCache() {
+  return new PosixDefaultTimezoneCache();
+}
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+    : address_(ReserveRegion(size, hint)), size_(size) {}
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+    : address_(NULL), size_(0) {
   DCHECK((alignment % OS::AllocateAlignment()) == 0);
   hint = AlignedAddress(hint, alignment);
   size_t request_size = RoundUp(size + alignment,
                                 static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* result = ReserveRegion(request_size, hint);
-  if (result == nullptr) {
-    *allocated = 0;
-    return nullptr;
-  }
+  void* reservation =
+      mmap(hint, request_size, PROT_NONE,
+           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
 
-  uint8_t* base = static_cast<uint8_t*>(result);
+  uint8_t* base = static_cast<uint8_t*>(reservation);
   uint8_t* aligned_base = RoundUp(base, alignment);
   DCHECK_LE(base, aligned_base);
@@ -109,12 +135,54 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
   DCHECK(aligned_size == request_size);
 
-  *allocated = aligned_size;
-  return static_cast<void*>(aligned_base);
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
 }
 
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+  void* result =
+      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+           kMmapFd, kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+bool VirtualMemory::CommitRegion(void* address,
+                                 size_t size,
+                                 bool is_executable) {
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
   if (MAP_FAILED == mmap(address,
                          size,
@@ -127,8 +195,8 @@ bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
   return true;
 }
 
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
   return mmap(address,
               size,
               PROT_NONE,
@@ -137,49 +205,16 @@ bool OS::UncommitRegion(void* address, size_t size) {
               kMmapFdOffset) != MAP_FAILED;
 }
 
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
-                              size_t free_size) {
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+                                         void* free_start, size_t free_size) {
   return munmap(free_start, free_size) == 0;
 }
 
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
   return munmap(address, size) == 0;
 }
 
-// static
-bool OS::HasLazyCommits() { return true; }
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  unsigned int images_count = _dyld_image_count();
-  for (unsigned int i = 0; i < images_count; ++i) {
-    const mach_header* header = _dyld_get_image_header(i);
-    if (header == NULL) continue;
-#if V8_HOST_ARCH_X64
-    uint64_t size;
-    char* code_ptr = getsectdatafromheader_64(
-        reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
-        &size);
-#else
-    unsigned int size;
-    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
-#endif
-    if (code_ptr == NULL) continue;
-    const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
-    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
-    result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
-                                          start + size, slide));
-  }
-  return result;
-}
-
-void OS::SignalCodeMovingGC() {}
-
-TimezoneCache* OS::CreateTimezoneCache() {
-  return new PosixDefaultTimezoneCache();
-}
+bool VirtualMemory::HasLazyCommits() { return true; }
 
 }  // namespace base
 }  // namespace v8
...
...@@ -38,105 +38,16 @@ TimezoneCache* OS::CreateTimezoneCache() { ...@@ -38,105 +38,16 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache(); return new PosixDefaultTimezoneCache();
} }
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested, size_t* allocated, void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) { OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment()); const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access); int prot = GetProtectionFromMemoryPermission(access);
void* mbase = void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) return NULL; if (mbase == MAP_FAILED) return NULL;
*allocated = msize; *allocated = msize;
return mbase; return mbase;
} }
// static
void* OS::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
size_t free_size) {
return munmap(free_start, free_size) == 0;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
...@@ -221,5 +132,133 @@ void OS::SignalCodeMovingGC() {
fclose(f);
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size, void* hint)
: address_(ReserveRegion(size, hint)), size_(size) {}
VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation =
mmap(hint, request_size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
}
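The aligned constructor above over-reserves by alignment bytes, rounds the base up, and returns the unused head and tail to the OS. The same arithmetic reduced to plain POSIX calls, as a hedged standalone sketch (the helper name is hypothetical; assumes size is page-aligned and alignment is a page-multiple power of two):

  // Sketch: aligned reservation by trimming an oversized PROT_NONE mapping.
  #include <stddef.h>
  #include <stdint.h>
  #include <sys/mman.h>

  void* ReserveAlignedSketch(size_t size, size_t alignment) {
    size_t request = size + alignment;
    void* raw = mmap(NULL, request, PROT_NONE,
                     MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
    if (raw == MAP_FAILED) return NULL;
    uint8_t* base = static_cast<uint8_t*>(raw);
    uint8_t* aligned = reinterpret_cast<uint8_t*>(
        (reinterpret_cast<uintptr_t>(base) + alignment - 1) &
        ~(alignment - 1));
    if (aligned != base) munmap(base, aligned - base);  // trim the head
    size_t tail = request - static_cast<size_t>(aligned - base) - size;
    if (tail != 0) munmap(aligned + size, tail);        // trim the tail
    return aligned;
  }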
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
DCHECK(result);
USE(result);
}
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(base,
size,
prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
void* free_start, size_t free_size) {
return munmap(free_start, free_size) == 0;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
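HasLazyCommits() reports whether the OS defers physical backing until a committed page is first touched; when it does, the heap sizes its physical footprint by high-water mark rather than by committed bytes (see PagedSpace::CommittedPhysicalMemory() further down in this CL). A hedged sketch of that branch, with illustrative names:

  // Sketch: callers branch on lazy commits when reporting physical memory.
  size_t CommittedPhysicalMemorySketch(size_t committed, size_t high_water) {
    if (!VirtualMemory::HasLazyCommits()) return committed;
    return high_water;  // only pages actually touched are backed by the OS
  }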
} // namespace base
} // namespace v8
...@@ -129,7 +129,6 @@ void OS::ProtectCode(void* address, const size_t size) {
// Create guard pages.
#if !V8_OS_FUCHSIA
void OS::Guard(void* address, const size_t size) {
#if V8_OS_CYGWIN
DWORD oldprotect;
...@@ -138,7 +137,6 @@ void OS::Guard(void* address, const size_t size) {
mprotect(address, size, PROT_NONE);
#endif
}
#endif // !V8_OS_FUCHSIA
// Make a region of memory readable and writable.
void OS::Unprotect(void* address, const size_t size) {
...
...@@ -89,46 +89,99 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
kMmapFdOffset);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
procfs_mapinfo *mapinfos = NULL, *mapinfo;
int proc_fd, num, i;
struct {
procfs_debuginfo info;
char buff[PATH_MAX];
} map;
char buf[PATH_MAX + 1];
snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
if ((proc_fd = open(buf, O_RDONLY)) == -1) {
close(proc_fd);
return result;
}
/* Get the number of map entries. */
if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
close(proc_fd);
return result;
}
mapinfos = reinterpret_cast<procfs_mapinfo *>(
malloc(num * sizeof(procfs_mapinfo)));
if (mapinfos == NULL) {
close(proc_fd);
return result;
}
/* Fill the map entries. */
if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
free(mapinfos);
close(proc_fd);
return result;
}
for (i = 0; i < num; i++) {
mapinfo = mapinfos + i;
if (mapinfo->flags & MAP_ELF) {
map.info.vaddr = mapinfo->vaddr;
if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
continue;
}
result.push_back(SharedLibraryAddress(
map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
}
}
free(mapinfos);
close(proc_fd);
return result;
}
void OS::SignalCodeMovingGC() {
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size, void* hint)
: address_(ReserveRegion(size, hint)), size_(size) {}
VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
void* reservation =
mmap(hint, request_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
...@@ -150,99 +203,84 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
size_t free_size) {
return munmap(free_start, free_size) == 0;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::HasLazyCommits() { return false; }
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
procfs_mapinfo *mapinfos = NULL, *mapinfo;
int proc_fd, num, i;
struct {
procfs_debuginfo info;
char buff[PATH_MAX];
} map;
char buf[PATH_MAX + 1];
snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
if ((proc_fd = open(buf, O_RDONLY)) == -1) {
close(proc_fd);
return result;
}
/* Get the number of map entries. */
if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
close(proc_fd);
return result;
}
mapinfos =
reinterpret_cast<procfs_mapinfo*>(malloc(num * sizeof(procfs_mapinfo)));
if (mapinfos == NULL) {
close(proc_fd);
return result;
}
/* Fill the map entries. */
if (devctl(proc_fd, DCMD_PROC_PAGEDATA, mapinfos,
num * sizeof(procfs_mapinfo), &num) != EOK) {
free(mapinfos);
close(proc_fd);
return result;
}
for (i = 0; i < num; i++) {
mapinfo = mapinfos + i;
if (mapinfo->flags & MAP_ELF) {
map.info.vaddr = mapinfo->vaddr;
if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
continue;
}
result.push_back(SharedLibraryAddress(map.info.path, mapinfo->vaddr,
mapinfo->vaddr + mapinfo->size));
}
}
free(mapinfos);
close(proc_fd);
return result;
}
void OS::SignalCodeMovingGC() {}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
DCHECK(result);
USE(result);
}
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(base,
size,
prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
bool VirtualMemory::HasLazyCommits() {
return false;
}
} // namespace base
} // namespace v8
...@@ -58,47 +58,49 @@ double SolarisTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase =
mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<SharedLibraryAddress>();
}
void OS::SignalCodeMovingGC() {
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size, void* hint)
: address_(ReserveRegion(size, hint)), size_(size) {}
VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
void* reservation =
mmap(hint, request_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
...@@ -120,50 +122,88 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
DCHECK(result);
USE(result);
}
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
if (MAP_FAILED == mmap(base,
size,
prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
size_t free_size) {
bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
void* free_start, size_t free_size) {
return munmap(free_start, free_size) == 0;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
// static
bool OS::HasLazyCommits() {
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<SharedLibraryAddress>();
}
void OS::SignalCodeMovingGC() {}
} // namespace base
} // namespace v8
...@@ -737,8 +737,6 @@ void* OS::GetRandomMmapAddr() {
return reinterpret_cast<void *>(address);
}
namespace {
static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
void* hint) {
LPVOID base = NULL;
...@@ -764,8 +762,6 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
return base;
}
} // namespace
void* OS::Allocate(const size_t requested, size_t* allocated,
bool is_executable, void* hint) {
return OS::Allocate(requested, allocated,
...@@ -813,15 +809,18 @@ void OS::Free(void* address, const size_t size) {
USE(size);
}
intptr_t OS::CommitPageSize() {
return 4096;
}
void OS::ProtectCode(void* address, const size_t size) {
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
}
void OS::Guard(void* address, const size_t size) {
DWORD oldprotect;
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
...@@ -832,76 +831,6 @@ void OS::Unprotect(void* address, const size_t size) {
USE(result);
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
void* address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
DCHECK(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != nullptr) {
request_size = size;
DCHECK(base == static_cast<uint8_t*>(address));
} else {
// Resizing failed, just go with a bigger area.
address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
return nullptr;
}
}
*allocated = request_size;
return static_cast<void*>(address);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
size_t free_size) {
return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
void OS::Sleep(TimeDelta interval) {
::Sleep(static_cast<DWORD>(interval.InMilliseconds()));
}
...@@ -1275,6 +1204,108 @@ int OS::ActivationFrameAlignment() {
#endif
}
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size, void* hint)
: address_(ReserveRegion(size, hint)), size_(size) {}
VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* address = ReserveRegion(request_size, hint);
if (address == NULL) return;
uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
DCHECK(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != NULL) {
request_size = size;
DCHECK(base == static_cast<uint8_t*>(address));
} else {
// Resizing failed, just go with a bigger area.
address = ReserveRegion(request_size, hint);
if (address == NULL) return;
}
address_ = address;
size_ = request_size;
}
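Unlike the POSIX constructors above, this Windows path cannot trim an oversized reservation in place, so it releases the whole block and immediately re-reserves the aligned subrange; between those two calls another thread may claim the address, in which case the code falls back to keeping the oversized area. A compact sketch of the same release-and-re-reserve idea (hypothetical helper, not part of the CL; alignment must be a power of two):

  // Sketch: Win32 aligned reservation by release-and-re-reserve.
  void* ReserveAlignedSketch(size_t size, size_t alignment) {
    size_t request = size + alignment;
    uint8_t* raw = static_cast<uint8_t*>(
        VirtualAlloc(NULL, request, MEM_RESERVE, PAGE_NOACCESS));
    if (raw == NULL) return NULL;
    uintptr_t aligned =
        (reinterpret_cast<uintptr_t>(raw) + alignment - 1) & ~(alignment - 1);
    VirtualFree(raw, 0, MEM_RELEASE);  // give the oversized block back
    // Racy: the range can be taken before we re-reserve it; fall back if so.
    void* exact = VirtualAlloc(reinterpret_cast<void*>(aligned), size,
                               MEM_RESERVE, PAGE_NOACCESS);
    if (exact != NULL) return exact;
    return VirtualAlloc(NULL, request, MEM_RESERVE, PAGE_NOACCESS);
  }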
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
DCHECK(result);
USE(result);
}
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
DCHECK(IsReserved());
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
PAGE_NOACCESS)) {
return false;
}
return true;
}
void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
void* free_start, size_t free_size) {
return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return VirtualFree(base, 0, MEM_RELEASE) != 0;
}
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
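One asymmetry in the Windows implementation above is worth spelling out: VirtualFree() can only MEM_RELEASE an entire reservation starting at its original base, which is why ReleaseRegion() passes size 0 with MEM_RELEASE while ReleasePartialRegion() merely decommits the freed tail; that tail's address space stays reserved until the whole region is released. On POSIX, munmap() can carve out any page-aligned subrange. In comment form:

  // Sketch: what VirtualFree allows on a reserved region (not part of the CL).
  // VirtualFree(free_start, free_size, MEM_DECOMMIT);  // OK: drops backing
  // VirtualFree(free_start, free_size, MEM_RELEASE);   // fails: not the base
  // VirtualFree(base, 0, MEM_RELEASE);                 // OK: whole reservation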
// ----------------------------------------------------------------------------
// Win32 thread support.
...
...@@ -197,22 +197,6 @@ class V8_BASE_EXPORT OS {
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
static void* ReserveRegion(size_t size, void* hint);
static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated);
static bool CommitRegion(void* address, size_t size, bool is_executable);
static bool UncommitRegion(void* address, size_t size);
static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
size_t free_size);
static bool ReleaseRegion(void* address, size_t size);
static bool HasLazyCommits();
// Sleep for a specified time interval.
static void Sleep(TimeDelta interval);
...@@ -301,6 +285,141 @@ class V8_BASE_EXPORT OS {
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
// Represents and controls an area of reserved memory.
// Control of the reserved memory can be assigned to another VirtualMemory
// object by calling TakeControl. This removes the reserved memory from the
// 'from' instance.
class V8_BASE_EXPORT VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size, void* hint);
// Reserves virtual memory containing an area of the given size that
// is aligned per alignment. This may not be at the position returned
// by address().
VirtualMemory(size_t size, size_t alignment, void* hint);
// Construct a virtual memory by assigning it some already mapped address
// and size.
VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
~VirtualMemory();
// Returns whether the memory has been reserved.
bool IsReserved() const { return address_ != nullptr; }
// Initializes or resets an embedded VirtualMemory object.
void Reset();
// Returns the start address of the reserved memory.
// If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
// the alignment to get the start of the aligned block.
void* address() const {
DCHECK(IsReserved());
return address_;
}
void* end() const {
DCHECK(IsReserved());
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
size_);
}
// Returns the size of the reserved memory. The returned value is only
// meaningful when IsReserved() returns true.
// If the memory was reserved with an alignment, this size may be larger
// than the requested size.
size_t size() const { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address, size_t size, bool is_executable);
// Uncommits real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
// Creates a single guard page at the given address.
bool Guard(void* address);
// Releases the memory after |free_start|. Returns the bytes released.
size_t ReleasePartial(void* free_start) {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
const size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
reinterpret_cast<size_t>(address_));
CHECK(InVM(free_start, size));
DCHECK_LT(address_, free_start);
DCHECK_LT(free_start, reinterpret_cast<void*>(
reinterpret_cast<size_t>(address_) + size_));
const bool result = ReleasePartialRegion(address_, size_, free_start, size);
USE(result);
DCHECK(result);
size_ -= size;
return size;
}
void Release() {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
void* address = address_;
size_t size = size_;
CHECK(InVM(address, size));
Reset();
bool result = ReleaseRegion(address, size);
USE(result);
DCHECK(result);
}
// Assign control of the reserved region to a different VirtualMemory object.
// The old object is no longer functional (IsReserved() returns false).
void TakeControl(VirtualMemory* from) {
DCHECK(!IsReserved());
address_ = from->address_;
size_ = from->size_;
from->Reset();
}
static void* ReserveRegion(size_t size, void* hint);
static bool CommitRegion(void* base, size_t size, bool is_executable);
static bool UncommitRegion(void* base, size_t size);
// Must be called with a base pointer that has been returned by ReserveRegion
// and the same size it was reserved with.
static bool ReleaseRegion(void* base, size_t size);
// Must be called with a base pointer that has been returned by ReserveRegion
// and the same size it was reserved with.
// [free_start, free_start + free_size] is the memory that will be released.
static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
size_t free_size);
// Returns true if OS performs lazy commits, i.e. the memory allocation call
// defers actual physical memory allocation till the first memory access.
// Otherwise returns false.
static bool HasLazyCommits();
private:
bool InVM(void* address, size_t size) {
return (reinterpret_cast<uintptr_t>(address_) <=
reinterpret_cast<uintptr_t>(address)) &&
((reinterpret_cast<uintptr_t>(address_) + size_) >=
(reinterpret_cast<uintptr_t>(address) + size));
}
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
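Taken together, the class above gives callers a reserve, commit, use, then uncommit/release lifecycle. A minimal hedged usage sketch against the declared API (sizes are illustrative literals; error handling elided):

  // Sketch: typical base::VirtualMemory lifecycle, per the API above.
  v8::base::VirtualMemory vm(1024 * 1024, 4096 /* alignment */, nullptr);
  if (vm.IsReserved()) {
    void* page = vm.address();         // may need rounding up to the alignment
    vm.Commit(page, 4096, false);      // back one page with real memory
    static_cast<char*>(page)[0] = 42;  // safe to touch once committed
    vm.Uncommit(page, 4096);           // drop the backing, keep the range
  }                                    // destructor releases the reservation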
// ----------------------------------------------------------------------------
// Thread
//
...
...@@ -138,7 +138,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void Free(void* data, size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) {
base::OS::ReleaseRegion(data, length);
base::VirtualMemory::ReleaseRegion(data, length);
return;
}
#endif
...@@ -156,9 +156,9 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
#if USE_VM
void* VirtualMemoryAllocate(size_t length) {
void* data = base::OS::ReserveRegion(length, nullptr);
void* data = base::VirtualMemory::ReserveRegion(length, nullptr);
if (data && !base::OS::CommitRegion(data, length, false)) {
if (data && !base::VirtualMemory::CommitRegion(data, length, false)) {
base::OS::ReleaseRegion(data, length);
base::VirtualMemory::ReleaseRegion(data, length);
return nullptr;
}
MSAN_MEMORY_IS_INITIALIZED(data, length);
...
...@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
void SequentialMarkingDeque::SetUp() {
VirtualMemory reservation;
base::VirtualMemory reservation;
if (!AllocVirtualMemory(kMaxSize, heap_->GetRandomMmapAddr(), &reservation)) {
V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
}
...
...@@ -7,7 +7,6 @@
#include <deque>
#include "src/allocation.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/cancelable-task.h"
...@@ -132,7 +131,7 @@ class SequentialMarkingDeque {
base::Mutex mutex_;
VirtualMemory backing_store_;
base::VirtualMemory backing_store_;
size_t backing_store_committed_size_;
HeapObject** array_;
// array_[(top - 1) & mask_] is the top element in the deque. The Deque is
...
...@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
...@@ -117,7 +118,7 @@ bool CodeRange::SetUp(size_t requested) {
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
VirtualMemory reservation;
base::VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(
requested,
Max(kCodeRangeAreaAlignment,
...@@ -407,14 +408,16 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
if (!base::OS::CommitRegion(base, size, executable == EXECUTABLE)) {
if (!base::VirtualMemory::CommitRegion(base, size,
executable == EXECUTABLE)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
return true;
}
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
// Code which is part of the code-range does not have its own VirtualMemory.
...@@ -436,7 +439,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
bool result = base::OS::ReleaseRegion(base, size);
bool result = base::VirtualMemory::ReleaseRegion(base, size);
USE(result);
DCHECK(result);
}
...@@ -444,8 +447,8 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
void* hint,
VirtualMemory* controller) {
base::VirtualMemory* controller) {
VirtualMemory reservation;
base::VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
return nullptr;
...@@ -462,9 +465,9 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
Address MemoryAllocator::AllocateAlignedMemory(
size_t reserve_size, size_t commit_size, size_t alignment,
Executability executable, void* hint, VirtualMemory* controller) {
Executability executable, void* hint, base::VirtualMemory* controller) {
DCHECK(commit_size <= reserve_size);
VirtualMemory reservation;
base::VirtualMemory reservation;
Address base =
ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
if (base == NULL) return NULL;
...@@ -522,7 +525,7 @@ void MemoryChunk::InitializationMemoryFence() {
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
VirtualMemory* reservation) {
base::VirtualMemory* reservation) {
MemoryChunk* chunk = FromAddress(base);
DCHECK(base == chunk->address());
...@@ -683,7 +686,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
}
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
return size();
return high_water_mark_.Value();
}
...@@ -716,7 +719,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = nullptr;
VirtualMemory reservation;
base::VirtualMemory reservation;
Address area_start = nullptr;
Address area_end = nullptr;
void* address_hint = heap->GetRandomMmapAddr();
...@@ -857,7 +860,7 @@ size_t Page::AvailableInFreeList() {
size_t Page::ShrinkToHighWaterMark() {
// Shrinking only makes sense outside of the CodeRange, where we don't care
// about address space fragmentation.
VirtualMemory* reservation = reserved_memory();
base::VirtualMemory* reservation = reserved_memory();
if (!reservation->IsReserved()) return 0;
// Shrink pages to high water mark. The water mark points either to a filler
...@@ -935,7 +938,7 @@ void Page::DestroyBlackArea(Address start, Address end) {
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t bytes_to_free,
Address new_area_end) {
VirtualMemory* reservation = chunk->reserved_memory();
base::VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
chunk->size_ -= bytes_to_free;
chunk->area_end_ = new_area_end;
...@@ -963,7 +966,7 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
VirtualMemory* reservation = chunk->reserved_memory();
base::VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_.Value(), static_cast<size_t>(size));
...@@ -982,7 +985,7 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllocatedMemory();
VirtualMemory* reservation = chunk->reserved_memory();
base::VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
} else {
...@@ -1075,7 +1078,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
return nullptr;
}
VirtualMemory reservation(start, size);
base::VirtualMemory reservation(start, size);
MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
NOT_EXECUTABLE, owner, &reservation);
size_.Increment(size);
...@@ -1096,7 +1099,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
if (!base::OS::UncommitRegion(start, size)) return false;
if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
...@@ -1148,8 +1151,9 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
}
}
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
size_t commit_size,
bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
Address start, size_t commit_size,
size_t reserved_size) {
// Commit page header (not executable).
Address header = start;
...@@ -1455,7 +1459,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
for (Page* page : *this) {
...@@ -2626,7 +2630,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
size_t NewSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
...
...@@ -354,7 +354,7 @@ class MemoryChunk {
+ kUIntptrSize // uintptr_t flags_
+ kPointerSize // Address area_start_
+ kPointerSize // Address area_end_
+ 2 * kPointerSize // VirtualMemory reservation_
+ 2 * kPointerSize // base::VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
+ kIntptrSize // intptr_t progress_bar_
...@@ -631,12 +631,12 @@ class MemoryChunk {
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
VirtualMemory* reservation);
base::VirtualMemory* reservation);
// Should be called when memory chunk is about to be freed.
void ReleaseAllocatedMemory();
VirtualMemory* reserved_memory() { return &reservation_; }
base::VirtualMemory* reserved_memory() { return &reservation_; }
size_t size_;
uintptr_t flags_;
...@@ -646,7 +646,7 @@ class MemoryChunk {
Address area_end_;
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
base::VirtualMemory reservation_;
// The identity of the owning space. This is tagged as a failure pointer, but
// no failure can be in an object, so this can be distinguished from any entry
...@@ -1070,7 +1070,7 @@ class CodeRange {
Isolate* isolate_;
// The reserved range of virtual memory that all code objects are put in.
VirtualMemory virtual_memory_;
base::VirtualMemory virtual_memory_;
// The global mutex guards free_list_ and allocation_list_ as GC threads may
// access both lists concurrently to the main thread.
...@@ -1338,14 +1338,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
Executability executable, Space* space);
Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
VirtualMemory* controller);
base::VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
size_t alignment, Executability executable,
void* hint, VirtualMemory* controller);
void* hint, base::VirtualMemory* controller);
bool CommitMemory(Address addr, size_t size, Executability executable);
void FreeMemory(VirtualMemory* reservation, Executability executable);
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
...@@ -1371,8 +1371,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// filling it up with a recognizable non-NULL bit pattern.
void ZapBlock(Address start, size_t size);
MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, Address start,
size_t commit_size,
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
Address start, size_t commit_size,
size_t reserved_size);
CodeRange* code_range() { return code_range_; }
...@@ -1435,7 +1435,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
base::AtomicValue<void*> lowest_ever_allocated_;
base::AtomicValue<void*> highest_ever_allocated_;
VirtualMemory last_chunk_;
base::VirtualMemory last_chunk_;
Unmapper unmapper_;
friend class heap::TestCodeRangeScope;
...@@ -2746,7 +2746,8 @@ class NewSpace : public Space {
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
VirtualMemory reservation_;
base::VirtualMemory reservation_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
...
...@@ -32,7 +32,7 @@ void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of
// the area.
VirtualMemory reservation;
base::VirtualMemory reservation;
if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
&reservation)) {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
...
...@@ -208,7 +208,7 @@ class StoreBuffer {
// IN_GC mode.
StoreBufferMode mode_;
VirtualMemory virtual_memory_;
base::VirtualMemory virtual_memory_;
// Callbacks are more efficient than reading out the gc state for every
// store buffer operation.
...
...@@ -130,7 +130,7 @@ TEST(AlignedAllocOOM) {
TEST(AllocVirtualMemoryOOM) {
AllocationPlatform platform;
CHECK(!platform.oom_callback_called);
v8::internal::VirtualMemory result;
v8::base::VirtualMemory result;
bool success =
v8::internal::AllocVirtualMemory(GetHugeMemoryAmount(), nullptr, &result);
// On a few systems, allocation somehow succeeds.
...@@ -141,7 +141,7 @@ TEST(AllocVirtualMemoryOOM) {
TEST(AlignedAllocVirtualMemoryOOM) {
AllocationPlatform platform;
CHECK(!platform.oom_callback_called);
v8::internal::VirtualMemory result;
v8::base::VirtualMemory result;
bool success = v8::internal::AlignedAllocVirtualMemory(
GetHugeMemoryAmount(), v8::base::OS::AllocateAlignment(), nullptr,
&result);
...
...@@ -25,28 +25,32 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Tests of the TokenLock class from lock.h
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h> // for usleep()
#include "src/v8.h"
#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
using OS = v8::base::OS;
namespace v8 {
namespace internal {
TEST(OSReserveMemory) {
size_t mem_size = 0;
void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
OS::GetRandomMmapAddr(), &mem_size);
CHECK_NE(0, mem_size);
CHECK_NOT_NULL(mem_addr);
TEST(VirtualMemory) {
v8::base::VirtualMemory* vm =
new v8::base::VirtualMemory(1 * MB, v8::base::OS::GetRandomMmapAddr());
CHECK(vm->IsReserved());
void* block_addr = vm->address();
size_t block_size = 4 * KB;
CHECK(OS::CommitRegion(mem_addr, block_size, false));
CHECK(vm->Commit(block_addr, block_size, false));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[KB - 1] = 2;
CHECK(OS::UncommitRegion(mem_addr, block_size));
OS::ReleaseRegion(mem_addr, mem_size);
int* addr = static_cast<int*>(block_addr);
addr[KB-1] = 2;
CHECK(vm->Uncommit(block_addr, block_size));
delete vm;
}
} // namespace internal
...
...@@ -25,29 +25,26 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Tests of the TokenLock class from lock.h
#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
using OS = v8::base::OS;
namespace v8 {
namespace internal {
#include <stdlib.h>
#include "src/v8.h"
#include "src/base/platform/platform.h"
#include "src/base/win32-headers.h"
#include "test/cctest/cctest.h"
TEST(OSReserveMemory) {
size_t mem_size = 0;
void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
OS::GetRandomMmapAddr(), &mem_size);
CHECK_NE(0, mem_size);
CHECK_NOT_NULL(mem_addr);
size_t block_size = 4 * KB;
CHECK(OS::CommitRegion(mem_addr, block_size, false));
TEST(VirtualMemory) {
v8::base::VirtualMemory* vm =
new v8::base::VirtualMemory(1 * i::MB, v8::base::OS::GetRandomMmapAddr());
CHECK(vm->IsReserved());
void* block_addr = vm->address();
size_t block_size = 4 * i::KB;
CHECK(vm->Commit(block_addr, block_size, false));
// Check whether we can write to memory.
int* addr = static_cast<int*>(mem_addr);
addr[KB - 1] = 2;
CHECK(OS::UncommitRegion(mem_addr, block_size));
OS::ReleaseRegion(mem_addr, mem_size);
int* addr = static_cast<int*>(block_addr);
addr[i::KB - 1] = 2;
CHECK(vm->Uncommit(block_addr, block_size));
delete vm;
}
} // namespace internal
} // namespace v8