Commit 0b0bfc4b authored by Bill Budge's avatar Bill Budge Committed by Commit Bot

[Memory] Unify POSIX memory management calls.

- Moves base::OS memory management calls into platform-posix.cc,
  using preprocessor to adjust for each platform.

Bug: chromium:756050
Change-Id: I2af4dce4379ad1fe9e22e5ab5c6d6a7faa3655b3
Reviewed-on: https://chromium-review.googlesource.com/738890
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48981}
parent 799cb6f3
......@@ -65,101 +65,6 @@ double AIXTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
// Allocates |requested| bytes (rounded up to whole pages) with the protection
// implied by |access|, near |hint| if the kernel honors it. On success stores
// the actual mapped size in |allocated| and returns the base; nullptr on
// failure.
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
// Round the request up to the system page size.
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
// static
// Reserves |size| bytes of address space near |hint| without committing
// backing store (PROT_NONE). Returns nullptr on failure.
void* OS::ReserveRegion(size_t size, void* hint) {
void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
// static
// Reserves a PROT_NONE region of at least |size| bytes whose base address is
// a multiple of |alignment|, trimming excess pages from both ends of an
// over-sized reservation. Stores the final reserved size in |allocated| (0 on
// failure) and returns the aligned base, or nullptr on failure.
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
  hint = AlignedAddress(hint, alignment);
  // Over-reserve by |alignment| so an aligned sub-range is guaranteed to fit.
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
  void* result = ReserveRegion(request_size, hint);
  if (result == nullptr) {
    *allocated = 0;
    return nullptr;
  }
  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);
  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }
  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);
  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }
  DCHECK(aligned_size == request_size);
  // NOTE(review): removed stray `address_ = ...; size_ = ...;` writes — this
  // is a static function with no instance members; they were leftovers from a
  // member-function version and do not appear in any sibling platform
  // implementation.
  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}
// static
// Commits a previously reserved range by raising its protection; on AIX a
// plain mprotect suffices to make the pages usable. Returns false on failure.
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (mprotect(address, size, prot) == -1) return false;
return true;
}
// static
// Decommits the range by dropping all access; the address space itself stays
// reserved. Returns false if mprotect fails.
bool OS::UncommitRegion(void* address, size_t size) {
return mprotect(address, size, PROT_NONE) != -1;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::HasLazyCommits() { return true; }
// Parses a hexadecimal field (e.g. an address from a maps listing) into an
// unsigned integer. Uses strtoul instead of strtol: the result is unsigned,
// and on 32-bit targets strtol would clamp values >= 0x80000000 to LONG_MAX
// instead of parsing them correctly.
static unsigned StringToLong(char* buffer) {
  return static_cast<unsigned>(strtoul(buffer, nullptr, 16));  // NOLINT
}
......
......@@ -80,16 +80,6 @@ double CygwinTimezoneCache::LocalTimeOffset() {
(loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
// Allocates |requested| bytes (rounded up to whole pages) with the protection
// implied by |access|. On success stores the mapped size in |allocated| and
// returns the base; nullptr on failure.
// NOTE(review): |hint| is accepted but not forwarded to mmap here — the call
// passes nullptr, so placement hints are ignored on this platform.
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(nullptr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
......
......@@ -40,104 +40,6 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase =
mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
hint = AlignedAddress(hint, alignment);
DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
static unsigned StringToLong(char* buffer) {
return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT
}
......
......@@ -93,102 +93,6 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::HasLazyCommits() { return true; }
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
......
......@@ -43,113 +43,6 @@
namespace v8 {
namespace base {
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the
// user-defined tag 255. This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;
// static
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase =
mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address,
size,
prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::HasLazyCommits() { return true; }
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
unsigned int images_count = _dyld_image_count();
......
......@@ -38,105 +38,6 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase =
mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
......
......@@ -73,8 +73,30 @@ bool g_hard_abort = false;
const char* g_gc_fake_mmap = nullptr;
} // namespace
#if V8_OS_MACOSX
// kMmapFd is used to pass vm_alloc flags to tag the region with the
// user-defined tag 255. This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
const int kMmapFd = VM_MAKE_TAG(255);
#else // !V8_OS_MACOSX
const int kMmapFd = -1;
#endif // !V8_OS_MACOSX
const int kMmapFdOffset = 0;
// Translates a platform-independent OS::MemoryPermission value into the
// corresponding POSIX mmap/mprotect PROT_* bits. The switch is exhaustive;
// UNREACHABLE() guards against new enum values being added without a case.
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
return PROT_NONE;
case OS::MemoryPermission::kReadWrite:
return PROT_READ | PROT_WRITE;
case OS::MemoryPermission::kReadWriteExecute:
return PROT_READ | PROT_WRITE | PROT_EXEC;
}
UNREACHABLE();
}
} // namespace
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
......@@ -95,7 +117,6 @@ int OS::ActivationFrameAlignment() {
#endif
}
intptr_t OS::CommitPageSize() {
static intptr_t page_size = getpagesize();
return page_size;
......@@ -109,11 +130,25 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
hint);
}
// TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
#if !V8_OS_FUCHSIA
// Unified POSIX allocation: maps |requested| bytes (rounded up to the
// allocation alignment) near |hint| with the protection implied by |access|.
// Stores the actual mapped size in |allocated|; returns nullptr on failure.
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
#endif // !V8_OS_FUCHSIA
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(address, size);
USE(result);
DCHECK_EQ(result, 0);
DCHECK_EQ(0, result);
}
void OS::SetReadAndExecutable(void* address, const size_t size) {
......@@ -126,7 +161,6 @@ void OS::SetReadAndExecutable(void* address, const size_t size) {
#endif
}
// Create guard pages.
#if !V8_OS_FUCHSIA
void OS::Guard(void* address, const size_t size) {
......@@ -149,6 +183,114 @@ void OS::SetReadAndWritable(void* address, const size_t size, bool commit) {
#endif
}
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// static
// Reserves |size| bytes of address space near |hint| without committing
// backing store (PROT_NONE). Platform flag differences are folded in via the
// preprocessor. Returns nullptr on failure.
void* OS::ReserveRegion(size_t size, void* hint) {
int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
// MAP_NORESERVE avoids charging swap for the reservation; AIX, FreeBSD and
// QNX do not support the flag.
map_flags |= MAP_NORESERVE;
#endif
#if V8_OS_QNX
// QNX needs MAP_LAZY for lazily-backed anonymous mappings.
map_flags |= MAP_LAZY;
#endif  // V8_OS_QNX
void* result = mmap(hint, size, PROT_NONE, map_flags, kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
// static
// Reserves a PROT_NONE region of at least |size| bytes whose base address is
// a multiple of |alignment|. Over-reserves by |alignment|, then unmaps the
// unaligned prefix and the excess suffix. Stores the final size in
// |allocated| (0 on failure); returns the aligned base or nullptr.
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(0, alignment % OS::AllocateAlignment());
hint = AlignedAddress(hint, alignment);
// Over-reserve so an aligned sub-range of |size| is guaranteed to fit.
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
// Commits a previously reserved sub-region, making it readable/writable (and
// optionally executable). Returns false if the system call fails.
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
#if !V8_OS_AIX
  // Remap anonymous pages in place: MAP_FIXED keeps the address and the
  // fresh mapping provides committed backing store.
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }
#else
  // On AIX, raising the protection commits the pages (matches the AIX
  // implementation this unified version replaces).
  // Fix: the parameter is |address|; `base` was not declared in this scope
  // and would not compile on AIX.
  if (mprotect(address, size, prot) == -1) return false;
#endif  // !V8_OS_AIX
  return true;
}
// static
// Decommits a committed sub-region: the backing store is released but the
// address range stays reserved as PROT_NONE. Returns false on failure.
bool OS::UncommitRegion(void* address, size_t size) {
#if !V8_OS_AIX
  int map_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
#if !V8_OS_FREEBSD && !V8_OS_QNX
  // MAP_NORESERVE avoids charging swap for the now-inaccessible range;
  // FreeBSD and QNX do not support the flag.
  map_flags |= MAP_NORESERVE;
#endif  // !V8_OS_FREEBSD && !V8_OS_QNX
#if V8_OS_QNX
  // QNX needs MAP_LAZY for lazily-backed anonymous mappings.
  map_flags |= MAP_LAZY;
#endif  // V8_OS_QNX
  return mmap(address, size, PROT_NONE, map_flags, kMmapFd, kMmapFdOffset) !=
         MAP_FAILED;
#else   // V8_OS_AIX
  // On AIX, dropping all access decommits the pages (matches the AIX
  // implementation this unified version replaces).
  // Fix: the parameter is |address|; `base` was not declared in this scope
  // and would not compile on AIX.
  return mprotect(address, size, PROT_NONE) != -1;
#endif  // V8_OS_AIX
}
// static
// Returns an entire reserved region to the operating system.
// Succeeds exactly when munmap reports success.
bool OS::ReleaseRegion(void* address, size_t size) {
  const int status = munmap(address, size);
  return status == 0;
}
// static
// Returns part of a reserved region to the operating system; on POSIX this
// is the same munmap call as releasing a whole region.
bool OS::ReleasePartialRegion(void* address, size_t size) {
  const int status = munmap(address, size);
  return status == 0;
}
// static
// Reports whether the OS commits pages lazily (so reserving large regions is
// cheap until the pages are actually touched).
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
return true;
#else
// TODO(bbudge) Return true for all POSIX platforms.
return false;
#endif
}
#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
static LazyInstance<RandomNumberGenerator>::type
platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
......@@ -161,7 +303,6 @@ void OS::Initialize(int64_t random_seed, bool hard_abort,
g_gc_fake_mmap = gc_fake_mmap;
}
const char* OS::GetGCFakeMMapFile() {
return g_gc_fake_mmap;
}
......@@ -170,7 +311,7 @@ void* OS::GetRandomMmapAddr() {
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER)
// Dynamic tools do not support custom mmap addresses.
return NULL;
return nullptr;
#endif
uintptr_t raw_addr;
platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
......@@ -747,17 +888,5 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
USE(result);
}
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
return PROT_NONE;
case OS::MemoryPermission::kReadWrite:
return PROT_READ | PROT_WRITE;
case OS::MemoryPermission::kReadWriteExecute:
return PROT_READ | PROT_WRITE | PROT_EXEC;
}
UNREACHABLE();
}
} // namespace base
} // namespace v8
......@@ -21,8 +21,6 @@ class PosixTimezoneCache : public TimezoneCache {
static const int msPerSecond = 1000;
};
int GetProtectionFromMemoryPermission(OS::MemoryPermission access);
} // namespace base
} // namespace v8
......
......@@ -89,103 +89,6 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::HasLazyCommits() { return false; }
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
procfs_mapinfo *mapinfos = nullptr, *mapinfo;
......
......@@ -58,106 +58,6 @@ double SolarisTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase =
mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) return nullptr;
*allocated = msize;
return mbase;
}
// static
void* OS::ReserveRegion(size_t size, void* hint) {
void* result =
mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
return nullptr;
}
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
DCHECK(aligned_size == request_size);
*allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
// static
bool OS::UncommitRegion(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
// static
bool OS::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::ReleasePartialRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<SharedLibraryAddress>();
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment