Commit 6346cc53 authored by Bill Budge, committed by Commit Bot

[Memory] Clean up base OS memory abstractions.

- Sanitize Windows page size / alignment code.
- Reorder some methods to match header file.
- Rename AllocateAlignment to AllocatePageSize to be consistent
  with CommitPageSize.
- Eliminate OS::Allocate overload with is_executable argument (see the
  sketch below).
- Eliminate base::OS::AllocateGuarded - it's not implemented.
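
A minimal before/after sketch of the call-site change (drawn from the
hunks below; buffer and actual_size are locals at the affected call
sites):

  // Before: executability was chosen with a bool flag.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));

  // After: permissions are spelled out via OS::MemoryPermission.
  byte* buffer = static_cast<byte*>(base::OS::Allocate(
      1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));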

Bug: chromium:756050
Change-Id: I046bb019cddde0c0063d617adc2c94a23989d9d1
Reviewed-on: https://chromium-review.googlesource.com/742684
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49114}
parent 91ec9872
@@ -24,8 +24,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
return stub;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
@@ -183,8 +183,8 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
return stub;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
@@ -272,8 +272,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -89,9 +89,9 @@ void* OS::ReserveRegion(size_t size, void* hint) {
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
hint = AlignedAddress(hint, alignment);
DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
DCHECK_EQ(alignment % OS::AllocatePageSize(), 0);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
void* address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
......
@@ -46,10 +46,10 @@ void* OS::ReserveRegion(size_t size, void* hint) {
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
DCHECK_EQ(alignment % OS::AllocatePageSize(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
zx_handle_t vmo;
if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
@@ -82,7 +82,7 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
size_t aligned_size = RoundUp(size, OS::AllocatePageSize());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
......
@@ -71,6 +71,9 @@ bool g_hard_abort = false;
const char* g_gc_fake_mmap = nullptr;
static LazyInstance<RandomNumberGenerator>::type
platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
#if !V8_OS_FUCHSIA
#if V8_OS_MACOSX
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
@@ -102,6 +105,15 @@ int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
#define MAP_ANONYMOUS MAP_ANON
#endif
void OS::Initialize(int64_t random_seed, bool hard_abort,
const char* const gc_fake_mmap) {
if (random_seed) {
platform_random_number_generator.Pointer()->SetSeed(random_seed);
}
g_hard_abort = hard_abort;
g_gc_fake_mmap = gc_fake_mmap;
}
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
@@ -121,24 +133,85 @@ int OS::ActivationFrameAlignment() {
#endif
}
intptr_t OS::CommitPageSize() {
static intptr_t page_size = getpagesize();
size_t OS::AllocatePageSize() {
return static_cast<size_t>(sysconf(_SC_PAGESIZE));
}
size_t OS::CommitPageSize() {
static size_t page_size = getpagesize();
return page_size;
}
void* OS::Allocate(const size_t requested, size_t* allocated,
bool is_executable, void* hint) {
return OS::Allocate(requested, allocated,
is_executable ? OS::MemoryPermission::kReadWriteExecute
: OS::MemoryPermission::kReadWrite,
hint);
void* OS::GetRandomMmapAddr() {
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER)
// Dynamic tools do not support custom mmap addresses.
return nullptr;
#endif
uintptr_t raw_addr;
platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
sizeof(raw_addr));
#if V8_TARGET_ARCH_X64
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
// AIX: 64 bits of virtual addressing, but we limit address range to:
// a) minimize Segment Lookaside Buffer (SLB) misses and
raw_addr &= V8_UINT64_C(0x3ffff000);
// Use extra address space to isolate the mmap regions.
raw_addr += V8_UINT64_C(0x400000000000);
#elif V8_TARGET_BIG_ENDIAN
// Big-endian Linux: 44 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x03fffffff000);
#else
// Little-endian Linux: 48 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#endif
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
// fulfill request.
raw_addr &= V8_UINT64_C(0xfffffff000);
#elif V8_TARGET_ARCH_S390
// 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
// to fulfill request.
raw_addr &= 0x1ffff000;
#else
raw_addr &= 0x3ffff000;
#ifdef __sun
// For our Solaris/illumos mmap hint, we pick a random address in the bottom
// half of the top half of the address space (that is, the third quarter).
// Because we do not MAP_FIXED, this will be treated only as a hint -- the
// system will not fail to mmap() because something else happens to already
// be mapped at our random address. We deliberately set the hint high enough
// to get well above the system's break (that is, the heap); Solaris and
// illumos will try the hint and if that fails allocate as if there were
// no hint at all. The high hint prevents the break from getting hemmed in
// at low values, ceding half of the address space to the system heap.
raw_addr += 0x80000000;
#elif V8_OS_AIX
// The range 0x30000000 - 0xD0000000 is available on AIX;
// choose the upper range.
raw_addr += 0x90000000;
#else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
raw_addr += 0x20000000;
#endif
#endif
return reinterpret_cast<void*>(raw_addr);
}
// TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
#if !V8_OS_FUCHSIA
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
const size_t msize = RoundUp(requested, AllocatePageSize());
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
kMmapFdOffset);
@@ -206,10 +279,10 @@ void* OS::ReserveRegion(size_t size, void* hint) {
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(0, alignment % OS::AllocateAlignment());
DCHECK_EQ(0, alignment % OS::AllocatePageSize());
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
void* result = ReserveRegion(request_size, hint);
if (result == nullptr) {
*allocated = 0;
@@ -227,7 +300,7 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
size_t aligned_size = RoundUp(size, OS::AllocatePageSize());
DCHECK_LE(aligned_size, request_size);
if (aligned_size != request_size) {
@@ -295,91 +368,10 @@ bool OS::HasLazyCommits() {
}
#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
static LazyInstance<RandomNumberGenerator>::type
platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
void OS::Initialize(int64_t random_seed, bool hard_abort,
const char* const gc_fake_mmap) {
if (random_seed) {
platform_random_number_generator.Pointer()->SetSeed(random_seed);
}
g_hard_abort = hard_abort;
g_gc_fake_mmap = gc_fake_mmap;
}
const char* OS::GetGCFakeMMapFile() {
return g_gc_fake_mmap;
}
void* OS::GetRandomMmapAddr() {
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER)
// Dynamic tools do not support custom mmap addresses.
return nullptr;
#endif
uintptr_t raw_addr;
platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
sizeof(raw_addr));
#if V8_TARGET_ARCH_X64
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
// AIX: 64 bits of virtual addressing, but we limit address range to:
// a) minimize Segment Lookaside Buffer (SLB) misses and
raw_addr &= V8_UINT64_C(0x3ffff000);
// Use extra address space to isolate the mmap regions.
raw_addr += V8_UINT64_C(0x400000000000);
#elif V8_TARGET_BIG_ENDIAN
// Big-endian Linux: 44 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x03fffffff000);
#else
// Little-endian Linux: 48 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#endif
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
// fulfill request.
raw_addr &= V8_UINT64_C(0xfffffff000);
#elif V8_TARGET_ARCH_S390
// 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
// to fulfill request.
raw_addr &= 0x1ffff000;
#else
raw_addr &= 0x3ffff000;
#ifdef __sun
// For our Solaris/illumos mmap hint, we pick a random address in the bottom
// half of the top half of the address space (that is, the third quarter).
// Because we do not MAP_FIXED, this will be treated only as a hint -- the
// system will not fail to mmap() because something else happens to already
// be mapped at our random address. We deliberately set the hint high enough
// to get well above the system's break (that is, the heap); Solaris and
// illumos will try the hint and if that fails allocate as if there were
// no hint at all. The high hint prevents the break from getting hemmed in
// at low values, ceding half of the address space to the system heap.
raw_addr += 0x80000000;
#elif V8_OS_AIX
// The range 0x30000000 - 0xD0000000 is available on AIX;
// choose the upper range.
raw_addr += 0x90000000;
#else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
raw_addr += 0x20000000;
#endif
#endif
return reinterpret_cast<void*>(raw_addr);
}
size_t OS::AllocateAlignment() {
return static_cast<size_t>(sysconf(_SC_PAGESIZE));
}
void OS::Sleep(TimeDelta interval) {
usleep(static_cast<useconds_t>(interval.InMicroseconds()));
......
@@ -674,24 +674,9 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
#undef _TRUNCATE
#undef STRUNCATE
// Get the system's page size used by VirtualAlloc() or the next power
// of two. The reason for always returning a power of two is that the
// rounding up in OS::Allocate expects that.
static size_t GetPageSize() {
static size_t page_size = 0;
if (page_size == 0) {
SYSTEM_INFO info;
GetSystemInfo(&info);
page_size = base::bits::RoundUpToPowerOfTwo32(info.dwPageSize);
}
return page_size;
}
// The allocation alignment is the guaranteed alignment for
// VirtualAlloc'ed blocks of memory.
size_t OS::AllocateAlignment() {
size_t OS::AllocatePageSize() {
static size_t allocate_alignment = 0;
if (allocate_alignment == 0) {
SYSTEM_INFO info;
@@ -701,6 +686,17 @@ size_t OS::AllocateAlignment() {
return allocate_alignment;
}
size_t OS::CommitPageSize() {
static size_t page_size = 0;
if (page_size == 0) {
SYSTEM_INFO info;
GetSystemInfo(&info);
page_size = info.dwPageSize;
DCHECK_EQ(4096, page_size);
}
return page_size;
}
static LazyInstance<RandomNumberGenerator>::type
platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
@@ -763,18 +759,10 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
} // namespace
void* OS::Allocate(const size_t requested, size_t* allocated,
bool is_executable, void* hint) {
return OS::Allocate(requested, allocated,
is_executable ? OS::MemoryPermission::kReadWriteExecute
: OS::MemoryPermission::kReadWrite,
hint);
}
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access, void* hint) {
// VirtualAlloc rounds allocated size to page size automatically.
size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
size_t msize = RoundUp(requested, static_cast<int>(AllocatePageSize()));
// Windows XP SP2 allows Data Execution Prevention (DEP).
int prot = PAGE_NOACCESS;
@@ -798,7 +786,7 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
if (mbase == NULL) return NULL;
DCHECK_EQ(reinterpret_cast<uintptr_t>(mbase) % OS::AllocateAlignment(), 0);
DCHECK_EQ(reinterpret_cast<uintptr_t>(mbase) % OS::AllocatePageSize(), 0);
*allocated = msize;
return mbase;
@@ -810,10 +798,6 @@ void OS::Free(void* address, const size_t size) {
USE(size);
}
intptr_t OS::CommitPageSize() {
return 4096;
}
void OS::SetReadAndExecutable(void* address, const size_t size) {
DWORD old_protect;
CHECK_NE(NULL,
@@ -841,10 +825,10 @@ void* OS::ReserveRegion(size_t size, void* hint) {
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
size_t* allocated) {
DCHECK_EQ(alignment % OS::AllocateAlignment(), 0);
DCHECK_EQ(alignment % OS::AllocatePageSize(), 0);
hint = AlignedAddress(hint, alignment);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocatePageSize()));
void* address = ReserveRegion(request_size, hint);
if (address == nullptr) {
*allocated = 0;
......
@@ -161,25 +161,23 @@ class V8_BASE_EXPORT OS {
// here even though most systems support additional modes.
enum class MemoryPermission { kNoAccess, kReadWrite, kReadWriteExecute };
// Allocate/Free memory used by JS heap. Permissions are set according to the
// is_* flags. Returns the address of allocated memory, or nullptr if failed.
// Gets the page granularity for Allocate. Addresses returned by Allocate are
// aligned to this size.
static size_t AllocatePageSize();
// Gets the granularity at which the permissions and commit calls can be made.
static size_t CommitPageSize();
// Generate a random address to be used for hinting allocation calls.
static void* GetRandomMmapAddr();
// Allocates memory. Permissions are set according to the access argument.
// Returns the address of the allocated memory, or nullptr on failure.
static void* Allocate(const size_t requested, size_t* allocated,
MemoryPermission access, void* hint = nullptr);
// Allocate/Free memory used by JS heap. Pages are readable/writable, but
// they are not guaranteed to be executable unless 'executable' is true.
// Returns the address of allocated memory, or nullptr if failed.
static void* Allocate(const size_t requested, size_t* allocated,
bool is_executable, void* hint = nullptr);
static void Free(void* address, const size_t size);
// Allocates a region of memory that is inaccessible. On Windows this reserves
// but does not commit the memory. On POSIX systems it allocates memory as
// PROT_NONE, which also prevents it from being committed.
static void* AllocateGuarded(const size_t requested);
// This is the granularity at which the SetReadAndExecutable(...) call can
// set page permissions.
static intptr_t CommitPageSize();
// Frees memory allocated by a call to Allocate.
static void Free(void* address, const size_t size);
// Mark a region of memory executable and readable but not writable.
static void SetReadAndExecutable(void* address, const size_t size);
@@ -190,12 +188,6 @@ class V8_BASE_EXPORT OS {
// Make a region of memory non-executable but readable and writable.
static void SetReadAndWritable(void* address, const size_t size, bool commit);
// Generate a random address to be used for hinting mmap().
static void* GetRandomMmapAddr();
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
static void* ReserveRegion(size_t size, void* hint);
static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
......
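A minimal usage sketch of the reworked base::OS surface declared above
(hypothetical caller code, not part of this change):

  // Reserve and release a page with the renamed accessors:
  // AllocatePageSize() replaces AllocateAlignment(), and permissions
  // are passed explicitly instead of an is_executable bool.
  size_t allocated = 0;
  void* mem = base::OS::Allocate(base::OS::AllocatePageSize(), &allocated,
                                 base::OS::MemoryPermission::kReadWrite,
                                 base::OS::GetRandomMmapAddr());
  if (mem != nullptr) base::OS::Free(mem, allocated);
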
@@ -119,9 +119,7 @@ bool CodeRange::SetUp(size_t requested) {
VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(
requested,
Max(kCodeRangeAreaAlignment,
static_cast<size_t>(base::OS::AllocateAlignment())),
requested, Max(kCodeRangeAreaAlignment, base::OS::AllocatePageSize()),
base::OS::GetRandomMmapAddr(), &reservation)) {
return false;
}
@@ -2403,7 +2401,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const size_t delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* last_page = anchor()->prev_page();
DCHECK_NE(last_page, anchor());
@@ -2447,7 +2445,7 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const size_t delta = current_capacity_ - new_capacity;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* new_last_page;
Page* last_page;
......
@@ -18,8 +18,8 @@ namespace internal {
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
@@ -134,8 +134,8 @@ class LabelConverter {
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
......
@@ -24,8 +24,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
return stub;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
3 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return stub;
// This code assumes that cache lines are 32 bytes and if the cache line is
@@ -556,8 +556,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -25,8 +25,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
3 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return stub;
// This code assumes that cache lines are 32 bytes and if the cache line is
@@ -558,8 +558,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -21,8 +21,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -20,8 +20,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -27,7 +27,7 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
DCHECK_EQ(0, size % base::OS::CommitPageSize());
// AllocateGuarded makes the whole region inaccessible by default.
// Reserve makes the whole region inaccessible by default.
allocation_base =
isolate->array_buffer_allocator()->Reserve(allocation_length);
if (allocation_base == nullptr) {
......
@@ -18,7 +18,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(base::OS::Allocate(
1 * KB, &actual_size, true, isolate->heap()->GetRandomMmapAddr()));
1 * KB, &actual_size, base::OS::MemoryPermission::kReadWriteExecute,
isolate->heap()->GetRandomMmapAddr()));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -122,7 +122,7 @@ TEST(AlignedAllocOOM) {
// On failure, this won't return, since an AlignedAlloc failure is fatal.
// In that case, behavior is checked in OnAlignedAllocOOM before exit.
void* result = v8::internal::AlignedAlloc(GetHugeMemoryAmount(),
v8::base::OS::AllocateAlignment());
v8::base::OS::AllocatePageSize());
// On a few systems, allocation somehow succeeds.
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
@@ -143,7 +143,7 @@ TEST(AlignedAllocVirtualMemoryOOM) {
CHECK(!platform.oom_callback_called);
v8::internal::VirtualMemory result;
bool success = v8::internal::AlignedAllocVirtualMemory(
GetHugeMemoryAmount(), v8::base::OS::AllocateAlignment(), nullptr,
GetHugeMemoryAmount(), v8::base::OS::AllocatePageSize(), nullptr,
&result);
// On a few systems, allocation somehow succeeds.
CHECK_IMPLIES(success, result.IsReserved());
......
@@ -174,15 +174,15 @@ static void InitializeVM() {
#else // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
#define SETUP_SIZE(buf_size) \
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
CHECK_NOT_NULL(isolate); \
size_t actual_size; \
byte* buf = static_cast<byte*>( \
v8::base::OS::Allocate(buf_size, &actual_size, true)); \
MacroAssembler masm(isolate, buf, static_cast<unsigned>(actual_size), \
v8::internal::CodeObjectRequired::kYes); \
#define SETUP_SIZE(buf_size) \
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
CHECK_NOT_NULL(isolate); \
size_t actual_size; \
byte* buf = static_cast<byte*>(v8::base::OS::Allocate( \
buf_size, &actual_size, base::OS::MemoryPermission::kReadWriteExecute)); \
MacroAssembler masm(isolate, buf, static_cast<unsigned>(actual_size), \
v8::internal::CodeObjectRequired::kYes); \
RegisterDump core;
#define RESET() \
@@ -15655,3 +15655,23 @@ TEST(internal_reference_linked) {
} // namespace internal
} // namespace v8
#undef __
#undef BUF_SIZE
#undef SETUP
#undef INIT_V8
#undef SETUP_SIZE
#undef RESET
#undef START_AFTER_RESET
#undef START
#undef RUN
#undef END
#undef TEARDOWN
#undef CHECK_EQUAL_NZCV
#undef CHECK_EQUAL_REGISTERS
#undef CHECK_EQUAL_32
#undef CHECK_EQUAL_FP32
#undef CHECK_EQUAL_64
#undef CHECK_EQUAL_FP64
#undef CHECK_EQUAL_128
#undef CHECK_CONSTANT_POOL_SIZE
@@ -48,7 +48,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -47,8 +47,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size = 4 * Assembler::kMinimalBufferSize;
byte* buffer = static_cast<byte*>(
v8::base::OS::Allocate(actual_size, &actual_size, true));
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
actual_size, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -49,7 +49,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -50,7 +50,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -50,7 +50,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -49,7 +49,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
Assembler::kMinimalBufferSize, &actual_size,
v8::base::OS::MemoryPermission::kReadWriteExecute));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
......
@@ -45,16 +45,22 @@ typedef void* (*F)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
typedef int (*F5)(void*, void*, void*, void*, void*);
byte* AllocateExecutablePage(int* actual_size) {
size_t allocated = 0;
void* result =
v8::base::OS::Allocate(Assembler::kMinimalBufferSize, &allocated,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*actual_size = static_cast<int>(allocated);
return static_cast<byte*>(result);
}
TEST(LoadAndStoreWithRepresentation) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
__ sub(sp, sp, Operand(1 * kPointerSize));
@@ -138,14 +144,11 @@ TEST(LoadAndStoreWithRepresentation) {
TEST(ExtractLane) {
if (!CpuFeatures::IsSupported(NEON)) return;
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
@@ -281,14 +284,11 @@ TEST(ExtractLane) {
TEST(ReplaceLane) {
if (!CpuFeatures::IsSupported(NEON)) return;
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
......
@@ -52,6 +52,15 @@ typedef int (*F0)();
#define __ masm->
byte* AllocateExecutablePage(int* actual_size) {
size_t allocated = 0;
void* result =
v8::base::OS::Allocate(Assembler::kMinimalBufferSize, &allocated,
v8::base::OS::MemoryPermission::kReadWriteExecute);
CHECK(result);
*actual_size = static_cast<int>(allocated);
return static_cast<byte*>(result);
}
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
@@ -98,14 +107,11 @@ static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
// Test that we can move a Smi value literally into a register.
TEST(SmiMove) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
@@ -184,14 +190,11 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
// Test that we can compare smis for equality (and more).
TEST(SmiCompare) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -233,14 +236,11 @@ TEST(SmiCompare) {
TEST(Integer32ToSmi) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -333,14 +333,11 @@ TEST(Integer32ToSmi) {
}
TEST(SmiCheck) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -433,14 +430,11 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
}
TEST(SmiIndex) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize * 5, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -469,14 +463,11 @@ TEST(OperandOffset) {
uint32_t data[256];
for (uint32_t i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -820,15 +811,13 @@ TEST(OperandOffset) {
TEST(LoadAndStoreWithRepresentation) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
__ subq(rsp, Immediate(1 * kPointerSize));
@@ -1089,14 +1078,11 @@ void TestFloat64x2Neg(MacroAssembler* masm, Label* exit, double x, double y) {
}
TEST(SIMDMacros) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
int actual_size;
byte* buffer = AllocateExecutablePage(&actual_size);
MacroAssembler assembler(isolate, buffer, actual_size,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
......
@@ -14,7 +14,7 @@ namespace internal {
TEST(OSReserveMemory) {
size_t mem_size = 0;
void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocatePageSize(),
OS::GetRandomMmapAddr(), &mem_size);
CHECK_NE(0, mem_size);
CHECK_NOT_NULL(mem_addr);
......