Commit a797a359 authored by bmeurer@chromium.org

Refactor and cleanup VirtualMemory.

Remove a lot of platform duplication, and simplify the virtual
memory implementation. Also improve readability by avoiding bool
parameters for executability (use a dedicated Executability type
instead).
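
For illustration, a rough before/after of the call sites touched below
(signatures abbreviated; the codegen buffer allocation is one example taken
from this diff):

    // Before: a bare bool selects executability.
    byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));

    // After: the intent is explicit via VirtualMemory::Executability.
    byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
        1 * KB, &actual_size, VirtualMemory::EXECUTABLE));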

Get rid of the Isolate::UncheckedCurrent() call in the platform
code, as part of the Isolate TLS cleanup.

Use a dedicated random number generator for the address
randomization, instead of messing with the per-isolate random
number generators.
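
A minimal sketch of that idea (the new VirtualMemory implementation file is
collapsed in this diff view, so the generator name and the lazy-initialization
detail below are assumptions, not the actual code):

    // Hypothetical: one process-wide RNG for mmap address hints, so the
    // platform code no longer needs Isolate::UncheckedCurrent().
    static RandomNumberGenerator* hint_rng = NULL;  // assumed, lazily created

    static void* GenerateRandomAddressHint() {
      uintptr_t raw_addr;
      hint_rng->NextBytes(&raw_addr, sizeof(raw_addr));
      raw_addr &= V8_UINT64_C(0x3ffffffff000);  // 46-bit hint, as in the old code
      return reinterpret_cast<void*>(raw_addr);
    }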

TEST=cctest/test-virtual-memory
R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/23641009

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16637 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 9a8344b1
......@@ -187,6 +187,7 @@
// supported
// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
// V8_HAS_ATTRIBUTE_PURE - __attribute__((pure)) supported
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
// supported
......@@ -216,6 +217,7 @@
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_PURE (__has_attribute(pure))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
......@@ -246,6 +248,7 @@
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_PURE (V8_GNUC_PREREQ(2, 96, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
......@@ -325,6 +328,28 @@
#endif
// Many functions have no effects except the return value and their return value
// depends only on the parameters and/or global variables. Such a function can
// be subject to common subexpression elimination and loop optimization just as
// an arithmetic operator would be. These functions should be declared with the
// attribute V8_PURE. For example,
//
// int square (int) V8_PURE;
//
// says that the hypothetical function square is safe to call fewer times than
// the program says.
//
// Some common examples of pure functions are strlen or memcmp. Interesting
// non-V8_PURE functions are functions with infinite loops or those depending
// on volatile memory or other system resources that may change between two
// consecutive calls (such as feof in a multithreaded environment).
#if V8_HAS_ATTRIBUTE_PURE
# define V8_PURE __attribute__((pure))
#else
# define V8_PURE /* NOT SUPPORTED */
#endif
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() V8_WARN_UNUSED_RESULT;
......
......@@ -64,7 +64,8 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
......@@ -102,7 +103,9 @@ UnaryMathFunction CreateExpFunction() {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
......@@ -122,7 +125,8 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
return stub;
}
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
......@@ -264,7 +268,9 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
#endif
}
......@@ -280,7 +286,8 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
return stub;
}
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
......@@ -352,7 +359,9 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
masm.GetCode(&desc);
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
#endif
......
......@@ -42,14 +42,8 @@ namespace internal {
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
OS::CommitPageSize(),
#if defined(__native_client__)
// The Native Client port of V8 uses an interpreter,
// so code pages don't need PROT_EXEC.
NOT_EXECUTABLE,
#else
EXECUTABLE,
#endif
VirtualMemory::GetPageSize(),
VirtualMemory::EXECUTABLE,
NULL);
}
......@@ -128,7 +122,7 @@ static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
size_t Deoptimizer::GetMaxDeoptTableSize() {
int entries_size =
Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
int commit_page_size = static_cast<int>(OS::CommitPageSize());
int commit_page_size = static_cast<int>(VirtualMemory::GetPageSize());
int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
commit_page_size) + 1;
return static_cast<size_t>(commit_page_size * page_count);
......
......@@ -144,7 +144,7 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
......@@ -178,7 +178,7 @@ MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
......@@ -242,7 +242,8 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
} else if (CODE_SPACE == space) {
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
result = lo_space_->AllocateRaw(
size_in_bytes, VirtualMemory::NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else if (PROPERTY_CELL_SPACE == space) {
......
......@@ -172,8 +172,7 @@ Heap::Heap()
max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
intptr_t max_virtual = OS::MaxVirtualMemory();
intptr_t max_virtual = static_cast<intptr_t>(VirtualMemory::GetLimit());
if (max_virtual > 0) {
if (code_range_size_ > 0) {
// Reserve no more than 1/8 of the memory for the code range.
......@@ -4151,7 +4150,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
HeapObject* result;
bool force_lo_space = obj_size > code_space()->AreaSize();
if (force_lo_space) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
......@@ -4163,7 +4162,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
// Discard the first code allocation, which was on a page where it could be
// moved.
CreateFillerObjectAt(result->address(), obj_size);
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
}
......@@ -4214,7 +4213,7 @@ MaybeObject* Heap::CopyCode(Code* code) {
int obj_size = code->Size();
MaybeObject* maybe_result;
if (obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
......@@ -4257,7 +4256,8 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
MaybeObject* maybe_result;
if (new_obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
maybe_result = lo_space_->AllocateRaw(
new_obj_size, VirtualMemory::EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(new_obj_size);
}
......@@ -5370,7 +5370,7 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
......@@ -5523,7 +5523,7 @@ MaybeObject* Heap::AllocateRawFixedArray(int length) {
int size = FixedArray::SizeFor(length);
return size <= Page::kMaxNonCodeHeapObjectSize
? new_space_.AllocateRaw(size)
: lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
: lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE);
}
......@@ -6878,7 +6878,7 @@ bool Heap::SetUp() {
new OldSpace(this,
max_old_generation_size_,
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
VirtualMemory::NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
if (!old_pointer_space_->SetUp()) return false;
......@@ -6887,7 +6887,7 @@ bool Heap::SetUp() {
new OldSpace(this,
max_old_generation_size_,
OLD_DATA_SPACE,
NOT_EXECUTABLE);
VirtualMemory::NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
if (!old_data_space_->SetUp()) return false;
......@@ -6901,8 +6901,8 @@ bool Heap::SetUp() {
}
}
code_space_ =
new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
code_space_ = new OldSpace(
this, max_old_generation_size_, CODE_SPACE, VirtualMemory::EXECUTABLE);
if (code_space_ == NULL) return false;
if (!code_space_->SetUp()) return false;
......@@ -7999,8 +7999,9 @@ void Heap::FreeQueuedChunks() {
MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
while (inner <= inner_last) {
// Size of a large chunk is always a multiple of
// OS::AllocateAlignment() so there is always
// enough space for a fake MemoryChunk header.
// VirtualMemory::GetAllocationGranularity() so
// there is always enough space for a fake
// MemoryChunk header.
Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
// Guard against overflow.
if (area_end < inner->address()) area_end = chunk_end;
......
......@@ -60,9 +60,8 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) {
// Fallback to library function if function cannot be created.
switch (type) {
......@@ -97,7 +96,9 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
......@@ -106,7 +107,8 @@ UnaryMathFunction CreateExpFunction() {
if (!CpuFeatures::IsSupported(SSE2)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
......@@ -135,7 +137,9 @@ UnaryMathFunction CreateExpFunction() {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
......@@ -143,9 +147,8 @@ UnaryMathFunction CreateExpFunction() {
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
// If SSE2 is not available, we can use libc's implementation to ensure
// consistency, since code generated by fullcodegen calls into the runtime in
// that case.
if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
......@@ -168,7 +171,9 @@ UnaryMathFunction CreateSqrtFunction() {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
......@@ -262,7 +267,8 @@ class LabelConverter {
OS::MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
LabelConverter conv(buffer);
......@@ -639,7 +645,9 @@ OS::MemMoveFunction CreateMemMoveFunction() {
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
......
......@@ -558,7 +558,7 @@ void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
bool success = marking_deque_memory_->Commit(
reinterpret_cast<Address>(marking_deque_memory_->address()),
marking_deque_memory_->size(),
false); // Not executable.
VirtualMemory::NOT_EXECUTABLE);
CHECK(success);
marking_deque_memory_committed_ = true;
}
......
......@@ -64,7 +64,8 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
......@@ -102,7 +103,9 @@ UnaryMathFunction CreateExpFunction() {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
......
......@@ -73,21 +73,6 @@ double OS::LocalTimeOffset() {
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
return mbase;
}
void OS::DumpBacktrace() {
// Currently unsupported.
}
......@@ -224,7 +209,8 @@ static void* GetRandomAddr() {
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
// The address range used to randomize RWX allocations in OS::Allocate
// The address range used to randomize RWX allocations in
// VirtualMemory::AllocateRegion().
// Try not to map pages into the default range into which Windows loads DLLs.
// Use a multiple of 64k to prevent committing unused memory.
// Note: This does not guarantee RWX regions will be within the
......@@ -245,126 +231,4 @@ static void* GetRandomAddr() {
return NULL;
}
static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
LPVOID base = NULL;
if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
// For executable pages try to randomize the allocation address.
for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
base = VirtualAlloc(GetRandomAddr(), size, action, protection);
}
}
// After three attempts give up and let the OS find an address to use.
if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
return base;
}
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size)
: address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* address = ReserveRegion(request_size);
if (address == NULL) return;
Address base = RoundUp(static_cast<Address>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
ASSERT(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != NULL) {
request_size = size;
ASSERT(base == static_cast<Address>(address));
} else {
// Resizing failed, just go with a bigger area.
address = ReserveRegion(request_size);
if (address == NULL) return;
}
address_ = address;
size_ = request_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address_, size_);
ASSERT(result);
USE(result);
}
}
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
ASSERT(IsReserved());
return UncommitRegion(address, size);
}
void* VirtualMemory::ReserveRegion(size_t size) {
return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
return true;
}
bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
PAGE_NOACCESS)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return VirtualFree(base, 0, MEM_RELEASE) != 0;
}
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
} } // namespace v8::internal
......@@ -81,22 +81,6 @@ double OS::LocalTimeOffset() {
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
return mbase;
}
void OS::DumpBacktrace() {
POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
}
......@@ -203,141 +187,4 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size)
: address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
Address base = static_cast<Address>(reservation);
Address aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
ASSERT(aligned_size == request_size);
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
USE(result);
}
}
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(base,
size,
prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
} } // namespace v8::internal
......@@ -137,23 +137,6 @@ double OS::LocalTimeOffset() {
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(i::Isolate::Current(),
StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
return mbase;
}
void OS::DumpBacktrace() {
// backtrace is a glibc extension.
#if defined(__GLIBC__) && !defined(__UCLIBC__)
......@@ -184,12 +167,16 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
int size = ftell(file);
void* memory =
mmap(OS::GetRandomMmapAddr(),
mmap(NULL,
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
if (memory == MAP_FAILED) {
fclose(file);
return NULL;
}
return new PosixMemoryMappedFile(file, memory, size);
}
......@@ -204,18 +191,24 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
return NULL;
}
void* memory =
mmap(OS::GetRandomMmapAddr(),
mmap(NULL,
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
if (memory == MAP_FAILED) {
fclose(file);
return NULL;
}
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) OS::Free(memory_, size_);
int result = munmap(memory_, size_);
ASSERT_EQ(0, result);
USE(result);
fclose(file_);
}
......@@ -296,7 +289,7 @@ void OS::SignalCodeMovingGC() {
OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
OS::Abort();
}
void* addr = mmap(OS::GetRandomMmapAddr(),
void* addr = mmap(NULL,
size,
#if defined(__native_client__)
// The Native Client port of V8 uses an interpreter,
......@@ -309,7 +302,9 @@ void OS::SignalCodeMovingGC() {
fileno(f),
0);
ASSERT(addr != MAP_FAILED);
OS::Free(addr, size);
int result = munmap(addr, size);
ASSERT_EQ(0, result);
USE(result);
fclose(f);
}
......@@ -323,147 +318,4 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
#endif
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size)
: address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
Address base = static_cast<Address>(reservation);
Address aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
ASSERT(aligned_size == request_size);
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
USE(result);
}
}
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
#if defined(__native_client__)
// The Native Client port of V8 uses an interpreter,
// so code pages don't need PROT_EXEC.
int prot = PROT_READ | PROT_WRITE;
#else
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
#endif
if (MAP_FAILED == mmap(base,
size,
prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
bool VirtualMemory::HasLazyCommits() {
return true;
}
} } // namespace v8::internal
......@@ -79,34 +79,6 @@ namespace v8 {
namespace internal {
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255. This helps identify V8-allocated regions in memory analysis
// tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(OS::GetRandomMmapAddr(),
msize,
prot,
MAP_PRIVATE | MAP_ANON,
kMmapFd,
kMmapFdOffset);
if (mbase == MAP_FAILED) {
LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
return mbase;
}
void OS::DumpBacktrace() {
// If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
if (backtrace == NULL) return;
......@@ -137,7 +109,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
int size = ftell(file);
void* memory =
mmap(OS::GetRandomMmapAddr(),
mmap(NULL,
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
......@@ -157,7 +129,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
return NULL;
}
void* memory =
mmap(OS::GetRandomMmapAddr(),
mmap(NULL,
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
......@@ -168,7 +140,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) OS::Free(memory_, size_);
if (memory_) munmap(memory_, size_);
fclose(file_);
}
......@@ -227,137 +199,4 @@ int OS::StackWalk(Vector<StackFrame> frames) {
return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
}
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size)
: address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
Address base = static_cast<Address>(reservation);
Address aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
ASSERT(aligned_size == request_size);
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
USE(result);
}
}
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
bool VirtualMemory::CommitRegion(void* address,
size_t size,
bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address,
size,
prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
return mmap(address,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
bool VirtualMemory::HasLazyCommits() {
return false;
}
} } // namespace v8::internal
......@@ -79,23 +79,6 @@ double OS::LocalTimeOffset() {
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
LOG(i::Isolate::Current(),
StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
return mbase;
}
void OS::DumpBacktrace() {
// Currently unsupported.
}
......@@ -260,141 +243,4 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
return frames_count;
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size)
: address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
Address base = static_cast<Address>(reservation);
Address aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
ASSERT(aligned_size == request_size);
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
USE(result);
}
}
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(base,
size,
prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
} } // namespace v8::internal
......@@ -91,17 +91,6 @@ uint64_t OS::CpuFeaturesImpliedByPlatform() {
}
// Maximum size of the virtual memory. 0 means there is no artificial
// limit.
intptr_t OS::MaxVirtualMemory() {
struct rlimit limit;
int result = getrlimit(RLIMIT_DATA, &limit);
if (result != 0) return 0;
return limit.rlim_cur;
}
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
......@@ -120,97 +109,6 @@ int OS::ActivationFrameAlignment() {
}
intptr_t OS::CommitPageSize() {
static intptr_t page_size = getpagesize();
return page_size;
}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(address, size);
USE(result);
ASSERT(result == 0);
}
// Get rid of writable permission on code allocations.
void OS::ProtectCode(void* address, const size_t size) {
#if defined(__CYGWIN__)
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
#elif defined(__native_client__)
// The Native Client port of V8 uses an interpreter, so
// code pages don't need PROT_EXEC.
mprotect(address, size, PROT_READ);
#else
mprotect(address, size, PROT_READ | PROT_EXEC);
#endif
}
// Create guard pages.
void OS::Guard(void* address, const size_t size) {
#if defined(__CYGWIN__)
DWORD oldprotect;
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
#else
mprotect(address, size, PROT_NONE);
#endif
}
void* OS::GetRandomMmapAddr() {
#if defined(__native_client__)
// TODO(bradchen): restore randomization once Native Client gets
// smarter about using mmap address hints.
// See http://code.google.com/p/nativeclient/issues/3341
return NULL;
#endif
Isolate* isolate = Isolate::UncheckedCurrent();
// Note that the current isolate isn't set up in a call path via
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
uintptr_t raw_addr;
isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr));
#if V8_TARGET_ARCH_X64
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
raw_addr &= 0x3ffff000;
# ifdef __sun
// For our Solaris/illumos mmap hint, we pick a random address in the bottom
// half of the top half of the address space (that is, the third quarter).
// Because we do not MAP_FIXED, this will be treated only as a hint -- the
// system will not fail to mmap() because something else happens to already
// be mapped at our random address. We deliberately set the hint high enough
// to get well above the system's break (that is, the heap); Solaris and
// illumos will try the hint and if that fails allocate as if there were
// no hint at all. The high hint prevents the break from getting hemmed in
// at low values, ceding half of the address space to the system heap.
raw_addr += 0x80000000;
# else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
raw_addr += 0x20000000;
# endif
#endif
return reinterpret_cast<void*>(raw_addr);
}
return NULL;
}
size_t OS::AllocateAlignment() {
return getpagesize();
}
void OS::Sleep(int milliseconds) {
useconds_t ms = static_cast<useconds_t>(milliseconds);
usleep(1000 * ms);
......
......@@ -96,22 +96,6 @@ double OS::LocalTimeOffset() {
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
return mbase;
}
void OS::DumpBacktrace() {
// Currently unsupported.
}
......@@ -224,141 +208,4 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
return walker.index;
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size)
: address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
Address base = static_cast<Address>(reservation);
Address aligned_base = RoundUp(base, alignment);
ASSERT_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
size_t prefix_size = static_cast<size_t>(aligned_base - base);
OS::Free(base, prefix_size);
request_size -= prefix_size;
}
size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
ASSERT_LE(aligned_size, request_size);
if (aligned_size != request_size) {
size_t suffix_size = request_size - aligned_size;
OS::Free(aligned_base + aligned_size, suffix_size);
request_size -= suffix_size;
}
ASSERT(aligned_size == request_size);
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
USE(result);
}
}
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
return result;
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(base,
size,
prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
kMmapFd,
kMmapFdOffset)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
} } // namespace v8::internal
......@@ -69,11 +69,6 @@ int strncasecmp(const char* s1, const char* s2, int n) {
#define _TRUNCATE 0
#define STRUNCATE 80
inline void MemoryBarrier() {
int barrier = 0;
__asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
}
#endif // __MINGW64_VERSION_MAJOR
......@@ -128,11 +123,6 @@ int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
namespace v8 {
namespace internal {
intptr_t OS::MaxVirtualMemory() {
return 0;
}
double ceiling(double x) {
return ceil(x);
}
......@@ -743,127 +733,6 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
#undef STRUNCATE
// Get the system's page size used by VirtualAlloc() or the next power
// of two. The reason for always returning a power of two is that the
// rounding up in OS::Allocate expects that.
static size_t GetPageSize() {
static size_t page_size = 0;
if (page_size == 0) {
SYSTEM_INFO info;
GetSystemInfo(&info);
page_size = RoundUpToPowerOf2(info.dwPageSize);
}
return page_size;
}
// The allocation alignment is the guaranteed alignment for
// VirtualAlloc'ed blocks of memory.
size_t OS::AllocateAlignment() {
static size_t allocate_alignment = 0;
if (allocate_alignment == 0) {
SYSTEM_INFO info;
GetSystemInfo(&info);
allocate_alignment = info.dwAllocationGranularity;
}
return allocate_alignment;
}
void* OS::GetRandomMmapAddr() {
Isolate* isolate = Isolate::UncheckedCurrent();
// Note that the current isolate isn't set up in a call path via
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
// The address range used to randomize RWX allocations in OS::Allocate
// Try not to map pages into the default range into which Windows loads DLLs.
// Use a multiple of 64k to prevent committing unused memory.
// Note: This does not guarantee RWX regions will be within the
// range kAllocationRandomAddressMin to kAllocationRandomAddressMax
#ifdef V8_HOST_ARCH_64_BIT
static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
#else
static const intptr_t kAllocationRandomAddressMin = 0x04000000;
static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
uintptr_t address =
(isolate->random_number_generator()->NextInt() << kPageSizeBits) |
kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
return reinterpret_cast<void *>(address);
}
return NULL;
}
static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
LPVOID base = NULL;
if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
// For executable pages try to randomize the allocation address.
for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
}
}
// After three attempts give up and let the OS find an address to use.
if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
return base;
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
// VirtualAlloc rounds allocated size to page size automatically.
size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
// Windows XP SP2 allows Data Execution Prevention (DEP).
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
LPVOID mbase = RandomizedVirtualAlloc(msize,
MEM_COMMIT | MEM_RESERVE,
prot);
if (mbase == NULL) {
LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed"));
return NULL;
}
ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
*allocated = msize;
return mbase;
}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): VirtualFree has a return value which is ignored here.
VirtualFree(address, 0, MEM_RELEASE);
USE(size);
}
intptr_t OS::CommitPageSize() {
return 4096;
}
void OS::ProtectCode(void* address, const size_t size) {
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
}
void OS::Guard(void* address, const size_t size) {
DWORD oldprotect;
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
}
void OS::Sleep(int milliseconds) {
::Sleep(milliseconds);
}
......@@ -1368,111 +1237,6 @@ int OS::ActivationFrameAlignment() {
}
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size)
: address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* address = ReserveRegion(request_size);
if (address == NULL) return;
Address base = RoundUp(static_cast<Address>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
bool result = ReleaseRegion(address, request_size);
USE(result);
ASSERT(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != NULL) {
request_size = size;
ASSERT(base == static_cast<Address>(address));
} else {
// Resizing failed, just go with a bigger area.
address = ReserveRegion(request_size);
if (address == NULL) return;
}
address_ = address;
size_ = request_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
ASSERT(result);
USE(result);
}
}
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
ASSERT(IsReserved());
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
PAGE_NOACCESS)) {
return false;
}
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return VirtualFree(base, 0, MEM_RELEASE) != 0;
}
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
}
// ----------------------------------------------------------------------------
// Win32 thread support.
......
......@@ -219,30 +219,6 @@ class OS {
static void PrintError(const char* format, ...);
static void VPrintError(const char* format, va_list args);
// Allocate/Free memory used by JS heap. Pages are readable/writable, but
// they are not guaranteed to be executable unless 'executable' is true.
// Returns the address of allocated memory, or NULL if failed.
static void* Allocate(const size_t requested,
size_t* allocated,
bool is_executable);
static void Free(void* address, const size_t size);
// This is the granularity at which the ProtectCode(...) call can set page
// permissions.
static intptr_t CommitPageSize();
// Mark code segments non-writable.
static void ProtectCode(void* address, const size_t size);
// Assign memory as a guard page so that access will cause an exception.
static void Guard(void* address, const size_t size);
// Generate a random address to be used for hinting mmap().
static void* GetRandomMmapAddr();
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
......@@ -303,10 +279,6 @@ class OS {
// positions indicated by the members of the CpuFeature enum from globals.h
static uint64_t CpuFeaturesImpliedByPlatform();
// Maximum size of the virtual memory. 0 means there is no artificial
// limit.
static intptr_t MaxVirtualMemory();
// Returns the double constant NAN
static double nan_value();
......@@ -386,99 +358,6 @@ class OS {
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
// Represents and controls an area of reserved memory.
// Control of the reserved memory can be assigned to another VirtualMemory
// object by assignment or copy-constructing. This removes the reserved memory
// from the original object.
class VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size);
// Reserves virtual memory containing an area of the given size that
// is aligned per alignment. This may not be at the position returned
// by address().
VirtualMemory(size_t size, size_t alignment);
// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
~VirtualMemory();
// Returns whether the memory has been reserved.
bool IsReserved();
// Initializes or resets an embedded VirtualMemory object.
void Reset();
// Returns the start address of the reserved memory.
// If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
// the alignment to get the start of the aligned block.
void* address() {
ASSERT(IsReserved());
return address_;
}
// Returns the size of the reserved memory. The returned value is only
// meaningful when IsReserved() returns true.
// If the memory was reserved with an alignment, this size may be larger
// than the requested size.
size_t size() { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address, size_t size, bool is_executable);
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
// Creates a single guard page at the given address.
bool Guard(void* address);
void Release() {
ASSERT(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
void* address = address_;
size_t size = size_;
Reset();
bool result = ReleaseRegion(address, size);
USE(result);
ASSERT(result);
}
// Assign control of the reserved region to a different VirtualMemory object.
// The old object is no longer functional (IsReserved() returns false).
void TakeControl(VirtualMemory* from) {
ASSERT(!IsReserved());
address_ = from->address_;
size_ = from->size_;
from->Reset();
}
static void* ReserveRegion(size_t size);
static bool CommitRegion(void* base, size_t size, bool is_executable);
static bool UncommitRegion(void* base, size_t size);
// Must be called with a base pointer that has been returned by ReserveRegion
// and the same size it was reserved with.
static bool ReleaseRegion(void* base, size_t size);
// Returns true if OS performs lazy commits, i.e. the memory allocation call
// defers actual physical memory allocation till the first memory access.
// Otherwise returns false.
static bool HasLazyCommits();
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
// ----------------------------------------------------------------------------
// Thread
//
......
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_PLATFORM_VIRTUAL_MEMORY_H_
#define V8_PLATFORM_VIRTUAL_MEMORY_H_
#include "checks.h"
#include "globals.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// VirtualMemory
//
// This class represents and controls an area of reserved memory.
// Control of the reserved memory can be assigned to another VirtualMemory
// object by calling TakeControl. This removes the reserved memory from the
// original object.
class VirtualMemory V8_FINAL {
public:
// The executability of a memory region.
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory() : address_(NULL), size_(0) {}
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size) : size_(0) {
address_ = ReserveRegion(size, &size_);
}
// Reserves virtual memory containing an area of the given size that is
// aligned to the given alignment. The aligned area may not start at the
// position returned by address().
VirtualMemory(size_t size, size_t alignment) : size_(0) {
address_ = ReserveRegion(size, &size_, alignment);
}
// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address_, size_);
ASSERT(result);
USE(result);
}
}
// Returns whether the memory contains the specified address.
bool Contains(const void* address) const V8_WARN_UNUSED_RESULT {
if (!IsReserved()) return false;
if (address < address_) return false;
if (address >= reinterpret_cast<uint8_t*>(address_) + size_) return false;
return true;
}
// Returns whether the memory has been reserved.
bool IsReserved() const V8_WARN_UNUSED_RESULT {
return address_ != NULL;
}
// Initializes or resets an embedded VirtualMemory object.
void Reset() {
address_ = NULL;
size_ = 0;
}
// Returns the start address of the reserved memory. The returned value is
// only meaningful if |IsReserved()| returns true.
// If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
// the alignment to get the start of the aligned block.
void* address() const V8_WARN_UNUSED_RESULT { return address_; }
// Returns the size of the reserved memory. The returned value is only
// meaningful when |IsReserved()| returns true.
// If the memory was reserved with an alignment, this size may be larger
// than the requested size.
size_t size() const V8_WARN_UNUSED_RESULT { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address,
size_t size,
Executability executability) V8_WARN_UNUSED_RESULT {
ASSERT(IsReserved());
ASSERT(Contains(address));
ASSERT(Contains(reinterpret_cast<uint8_t*>(address) + size - 1));
return CommitRegion(address, size, executability);
}
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size) V8_WARN_UNUSED_RESULT {
ASSERT(IsReserved());
ASSERT(Contains(address));
ASSERT(Contains(reinterpret_cast<uint8_t*>(address) + size - 1));
return UncommitRegion(address, size);
}
// Creates guard pages at the given address.
bool Guard(void* address, size_t size) V8_WARN_UNUSED_RESULT {
// We can simply uncommit the specified pages. Any access
// to them will cause a processor exception.
return Uncommit(address, size);
}
void Release() {
ASSERT(IsReserved());
// WARNING: Order is important here. The VirtualMemory
// object might live inside the allocated region.
void* address = address_;
size_t size = size_;
Reset();
bool result = ReleaseRegion(address, size);
USE(result);
ASSERT(result);
}
// Assign control of the reserved region to a different VirtualMemory object.
// The old object is no longer functional (IsReserved() returns false).
void TakeControl(VirtualMemory* from) {
ASSERT(!IsReserved());
address_ = from->address_;
size_ = from->size_;
from->Reset();
}
// Allocates a region of memory pages. The pages are readable/writable,
// but are not guaranteed to be executable unless explicitly requested.
// Returns the base address of the allocated memory region, or NULL in
// case of an error.
static void* AllocateRegion(size_t size,
size_t* size_return,
Executability executability)
V8_WARN_UNUSED_RESULT;
static void* ReserveRegion(size_t size,
size_t* size_return) V8_WARN_UNUSED_RESULT;
static void* ReserveRegion(size_t size,
size_t* size_return,
size_t alignment) V8_WARN_UNUSED_RESULT;
static bool CommitRegion(void* address,
size_t size,
Executability executability) V8_WARN_UNUSED_RESULT;
static bool UncommitRegion(void* address, size_t size) V8_WARN_UNUSED_RESULT;
// Mark code segments readable-executable.
static bool WriteProtectRegion(void* address,
size_t size) V8_WARN_UNUSED_RESULT;
// Must be called with a base pointer that has been returned by ReserveRegion
// and the same size it was reserved with.
static bool ReleaseRegion(void* address, size_t size) V8_WARN_UNUSED_RESULT;
// The granularity for the starting address at which virtual memory can be
// reserved (or allocated in terms of the underlying operating system).
static size_t GetAllocationGranularity() V8_PURE;
// The maximum size of the virtual memory. 0 means there is no artificial
// limit.
static size_t GetLimit() V8_PURE;
// The page size and the granularity of page protection and commitment.
static size_t GetPageSize() V8_PURE;
// Returns true if the OS performs lazy commits, i.e. the memory allocation
// call defers actual physical memory allocation until the first memory
// access. Otherwise returns false.
static V8_INLINE(bool HasLazyCommits()) {
#if V8_OS_LINUX
return true;
#else
return false;
#endif
}
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
} } // namespace v8::internal
#endif // V8_PLATFORM_VIRTUAL_MEMORY_H_
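For orientation, a minimal usage sketch of the new interface follows. It is illustrative only and not part of the header; it assumes a context where ASSERT/USE and the KB/MB constants are available:

// Reserve address space, commit one page read/write, touch it, then undo.
static void VirtualMemoryUsageSketch() {
  VirtualMemory vm(1 * MB);       // Reserve roughly 1 MB of address space.
  if (!vm.IsReserved()) return;   // Reservation may fail.
  void* base = vm.address();
  size_t page = VirtualMemory::GetPageSize();
  if (vm.Commit(base, page, VirtualMemory::NOT_EXECUTABLE)) {
    static_cast<int*>(base)[0] = 42;  // The committed page is writable.
    bool result = vm.Uncommit(base, page);
    ASSERT(result);
    USE(result);
  }
  // The destructor releases the reservation; Release() would do so eagerly.
}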
......@@ -125,43 +125,11 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
}
// -----------------------------------------------------------------------------
// MemoryAllocator
#ifdef ENABLE_HEAP_PROTECTION
void MemoryAllocator::Protect(Address start, size_t size) {
OS::Protect(start, size);
}
void MemoryAllocator::Unprotect(Address start,
size_t size,
Executability executable) {
OS::Unprotect(start, size, executable);
}
void MemoryAllocator::ProtectChunkFromPage(Page* page) {
int id = GetChunkId(page);
OS::Protect(chunks_[id].address(), chunks_[id].size());
}
void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
int id = GetChunkId(page);
OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
chunks_[id].owner()->executable() == EXECUTABLE);
}
#endif
// --------------------------------------------------------------------------
// PagedSpace
Page* Page::Initialize(Heap* heap,
MemoryChunk* chunk,
Executability executable,
VirtualMemory::Executability executability,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
......
......@@ -33,6 +33,7 @@
#include "list.h"
#include "log.h"
#include "platform/mutex.h"
#include "platform/virtual-memory.h"
#include "v8utils.h"
namespace v8 {
......@@ -573,8 +574,10 @@ class MemoryChunk {
area_end_ = area_end;
}
Executability executable() {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
VirtualMemory::Executability executability() {
return IsFlagSet(IS_EXECUTABLE)
? VirtualMemory::EXECUTABLE
: VirtualMemory::NOT_EXECUTABLE;
}
bool ContainsOnlyData() {
......@@ -716,7 +719,7 @@ class MemoryChunk {
size_t size,
Address area_start,
Address area_end,
Executability executable,
VirtualMemory::Executability executability,
Space* owner);
friend class MemoryAllocator;
......@@ -796,7 +799,7 @@ class Page : public MemoryChunk {
static inline Page* Initialize(Heap* heap,
MemoryChunk* chunk,
Executability executable,
VirtualMemory::Executability executable,
PagedSpace* owner);
void InitializeAsAnchor(PagedSpace* owner);
......@@ -862,15 +865,17 @@ STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
public:
Space(Heap* heap, AllocationSpace id, Executability executable)
: heap_(heap), id_(id), executable_(executable) {}
Space(Heap* heap,
AllocationSpace id,
VirtualMemory::Executability executability)
: heap_(heap), id_(id), executability_(executability) {}
virtual ~Space() {}
Heap* heap() const { return heap_; }
// Does the space need executable memory?
Executability executable() { return executable_; }
VirtualMemory::Executability executability() { return executability_; }
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
......@@ -897,7 +902,7 @@ class Space : public Malloced {
private:
Heap* heap_;
AllocationSpace id_;
Executability executable_;
VirtualMemory::Executability executability_;
};
......@@ -1055,11 +1060,13 @@ class MemoryAllocator {
void TearDown();
Page* AllocatePage(
intptr_t size, PagedSpace* owner, Executability executable);
Page* AllocatePage(intptr_t size,
PagedSpace* owner,
VirtualMemory::Executability executability);
LargePage* AllocateLargePage(
intptr_t object_size, Space* owner, Executability executable);
LargePage* AllocateLargePage(intptr_t object_size,
Space* owner,
VirtualMemory::Executability executability);
void Free(MemoryChunk* chunk);
......@@ -1100,7 +1107,7 @@ class MemoryAllocator {
// could be committed later by calling MemoryChunk::CommitArea.
MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
intptr_t commit_area_size,
Executability executable,
VirtualMemory::Executability executability,
Space* space);
Address ReserveAlignedMemory(size_t requested,
......@@ -1109,19 +1116,26 @@ class MemoryAllocator {
Address AllocateAlignedMemory(size_t reserve_size,
size_t commit_size,
size_t alignment,
Executability executable,
VirtualMemory::Executability executability,
VirtualMemory* controller);
bool CommitMemory(Address addr, size_t size, Executability executable);
bool CommitMemory(Address addr,
size_t size,
VirtualMemory::Executability executability);
void FreeMemory(VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
void FreeMemory(VirtualMemory* reservation,
VirtualMemory::Executability executability);
void FreeMemory(Address addr,
size_t size,
VirtualMemory::Executability executability);
// Commit a contiguous block of memory from the initial chunk. Assumes that
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
bool CommitBlock(Address start, size_t size, Executability executable);
bool CommitBlock(Address start,
size_t size,
VirtualMemory::Executability executability);
// Uncommit a contiguous block of memory [start..(start+size)[.
// start is not NULL, the size is greater than zero, and the
......@@ -1612,7 +1626,7 @@ class PagedSpace : public Space {
PagedSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
Executability executable);
VirtualMemory::Executability executability);
virtual ~PagedSpace() {}
......@@ -2037,7 +2051,7 @@ class SemiSpace : public Space {
public:
// Constructor.
SemiSpace(Heap* heap, SemiSpaceId semispace)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
: Space(heap, NEW_SPACE, VirtualMemory::NOT_EXECUTABLE),
start_(NULL),
age_mark_(NULL),
id_(semispace),
......@@ -2290,7 +2304,7 @@ class NewSpace : public Space {
public:
// Constructor.
explicit NewSpace(Heap* heap)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
: Space(heap, NEW_SPACE, VirtualMemory::NOT_EXECUTABLE),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
......@@ -2555,8 +2569,8 @@ class OldSpace : public PagedSpace {
OldSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
Executability executable)
: PagedSpace(heap, max_capacity, id, executable) {
VirtualMemory::Executability executability)
: PagedSpace(heap, max_capacity, id, executability) {
page_extra_ = 0;
}
......@@ -2587,7 +2601,7 @@ class FixedSpace : public PagedSpace {
intptr_t max_capacity,
AllocationSpace id,
int object_size_in_bytes)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
: PagedSpace(heap, max_capacity, id, VirtualMemory::NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes) {
page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
}
......@@ -2727,8 +2741,8 @@ class LargeObjectSpace : public Space {
// Shared implementation of AllocateRaw, AllocateRawCode and
// AllocateRawFixedArray.
MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
Executability executable);
MUST_USE_RESULT MaybeObject* AllocateRaw(
int object_size, VirtualMemory::Executability executability);
// Available bytes for objects in this space.
inline intptr_t Available();
......
......@@ -72,7 +72,8 @@ void StoreBuffer::SetUp() {
// Don't know the alignment requirements of the OS, but it is certainly not
// less than 0xfff.
ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
int initial_length =
static_cast<int>(VirtualMemory::GetPageSize() / kPointerSize);
ASSERT(initial_length > 0);
ASSERT(initial_length <= kOldStoreBufferLength);
old_limit_ = old_start_ + initial_length;
......@@ -81,7 +82,7 @@ void StoreBuffer::SetUp() {
CHECK(old_virtual_memory_->Commit(
reinterpret_cast<void*>(old_start_),
(old_limit_ - old_start_) * kPointerSize,
false));
VirtualMemory::NOT_EXECUTABLE));
ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
......@@ -97,7 +98,7 @@ void StoreBuffer::SetUp() {
CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
kStoreBufferSize,
false)); // Not executable.
VirtualMemory::NOT_EXECUTABLE));
heap_->public_set_store_buffer_top(start_);
hash_set_1_ = new uintptr_t[kHashSetLength];
......@@ -154,7 +155,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
size_t grow = old_limit_ - old_start_; // Double size.
CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
grow * kPointerSize,
false));
VirtualMemory::NOT_EXECUTABLE));
old_limit_ += grow;
}
......
......@@ -201,8 +201,6 @@ enum PretenureFlag { NOT_TENURED, TENURED };
enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum VisitMode {
VISIT_ALL,
VISIT_ALL_IN_SCAVENGE,
......
......@@ -58,9 +58,8 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) {
// Fallback to library function if function cannot be created.
switch (type) {
......@@ -94,7 +93,9 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
......@@ -102,7 +103,8 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
......@@ -125,7 +127,9 @@ UnaryMathFunction CreateExpFunction() {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool ok = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(ok);
USE(ok);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
......@@ -133,9 +137,8 @@ UnaryMathFunction CreateExpFunction() {
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
1 * KB, &actual_size, VirtualMemory::EXECUTABLE));
if (buffer == NULL) return &sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
......@@ -149,7 +152,9 @@ UnaryMathFunction CreateSqrtFunction() {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
......@@ -237,7 +242,9 @@ ModuloFunction CreateModuloFunction() {
CodeDesc desc;
masm.GetCode(&desc);
OS::ProtectCode(buffer, actual_size);
bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
ASSERT(result);
USE(result);
// Call the function from C++ through this pointer.
return FUNCTION_CAST<ModuloFunction>(buffer);
}
......
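The code generator hunks above all converge on the same pattern: allocate an executable region, emit machine code into it, flush the instruction cache, and finally write-protect the region. A condensed sketch of that pattern (the function name and the elided emit step are placeholders, not V8 code):

static byte* AllocateAndSealCode(size_t request) {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
      request, &actual_size, VirtualMemory::EXECUTABLE));
  if (buffer == NULL) return NULL;
  // ... emit machine code into |buffer| here ...
  CPU::FlushICache(buffer, actual_size);
  bool result = VirtualMemory::WriteProtectRegion(buffer, actual_size);
  ASSERT(result);
  USE(result);
  return buffer;
}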
......@@ -108,6 +108,7 @@
'test-unbound-queue.cc',
'test-utils.cc',
'test-version.cc',
'test-virtual-memory.cc',
'test-weakmaps.cc',
'test-weaksets.cc',
'test-weaktypedarrays.cc'
......
......@@ -35,34 +35,7 @@
#include "serialize.h"
#include "cctest.h"
using v8::internal::Assembler;
using v8::internal::Code;
using v8::internal::CodeDesc;
using v8::internal::FUNCTION_CAST;
using v8::internal::Immediate;
using v8::internal::Isolate;
using v8::internal::Label;
using v8::internal::OS;
using v8::internal::Operand;
using v8::internal::byte;
using v8::internal::greater;
using v8::internal::less_equal;
using v8::internal::equal;
using v8::internal::not_equal;
using v8::internal::r13;
using v8::internal::r15;
using v8::internal::r8;
using v8::internal::r9;
using v8::internal::rax;
using v8::internal::rbx;
using v8::internal::rbp;
using v8::internal::rcx;
using v8::internal::rdi;
using v8::internal::rdx;
using v8::internal::rsi;
using v8::internal::rsp;
using v8::internal::times_1;
using v8::internal::xmm0;
using namespace v8::internal;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
......@@ -92,9 +65,10 @@ static const v8::internal::Register arg2 = rsi;
TEST(AssemblerX64ReturnOperation) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
......@@ -114,9 +88,10 @@ TEST(AssemblerX64ReturnOperation) {
TEST(AssemblerX64StackOperations) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
......@@ -146,9 +121,10 @@ TEST(AssemblerX64StackOperations) {
TEST(AssemblerX64ArithmeticOperations) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
......@@ -168,9 +144,10 @@ TEST(AssemblerX64ArithmeticOperations) {
TEST(AssemblerX64ImulOperation) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
......@@ -196,9 +173,10 @@ TEST(AssemblerX64ImulOperation) {
TEST(AssemblerX64MemoryOperands) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
......@@ -230,9 +208,10 @@ TEST(AssemblerX64MemoryOperands) {
TEST(AssemblerX64ControlFlow) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
......@@ -259,9 +238,10 @@ TEST(AssemblerX64ControlFlow) {
TEST(AssemblerX64LoopImmediates) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
// Assemble two loops using rax as counter, and verify the ending counts.
......
......@@ -47,9 +47,10 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
bool inline_fastpath) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
......
......@@ -47,9 +47,10 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
......
......@@ -46,9 +46,10 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
byte* buffer = static_cast<byte*>(VirtualMemory::AllocateRegion(
Assembler::kMinimalBufferSize,
&actual_size,
VirtualMemory::EXECUTABLE));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
......
......@@ -39,20 +39,6 @@
using namespace ::v8::internal;
TEST(VirtualMemory) {
VirtualMemory* vm = new VirtualMemory(1 * MB);
CHECK(vm->IsReserved());
void* block_addr = vm->address();
size_t block_size = 4 * KB;
CHECK(vm->Commit(block_addr, block_size, false));
// Check whether we can write to memory.
int* addr = static_cast<int*>(block_addr);
addr[KB-1] = 2;
CHECK(vm->Uncommit(block_addr, block_size));
delete vm;
}
TEST(GetCurrentProcessId) {
CHECK_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
}
......@@ -38,20 +38,6 @@
using namespace ::v8::internal;
TEST(VirtualMemory) {
VirtualMemory* vm = new VirtualMemory(1 * MB);
CHECK(vm->IsReserved());
void* block_addr = vm->address();
size_t block_size = 4 * KB;
CHECK(vm->Commit(block_addr, block_size, false));
// Check whether we can write to memory.
int* addr = static_cast<int*>(block_addr);
addr[KB-1] = 2;
CHECK(vm->Uncommit(block_addr, block_size));
delete vm;
}
TEST(GetCurrentProcessId) {
CHECK_EQ(static_cast<int>(::GetCurrentProcessId()),
OS::GetCurrentProcessId());
......
......@@ -151,30 +151,30 @@ static void VerifyMemoryChunk(Isolate* isolate,
size_t reserve_area_size,
size_t commit_area_size,
size_t second_commit_area_size,
Executability executable) {
VirtualMemory::Executability executability) {
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator->SetUp(heap->MaxReserved(),
heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
TestCodeRangeScope test_code_range_scope(isolate, code_range);
size_t header_size = (executable == EXECUTABLE)
size_t header_size = (executability == VirtualMemory::EXECUTABLE)
? MemoryAllocator::CodePageGuardStartOffset()
: MemoryChunk::kObjectStartOffset;
size_t guard_size = (executable == EXECUTABLE)
size_t guard_size = (executability == VirtualMemory::EXECUTABLE)
? MemoryAllocator::CodePageGuardSize()
: 0;
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
commit_area_size,
executable,
executability,
NULL);
size_t alignment = code_range->exists() ?
MemoryChunk::kAlignment : OS::CommitPageSize();
size_t reserved_size = ((executable == EXECUTABLE))
MemoryChunk::kAlignment : VirtualMemory::GetPageSize();
size_t reserved_size = ((executability == VirtualMemory::EXECUTABLE))
? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
alignment)
: RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
: RoundUp(header_size + reserve_area_size, VirtualMemory::GetPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() < memory_chunk->address() +
memory_chunk->size());
......@@ -230,7 +230,7 @@ TEST(MemoryChunk) {
reserve_area_size,
initial_commit_area_size,
second_commit_area_size,
EXECUTABLE);
VirtualMemory::EXECUTABLE);
VerifyMemoryChunk(isolate,
heap,
......@@ -238,7 +238,7 @@ TEST(MemoryChunk) {
reserve_area_size,
initial_commit_area_size,
second_commit_area_size,
NOT_EXECUTABLE);
VirtualMemory::NOT_EXECUTABLE);
delete code_range;
// Without CodeRange.
......@@ -249,7 +249,7 @@ TEST(MemoryChunk) {
reserve_area_size,
initial_commit_area_size,
second_commit_area_size,
EXECUTABLE);
VirtualMemory::EXECUTABLE);
VerifyMemoryChunk(isolate,
heap,
......@@ -257,7 +257,7 @@ TEST(MemoryChunk) {
reserve_area_size,
initial_commit_area_size,
second_commit_area_size,
NOT_EXECUTABLE);
VirtualMemory::NOT_EXECUTABLE);
}
}
......@@ -276,9 +276,9 @@ TEST(MemoryAllocator) {
OldSpace faked_space(heap,
heap->MaxReserved(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
VirtualMemory::NOT_EXECUTABLE);
Page* first_page = memory_allocator->AllocatePage(
faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
faked_space.AreaSize(), &faked_space, VirtualMemory::NOT_EXECUTABLE);
first_page->InsertAfter(faked_space.anchor()->prev_page());
CHECK(first_page->is_valid());
......@@ -291,7 +291,7 @@ TEST(MemoryAllocator) {
// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage(
faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
faked_space.AreaSize(), &faked_space, VirtualMemory::NOT_EXECUTABLE);
CHECK(other->is_valid());
total_pages++;
other->InsertAfter(first_page);
......@@ -353,7 +353,7 @@ TEST(OldSpace) {
OldSpace* s = new OldSpace(heap,
heap->MaxOldGenerationSize(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
VirtualMemory::NOT_EXECUTABLE);
CHECK(s != NULL);
CHECK(s->SetUp());
......@@ -377,7 +377,8 @@ TEST(LargeObjectSpace) {
int lo_size = Page::kPageSize;
Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
Object* obj = lo->AllocateRaw(
lo_size, VirtualMemory::NOT_EXECUTABLE)->ToObjectUnchecked();
CHECK(obj->IsHeapObject());
HeapObject* ho = HeapObject::cast(obj);
......@@ -390,7 +391,8 @@ TEST(LargeObjectSpace) {
while (true) {
intptr_t available = lo->Available();
{ MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
{ MaybeObject* maybe_obj = lo->AllocateRaw(
lo_size, VirtualMemory::NOT_EXECUTABLE);
if (!maybe_obj->ToObject(&obj)) break;
}
CHECK(lo->Available() < available);
......@@ -398,5 +400,5 @@ TEST(LargeObjectSpace) {
CHECK(!lo->IsEmpty());
CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
CHECK(lo->AllocateRaw(lo_size, VirtualMemory::NOT_EXECUTABLE)->IsFailure());
}
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "cctest.h"
#include "platform/virtual-memory.h"
using namespace ::v8::internal;
TEST(CommitAndUncommit) {
static const size_t kSize = 1 * MB;
static const size_t kBlockSize = 4 * KB;
VirtualMemory vm(kSize);
CHECK(vm.IsReserved());
void* block_addr = vm.address();
CHECK(vm.Commit(block_addr, kBlockSize, VirtualMemory::NOT_EXECUTABLE));
// Check whether we can write to memory.
int* addr = static_cast<int*>(block_addr);
addr[5] = 2;
CHECK(vm.Uncommit(block_addr, kBlockSize));
}
TEST(Release) {
static const size_t kSize = 4 * KB;
VirtualMemory vm(kSize);
CHECK(vm.IsReserved());
CHECK_LE(kSize, vm.size());
CHECK_NE(NULL, vm.address());
vm.Release();
CHECK(!vm.IsReserved());
}
TEST(TakeControl) {
static const size_t kSize = 64 * KB;
VirtualMemory vm1(kSize);
size_t size1 = vm1.size();
CHECK(vm1.IsReserved());
CHECK_LE(kSize, size1);
VirtualMemory vm2;
CHECK(!vm2.IsReserved());
vm2.TakeControl(&vm1);
CHECK(vm2.IsReserved());
CHECK(!vm1.IsReserved());
CHECK(vm2.size() == size1);
}
TEST(AllocationGranularityIsPowerOf2) {
CHECK(IsPowerOf2(VirtualMemory::GetAllocationGranularity()));
}
TEST(PageSizeIsPowerOf2) {
CHECK(IsPowerOf2(VirtualMemory::GetPageSize()));
}
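A further test in the same style could exercise the aligned-reservation constructor; this is only a sketch and is not part of the commit:

TEST(AlignedReservation) {
  // Reserve with an explicit alignment; the reservation may be larger than
  // requested, but must be able to hold an aligned area of kSize bytes.
  static const size_t kSize = 64 * KB;
  VirtualMemory vm(kSize, VirtualMemory::GetAllocationGranularity());
  CHECK(vm.IsReserved());
  CHECK_LE(kSize, vm.size());
}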
......@@ -449,6 +449,8 @@
'../../src/platform/semaphore.h',
'../../src/platform/socket.cc',
'../../src/platform/socket.h',
'../../src/platform/virtual-memory.cc',
'../../src/platform/virtual-memory.h',
'../../src/preparse-data-format.h',
'../../src/preparse-data.cc',
'../../src/preparse-data.h',
......