Commit 2fdadd77 authored by bmeurer@chromium.org

Drop OS::IsOutsideAllocatedSpace() and move the tracking to the MemoryAllocator.

Instead of globally tracking allocated space limits, which was
not implemented properly anyway (i.e. there was no synchronization
on the reading side), track them per MemoryAllocator (that is,
per heap/isolate).

In particular, avoid calling IsBadWritePtr() on Windows; it is
obsolete and Microsoft strongly discourages its use.
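
For readers skimming the diff, the mechanism being moved is small: a
conservative, monotonically growing [lowest, highest) address range per
allocator. A minimal self-contained sketch of the pattern follows; the
class name AllocatedSpaceTracker is hypothetical, and std::min/std::max
stand in for V8's Min/Max helpers. The real members live on
MemoryAllocator, as the spaces.h hunks below show.

    #include <algorithm>
    #include <cstddef>

    // Illustrative sketch only, not V8's actual class: a per-allocator
    // conservative [lowest, highest) range that only ever grows.
    class AllocatedSpaceTracker {
     public:
      AllocatedSpaceTracker()
          : lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
            highest_ever_allocated_(reinterpret_cast<void*>(0)) {}

      // Record a committed region [address, address + size).
      void UpdateAllocatedSpaceLimits(void* address, size_t size) {
        void* end = static_cast<char*>(address) + size;
        lowest_ever_allocated_ = std::min(lowest_ever_allocated_, address);
        highest_ever_allocated_ = std::max(highest_ever_allocated_, end);
      }

      // Conservative: may return false for unallocated addresses inside
      // the range, but never returns true for a committed address.
      bool IsOutsideAllocatedSpace(const void* address) const {
        return address < lowest_ever_allocated_ ||
               address >= highest_ever_allocated_;
      }

     private:
      // No mutex: each MemoryAllocator belongs to a single heap/isolate,
      // unlike the old process-global limits this commit removes.
      void* lowest_ever_allocated_;
      void* highest_ever_allocated_;
    };

Because each MemoryAllocator is owned by exactly one heap/isolate, the
update no longer needs the process-global limit_mutex that the old
OS-level implementation required (and lacked on the reading side).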

R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/23903008

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16542 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 03e8c9d0
@@ -6204,7 +6204,7 @@ bool Heap::Contains(HeapObject* value) {
 bool Heap::Contains(Address addr) {
-  if (OS::IsOutsideAllocatedSpace(addr)) return false;
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
   return HasBeenSetUp() &&
     (new_space_.ToSpaceContains(addr) ||
      old_pointer_space_->Contains(addr) ||
@@ -6223,7 +6223,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
 bool Heap::InSpace(Address addr, AllocationSpace space) {
-  if (OS::IsOutsideAllocatedSpace(addr)) return false;
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
   if (!HasBeenSetUp()) return false;
   switch (space) {
@@ -52,9 +52,6 @@ namespace v8 {
 namespace internal {
 
-
-static Mutex* limit_mutex = NULL;
-
 
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -76,31 +73,6 @@ double OS::LocalTimeOffset() {
 }
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -112,7 +84,6 @@ void* OS::Allocate(const size_t requested,
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
@@ -365,8 +336,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
   if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
   return true;
 }
@@ -406,12 +375,6 @@ void OS::SetUp() {
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
@@ -63,9 +63,6 @@ namespace v8 {
 namespace internal {
 
-
-static Mutex* limit_mutex = NULL;
-
 
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -84,31 +81,6 @@ double OS::LocalTimeOffset() {
 }
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool executable) {
@@ -121,7 +93,6 @@ void* OS::Allocate(const size_t requested,
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
@@ -345,8 +316,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
                          kMmapFdOffset)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, size);
   return true;
 }
@@ -380,12 +349,6 @@ void OS::SetUp() {
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
@@ -76,9 +76,6 @@ namespace v8 {
 namespace internal {
 
-
-static Mutex* limit_mutex = NULL;
-
 
 #ifdef __arm__
 bool OS::ArmUsingHardFloat() {
@@ -140,31 +137,6 @@ double OS::LocalTimeOffset() {
 }
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -178,7 +150,6 @@ void* OS::Allocate(const size_t requested,
    return NULL;
  }
  *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
 }
@@ -472,7 +443,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
     return false;
   }
 
-  UpdateAllocatedSpaceLimits(base, size);
   return true;
 }
@@ -501,12 +471,6 @@ void OS::SetUp() {
   // Seed the random number generator. We preserve microsecond resolution.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
@@ -79,34 +79,6 @@ namespace v8 {
 namespace internal {
 
-
-static Mutex* limit_mutex = NULL;
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 // Constants used for mmap.
 // kMmapFd is used to pass vm_alloc flags to tag the region with the user
 // defined tag 255 This helps identify V8-allocated regions in memory analysis
@@ -131,7 +103,6 @@ void* OS::Allocate(const size_t requested,
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
@@ -366,8 +337,6 @@ bool VirtualMemory::CommitRegion(void* address,
                 kMmapFdOffset)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(address, size);
   return true;
 }
@@ -396,12 +365,6 @@ void OS::SetUp() {
   // Seed the random number generator. We preserve microsecond resolution.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
@@ -61,9 +61,6 @@ namespace v8 {
 namespace internal {
 
-
-static Mutex* limit_mutex = NULL;
-
 
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -82,31 +79,6 @@ double OS::LocalTimeOffset() {
 }
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -120,7 +92,6 @@ void* OS::Allocate(const size_t requested,
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
@@ -402,8 +373,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
                          kMmapFdOffset)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, size);
   return true;
 }
@@ -433,12 +402,6 @@ void OS::SetUp() {
   // Seed the random number generator. We preserve microsecond resolution.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
@@ -81,9 +81,6 @@ namespace v8 {
 namespace internal {
 
-
-static Mutex* limit_mutex = NULL;
-
 
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -99,31 +96,6 @@ double OS::LocalTimeOffset() {
 }
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -136,7 +108,6 @@ void* OS::Allocate(const size_t requested,
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
@@ -366,8 +337,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
                          kMmapFdOffset)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, size);
   return true;
 }
@@ -401,12 +370,6 @@ void OS::SetUp() {
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
@@ -144,8 +144,6 @@ double ceiling(double x) {
 }
 
-static Mutex* limit_mutex = NULL;
-
 
 #if V8_TARGET_ARCH_IA32
 static void MemMoveWrapper(void* dest, const void* src, size_t size) {
   memmove(dest, src, size);
@@ -750,35 +748,6 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
 #undef _TRUNCATE
 #undef STRUNCATE
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* pointer) {
-  if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
-    return true;
-  // Ask the Windows API
-  if (IsBadWritePtr(pointer, 1))
-    return true;
-  return false;
-}
-
 
 // Get the system's page size used by VirtualAlloc() or the next power
 // of two. The reason for always returning a power of two is that the
@@ -872,7 +841,6 @@ void* OS::Allocate(const size_t requested,
   ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
 
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
   return mbase;
 }
@@ -1490,8 +1458,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
   if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
   return true;
 }
@@ -1623,13 +1589,6 @@ void OS::SetUp() {
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srand(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
 }
 
-
-void OS::TearDown() {
-  delete limit_mutex;
-}
-
 } }  // namespace v8::internal
@@ -178,9 +178,6 @@ class OS {
   // called after CPU initialization.
   static void PostSetUp();
 
-  // Clean up platform-OS-related things. Called once at VM shutdown.
-  static void TearDown();
-
   // Returns the accumulated user time for thread. This routine
   // can be used for profiling. The implementation should
   // strive for high-precision timer resolution, preferable
@@ -254,13 +251,6 @@ class OS {
   // Get the Alignment guaranteed by Allocate().
   static size_t AllocateAlignment();
 
-  // Returns an indication of whether a pointer is in a space that
-  // has been allocated by Allocate().  This method may conservatively
-  // always return false, but giving more accurate information may
-  // improve the robustness of the stack dump code in the presence of
-  // heap corruption.
-  static bool IsOutsideAllocatedSpace(void* pointer);
-
   // Sleep for a number of milliseconds.
   static void Sleep(const int milliseconds);
@@ -228,10 +228,10 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
-                                               current.start,
-                                               commit_size,
-                                               *allocated)) {
+  if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
+                                                            current.start,
+                                                            commit_size,
+                                                            *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -245,7 +245,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
 bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return code_range_->Commit(start, length, true);
+  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
 }
@@ -278,7 +278,9 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
       capacity_(0),
       capacity_executable_(0),
       size_(0),
-      size_executable_(0) {
+      size_executable_(0),
+      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
+      highest_ever_allocated_(reinterpret_cast<void*>(0)) {
 }
@@ -304,6 +306,17 @@ void MemoryAllocator::TearDown() {
 }
 
+
+bool MemoryAllocator::CommitMemory(Address base,
+                                   size_t size,
+                                   Executability executable) {
+  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+    return false;
+  }
+  UpdateAllocatedSpaceLimits(base, base + size);
+  return true;
+}
+
 
 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
@@ -383,7 +396,9 @@ Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, commit_size, false)) {
+    if (reservation.Commit(base, commit_size, false)) {
+      UpdateAllocatedSpaceLimits(base, base + commit_size);
+    } else {
       base = NULL;
     }
   }
@@ -509,7 +524,10 @@ bool MemoryChunk::CommitArea(size_t requested) {
     Address start = address() + committed_size + guard_size;
     size_t length = commit_size - committed_size;
     if (reservation_.IsReserved()) {
-      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+      Executability executable = IsFlagSet(IS_EXECUTABLE)
+          ? EXECUTABLE : NOT_EXECUTABLE;
+      if (!heap()->isolate()->memory_allocator()->CommitMemory(
+              start, length, executable)) {
         return false;
       }
     } else {
@@ -763,7 +781,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
 bool MemoryAllocator::CommitBlock(Address start,
                                   size_t size,
                                   Executability executable) {
-  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+  if (!CommitMemory(start, size, executable)) return false;
 
   if (Heap::ShouldZapGarbage()) {
     ZapBlock(start, size);
@@ -899,6 +917,9 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
     return false;
   }
 
+  UpdateAllocatedSpaceLimits(start,
+                             start + CodePageAreaStartOffset() +
+                             commit_size - CodePageGuardStartOffset());
   return true;
 }
@@ -1083,6 +1083,13 @@ class MemoryAllocator {
     return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
   }
 
+  // Returns an indication of whether a pointer is in a space that has
+  // been allocated by this MemoryAllocator.
+  V8_INLINE(bool IsOutsideAllocatedSpace(const void* address)) const {
+    return address < lowest_ever_allocated_ ||
+           address >= highest_ever_allocated_;
+  }
+
 #ifdef DEBUG
   // Reports statistic info of the space.
   void ReportStatistics();
@@ -1105,6 +1112,8 @@ class MemoryAllocator {
                                      Executability executable,
                                      VirtualMemory* controller);
 
+  bool CommitMemory(Address addr, size_t size, Executability executable);
+
   void FreeMemory(VirtualMemory* reservation, Executability executable);
   void FreeMemory(Address addr, size_t size, Executability executable);
@@ -1150,10 +1159,10 @@ class MemoryAllocator {
     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
   }
 
-  MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
-                                                     Address start,
-                                                     size_t commit_size,
-                                                     size_t reserved_size);
+  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+                                              Address start,
+                                              size_t commit_size,
+                                              size_t reserved_size);
 
  private:
   Isolate* isolate_;
@@ -1168,6 +1177,14 @@ class MemoryAllocator {
   // Allocated executable space size in bytes.
   size_t size_executable_;
 
+  // We keep the lowest and highest addresses allocated as a quick way
+  // of determining that pointers are outside the heap. The estimate is
+  // conservative, i.e. not all addrsses in 'allocated' space are allocated
+  // to our heap. The range is [lowest, highest[, inclusive on the low end
+  // and exclusive on the high end.
+  void* lowest_ever_allocated_;
+  void* highest_ever_allocated_;
+
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                          ObjectSpace space,
@@ -1190,6 +1207,11 @@ class MemoryAllocator {
   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                PagedSpace* owner);
 
+  void UpdateAllocatedSpaceLimits(void* low, void* high) {
+    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+  }
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
@@ -112,7 +112,6 @@ void V8::TearDown() {
   call_completed_callbacks_ = NULL;
 
   Sampler::TearDown();
-  OS::TearDown();
 }
@@ -186,10 +186,9 @@ class Block {
 
 TEST(CodeRange) {
   const int code_range_size = 32*MB;
-  OS::SetUp();
-  Isolate::Current()->InitializeLoggingAndCounters();
-  CodeRange* code_range = new CodeRange(Isolate::Current());
-  code_range->SetUp(code_range_size);
+  CcTest::InitializeVM();
+  CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()));
+  code_range.SetUp(code_range_size);
   int current_allocated = 0;
   int total_allocated = 0;
   List<Block> blocks(1000);
@@ -205,9 +204,9 @@ TEST(CodeRange) {
           (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
           Pseudorandom() % 5000 + 1;
       size_t allocated = 0;
-      Address base = code_range->AllocateRawMemory(requested,
-                                                   requested,
-                                                   &allocated);
+      Address base = code_range.AllocateRawMemory(requested,
+                                                  requested,
+                                                  &allocated);
       CHECK(base != NULL);
       blocks.Add(Block(base, static_cast<int>(allocated)));
       current_allocated += static_cast<int>(allocated);
@@ -215,7 +214,7 @@ TEST(CodeRange) {
     } else {
       // Free a block.
       int index = Pseudorandom() % blocks.length();
-      code_range->FreeRawMemory(blocks[index].base, blocks[index].size);
+      code_range.FreeRawMemory(blocks[index].base, blocks[index].size);
       current_allocated -= blocks[index].size;
       if (index < blocks.length() - 1) {
         blocks[index] = blocks.RemoveLast();
@@ -225,6 +224,5 @@ TEST(CodeRange) {
     }
   }
 
-  code_range->TearDown();
-  delete code_range;
+  code_range.TearDown();
 }