Commit e372a2dd authored by ager@chromium.org

Add guard pages in front of executable allocations

BUG=89247

Review URL: http://codereview.chromium.org/7379004
Patch from Chris Neckar <cdn@chromium.org>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@8687 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 5df08869
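The patch reserves one extra page at the front of every executable (code) allocation and makes it inaccessible, so a stray read, write or jump into the area just before JIT-generated code faults immediately instead of silently touching code memory. Below is a minimal standalone sketch of the technique on POSIX; the helper names are illustrative only and are not part of this patch, which instead routes the same idea through the new OS::Guard and the MemoryAllocator changes further down.

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

// Sketch: over-allocate by one page, revoke all access to that page, and hand
// out the memory that follows it.
static void* AllocateWithFrontGuard(size_t payload_size, size_t* total_size) {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  *total_size = payload_size + page;            // one extra page for the guard
  void* base = mmap(NULL, *total_size, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return NULL;
  mprotect(base, page, PROT_NONE);              // guard page: any access faults
  return static_cast<char*>(base) + page;       // caller only sees the payload
}

static void FreeWithFrontGuard(void* payload, size_t total_size) {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  munmap(static_cast<char*>(payload) - page, total_size);  // rewind past guard
}

In the patch itself, the protection call is the new OS::Guard below, and the pointer/size adjustments live in MemoryAllocator::AllocatePages and LargeObjectChunk::New.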
@@ -217,6 +217,11 @@ void OS::Free(void* buf, const size_t length) {
 }


+void OS::Guard(void* address, const size_t size) {
+  UNIMPLEMENTED();
+}
+
+
 void OS::Sleep(int milliseconds) {
   UNIMPLEMENTED();
 }
...
@@ -33,6 +33,7 @@
 #include <errno.h>
 #include <time.h>
+#include <sys/mman.h>
 #include <sys/socket.h>
 #include <sys/resource.h>
 #include <sys/time.h>
@@ -43,6 +44,8 @@
 #include <netinet/in.h>
 #include <netdb.h>

+#undef MAP_TYPE
+
 #if defined(ANDROID)
 #define LOG_TAG "v8"
 #include <utils/Log.h>  // LOG_PRI_VA
@@ -67,6 +70,12 @@ intptr_t OS::MaxVirtualMemory() {
 }


+// Create guard pages.
+void OS::Guard(void* address, const size_t size) {
+  mprotect(address, size, PROT_NONE);
+}
+
+
 // ----------------------------------------------------------------------------
 // Math functions
...
@@ -957,6 +957,12 @@ void OS::Free(void* address, const size_t size) {
 }


+void OS::Guard(void* address, const size_t size) {
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
+}
+
+
 void OS::Sleep(int milliseconds) {
   ::Sleep(milliseconds);
 }
...
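Note that the Windows implementation above uses PAGE_GUARD rather than simply revoking access: the first touch of a guard page raises a STATUS_GUARD_PAGE_VIOLATION exception and the guard status is then cleared by the system, whereas the POSIX PROT_NONE mapping faults on every access. A small sketch of those semantics (not part of the patch; the buffer size and names are illustrative):

#include <windows.h>
#include <stdio.h>

int main() {
  SYSTEM_INFO info;
  GetSystemInfo(&info);
  const SIZE_T page = info.dwPageSize;

  // Reserve and commit two pages of read/write memory.
  char* base = static_cast<char*>(
      VirtualAlloc(NULL, 2 * page, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
  if (base == NULL) return 1;

  // Turn the first page into a guard page, as OS::Guard does on Windows.
  DWORD old_protect;
  VirtualProtect(base, page, PAGE_READONLY | PAGE_GUARD, &old_protect);

  // Touching base[0] here would raise STATUS_GUARD_PAGE_VIOLATION;
  // the second page stays usable.
  base[page] = 42;
  printf("byte after guard page: %d\n", base[page]);

  VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}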
@@ -206,6 +206,10 @@ class OS {
                         size_t* allocated,
                         bool is_executable);
   static void Free(void* address, const size_t size);
+
+  // Assign memory as a guard page so that access will cause an exception.
+  static void Guard(void* address, const size_t size);
+
   // Get the Alignment guaranteed by Allocate().
   static size_t AllocateAlignment();
...
@@ -402,7 +402,9 @@ void MemoryAllocator::FreeRawMemory(void* mem,
                                     size_t length,
                                     Executability executable) {
 #ifdef DEBUG
-  ZapBlock(reinterpret_cast<Address>(mem), length);
+  // Do not try to zap the guard page.
+  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+  ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
 #endif
   if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
     isolate_->code_range()->FreeRawMemory(mem, length);
@@ -504,14 +506,28 @@ Page* MemoryAllocator::AllocatePages(int requested_pages,
   LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));

   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
   // We may 'lose' a page due to alignment.
   ASSERT(*allocated_pages >= kPagesPerChunk - 1);
-  if (*allocated_pages == 0) {
-    FreeRawMemory(chunk, chunk_size, owner->executable());
+
+  size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
+
+  // Check that we got at least one page that we can use.
+  if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
+    FreeRawMemory(chunk,
+                  chunk_size,
+                  owner->executable());
     LOG(isolate_, DeleteEvent("PagedChunk", chunk));
     return Page::FromAddress(NULL);
   }
+
+  if (guard_size != 0) {
+    OS::Guard(chunk, guard_size);
+    chunk_size -= guard_size;
+    chunk = static_cast<Address>(chunk) + guard_size;
+    --*allocated_pages;
+  }
+
   int chunk_id = Pop();
   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
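The bookkeeping in AllocatePages is symmetric with the free path: an executable chunk gives up its first page to the guard, so chunk, chunk_size and *allocated_pages are all adjusted before the chunk is registered, and DeleteChunk below undoes this by freeing from c.address() - guard_size with size + guard_size. A hedged sketch of that round trip, with hypothetical names and sizes, not V8's actual allocator:

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

// Illustrative only: carve a guard page off the front of an executable chunk
// (as AllocatePages does) and later release the full mapping (as DeleteChunk
// does by rewinding the address and re-adding the guard size).
int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t chunk_size = 16 * page;                 // raw mapping, guard included
  char* chunk = static_cast<char*>(mmap(NULL, chunk_size,
                                        PROT_READ | PROT_WRITE | PROT_EXEC,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  assert(chunk != MAP_FAILED);

  const size_t guard_size = page;                // executable => one guard page
  mprotect(chunk, guard_size, PROT_NONE);        // the OS::Guard equivalent
  char* usable = chunk + guard_size;             // what gets registered
  size_t usable_size = chunk_size - guard_size;

  // Free path: rewind by guard_size and add it back to the size, which yields
  // exactly the original mapping [chunk, chunk + chunk_size).
  assert(usable - guard_size == chunk);
  assert(usable_size + guard_size == chunk_size);
  munmap(usable - guard_size, usable_size + guard_size);
  return 0;
}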
@@ -681,7 +697,8 @@ void MemoryAllocator::DeleteChunk(int chunk_id) {
     LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
     ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
     size_t size = c.size();
-    FreeRawMemory(c.address(), size, c.executable());
+    size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
+    FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
     PerformAllocationCallback(space, kAllocationActionFree, size);
   }
   c.init(NULL, 0, NULL);
@@ -2672,9 +2689,10 @@ LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                         Executability executable) {
   size_t requested = ChunkSizeFor(size_in_bytes);
   size_t size;
+  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
   Isolate* isolate = Isolate::Current();
   void* mem = isolate->memory_allocator()->AllocateRawMemory(
-      requested, &size, executable);
+      requested + guard_size, &size, executable);
   if (mem == NULL) return NULL;

   // The start of the chunk may be overlayed with a page so we have to
@@ -2682,13 +2700,19 @@ LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
   ASSERT((size & Page::kPageFlagMask) == 0);
   LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
-  if (size < requested) {
+  if (size < requested + guard_size) {
     isolate->memory_allocator()->FreeRawMemory(
         mem, size, executable);
     LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }

+  if (guard_size != 0) {
+    OS::Guard(mem, guard_size);
+    size -= guard_size;
+    mem = static_cast<Address>(mem) + guard_size;
+  }
+
   ObjectSpace space = (executable == EXECUTABLE)
       ? kObjectSpaceCodeSpace
       : kObjectSpaceLoSpace;
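LargeObjectChunk::New applies the same front-guard adjustment to large code objects: it requests guard_size extra bytes, rejects the mapping unless requested + guard_size bytes actually arrived, and then hides the guard page by shrinking size and advancing mem. Tracing one executable large object through this hunk and the free paths that follow (symbolic sizes only, taken from the diff):

// guard_size = Page::kPageSize                     // because executable
// AllocateRawMemory(requested + guard_size, &size, EXECUTABLE)
// after OS::Guard:  mem += guard_size,  size -= guard_size
// on release:       FreeRawMemory(mem - guard_size, size + guard_size)
// i.e. exactly the original mapping, guard page included, is returned.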
@@ -2742,9 +2766,11 @@ void LargeObjectSpace::TearDown() {
     ObjectSpace space = kObjectSpaceLoSpace;
     if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
     size_t size = chunk->size();
-    heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
-                                                         size,
-                                                         executable);
+    size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+    heap()->isolate()->memory_allocator()->FreeRawMemory(
+        chunk->address() - guard_size,
+        size + guard_size,
+        executable);
     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
         space, kAllocationActionFree, size);
   }
@@ -2941,10 +2967,15 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       objects_size_ -= object->Size();
       page_count_--;
       ObjectSpace space = kObjectSpaceLoSpace;
-      if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
-      heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
-                                                           chunk_size,
-                                                           executable);
+      size_t guard_size = 0;
+      if (executable == EXECUTABLE) {
+        space = kObjectSpaceCodeSpace;
+        guard_size = Page::kPageSize;
+      }
+      heap()->isolate()->memory_allocator()->FreeRawMemory(
+          chunk_address - guard_size,
+          chunk_size + guard_size,
+          executable);
       heap()->isolate()->memory_allocator()->PerformAllocationCallback(
           space, kAllocationActionFree, size_);
       LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
...
@@ -647,13 +647,11 @@ class MemoryAllocator {
 #ifdef V8_TARGET_ARCH_X64
   static const int kPagesPerChunk = 32;
   // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
-  static const int kPagesPerChunkLog2 = 5;
   static const int kChunkTableLevels = 4;
   static const int kChunkTableBitsPerLevel = 12;
 #else
   static const int kPagesPerChunk = 16;
   // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
-  static const int kPagesPerChunkLog2 = 4;
   static const int kChunkTableLevels = 2;
   static const int kChunkTableBitsPerLevel = 8;
 #endif
@@ -662,7 +660,6 @@ class MemoryAllocator {
   MemoryAllocator();

   static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
-  static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;

   // Maximum space size in bytes.
   intptr_t capacity_;
...