Commit e372a2dd authored by ager@chromium.org's avatar ager@chromium.org

Add guard pages in front of executable allocations

BUG=89247

Review URL: http://codereview.chromium.org/7379004
Patch from Chris Neckar <cdn@chromium.org>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@8687 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 5df08869
......@@ -217,6 +217,11 @@ void OS::Free(void* buf, const size_t length) {
}
// Create a guard page over [address, address + size).
// Not supported on this platform: aborts via UNIMPLEMENTED() if ever called.
void OS::Guard(void* address, const size_t size) {
UNIMPLEMENTED();
}
// Suspend the calling thread for the given number of milliseconds.
// Not supported on this platform: aborts via UNIMPLEMENTED() if ever called.
void OS::Sleep(int milliseconds) {
UNIMPLEMENTED();
}
......
......@@ -33,6 +33,7 @@
#include <errno.h>
#include <time.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/resource.h>
#include <sys/time.h>
......@@ -43,6 +44,8 @@
#include <netinet/in.h>
#include <netdb.h>
#undef MAP_TYPE
#if defined(ANDROID)
#define LOG_TAG "v8"
#include <utils/Log.h> // LOG_PRI_VA
......@@ -67,6 +70,12 @@ intptr_t OS::MaxVirtualMemory() {
}
// Create guard pages: revoke all access (read/write/execute) to the region
// [address, address + size) so that any touch faults immediately.
// NOTE(review): the mprotect() return value is ignored — a failure here
// silently leaves the region accessible. Presumably acceptable for a
// best-effort hardening measure; confirm with callers.
void OS::Guard(void* address, const size_t size) {
mprotect(address, size, PROT_NONE);
}
// ----------------------------------------------------------------------------
// Math functions
......
......@@ -957,6 +957,12 @@ void OS::Free(void* address, const size_t size) {
}
// Create guard pages over [address, address + size).
// PAGE_GUARD makes the first access to each page raise a
// STATUS_GUARD_PAGE_VIOLATION exception (after which Windows clears the
// guard status for that page), giving a one-shot trap on stray accesses.
void OS::Guard(void* address, const size_t size) {
// VirtualProtect requires a non-NULL out-parameter for the previous
// protection; the value is not needed here. Its return value (failure
// indication) is ignored — best-effort hardening.
DWORD oldprotect;
VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
}
// Suspend the calling thread for the given number of milliseconds using the
// Win32 Sleep() API (the :: qualifier avoids colliding with OS::Sleep itself).
void OS::Sleep(int milliseconds) {
::Sleep(milliseconds);
}
......
......@@ -206,6 +206,10 @@ class OS {
size_t* allocated,
bool is_executable);
static void Free(void* address, const size_t size);
// Assign memory as a guard page so that access will cause an exception.
static void Guard(void* address, const size_t size);
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
......
......@@ -402,7 +402,9 @@ void MemoryAllocator::FreeRawMemory(void* mem,
size_t length,
Executability executable) {
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), length);
// Do not try to zap the guard page.
size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
#endif
if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
isolate_->code_range()->FreeRawMemory(mem, length);
......@@ -504,14 +506,28 @@ Page* MemoryAllocator::AllocatePages(int requested_pages,
LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
*allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
// We may 'lose' a page due to alignment.
ASSERT(*allocated_pages >= kPagesPerChunk - 1);
if (*allocated_pages == 0) {
FreeRawMemory(chunk, chunk_size, owner->executable());
size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
// Check that we got at least one page that we can use.
if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
FreeRawMemory(chunk,
chunk_size,
owner->executable());
LOG(isolate_, DeleteEvent("PagedChunk", chunk));
return Page::FromAddress(NULL);
}
if (guard_size != 0) {
OS::Guard(chunk, guard_size);
chunk_size -= guard_size;
chunk = static_cast<Address>(chunk) + guard_size;
--*allocated_pages;
}
int chunk_id = Pop();
chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
......@@ -681,7 +697,8 @@ void MemoryAllocator::DeleteChunk(int chunk_id) {
LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
size_t size = c.size();
FreeRawMemory(c.address(), size, c.executable());
size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
PerformAllocationCallback(space, kAllocationActionFree, size);
}
c.init(NULL, 0, NULL);
......@@ -2672,9 +2689,10 @@ LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
Executability executable) {
size_t requested = ChunkSizeFor(size_in_bytes);
size_t size;
size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
Isolate* isolate = Isolate::Current();
void* mem = isolate->memory_allocator()->AllocateRawMemory(
requested, &size, executable);
requested + guard_size, &size, executable);
if (mem == NULL) return NULL;
// The start of the chunk may be overlayed with a page so we have to
......@@ -2682,13 +2700,19 @@ LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
ASSERT((size & Page::kPageFlagMask) == 0);
LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
if (size < requested) {
if (size < requested + guard_size) {
isolate->memory_allocator()->FreeRawMemory(
mem, size, executable);
LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
return NULL;
}
if (guard_size != 0) {
OS::Guard(mem, guard_size);
size -= guard_size;
mem = static_cast<Address>(mem) + guard_size;
}
ObjectSpace space = (executable == EXECUTABLE)
? kObjectSpaceCodeSpace
: kObjectSpaceLoSpace;
......@@ -2742,9 +2766,11 @@ void LargeObjectSpace::TearDown() {
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
size_t size = chunk->size();
heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
size,
executable);
size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
heap()->isolate()->memory_allocator()->FreeRawMemory(
chunk->address() - guard_size,
size + guard_size,
executable);
heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size);
}
......@@ -2941,10 +2967,15 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
objects_size_ -= object->Size();
page_count_--;
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
chunk_size,
executable);
size_t guard_size = 0;
if (executable == EXECUTABLE) {
space = kObjectSpaceCodeSpace;
guard_size = Page::kPageSize;
}
heap()->isolate()->memory_allocator()->FreeRawMemory(
chunk_address - guard_size,
chunk_size + guard_size,
executable);
heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size_);
LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
......
......@@ -647,13 +647,11 @@ class MemoryAllocator {
#ifdef V8_TARGET_ARCH_X64
static const int kPagesPerChunk = 32;
// On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
static const int kPagesPerChunkLog2 = 5;
static const int kChunkTableLevels = 4;
static const int kChunkTableBitsPerLevel = 12;
#else
static const int kPagesPerChunk = 16;
// On 32 bit the chunk table consists of 2 levels of 256-entry tables.
static const int kPagesPerChunkLog2 = 4;
static const int kChunkTableLevels = 2;
static const int kChunkTableBitsPerLevel = 8;
#endif
......@@ -662,7 +660,6 @@ class MemoryAllocator {
MemoryAllocator();
static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
// Maximum space size in bytes.
intptr_t capacity_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment