Commit fbc230e4 authored by vegorov@chromium.org

Ensure that executable pages are properly guarded.

Split executable memory chunks into two pieces: a header with all metadata (protection: RW) and a body (protection: RWX). Separate the header from the body with a guard page, and add another guard page after the body.
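
The resulting layout of an executable chunk is: RW header (occupying whole OS pages), one guard page, the RWX body, and one trailing guard page. Below is a minimal standalone sketch of the area-size arithmetic this layout implies; the page, chunk, and header sizes are placeholder values for the example, not the ones V8 uses (the real value comes from the new MemoryAllocator::CodePageAreaSize()).

// Illustrative sketch only (not V8 code): the usable RWX area of an executable
// chunk, given the layout above -- RW header, guard page, RWX body, guard page.
// kPageSize, kChunkSize and kHeaderSize are assumed values for the example.
#include <cstddef>
#include <cstdio>

static const size_t kPageSize = 4 * 1024;      // assumed OS commit page size
static const size_t kChunkSize = 1024 * 1024;  // assumed total chunk size
static const size_t kHeaderSize = 256;         // assumed metadata (header) size

static size_t RoundUpToPage(size_t n) {
  return (n + kPageSize - 1) & ~(kPageSize - 1);
}

// The header is padded to a page boundary so it can stay RW while the body is
// RWX; one guard page follows the header and another trails the body.
static size_t CodeAreaSize() {
  return kChunkSize - RoundUpToPage(kHeaderSize) - 2 * kPageSize;
}

int main() {
  std::printf("code area: %zu bytes of a %zu-byte chunk\n",
              CodeAreaSize(), kChunkSize);
  return 0;
}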

R=erik.corry@gmail.com
BUG=http://crbug.com/115151

Review URL: https://chromiumcodereview.appspot.com/9452002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10809 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1a356cff
@@ -451,7 +451,7 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
return
static_cast<Address>(base->body()) + (id * table_entry_size_);
static_cast<Address>(base->area_start()) + (id * table_entry_size_);
}
@@ -464,14 +464,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
if (base == NULL ||
addr < base->body() ||
addr >= base->body() +
addr < base->area_start() ||
addr >= base->area_start() +
(kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
static_cast<int>(addr - base->body()) % table_entry_size_);
return static_cast<int>(addr - base->body()) / table_entry_size_;
static_cast<int>(addr - base->area_start()) % table_entry_size_);
return static_cast<int>(addr - base->area_start()) / table_entry_size_;
}
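
GetDeoptimizationEntry and GetDeoptimizationId are inverses of each other: an entry id maps to area_start() + id * table_entry_size_, and an address inside the table maps back to an id by subtracting area_start() and dividing by the entry size. A self-contained sketch of that round trip, with made-up entry size and count standing in for V8's values:

// Sketch of the deopt-table address arithmetic; kTableEntrySize and
// kNumberOfEntries are illustrative, and 'base' stands in for area_start().
#include <cassert>
#include <cstdint>

static const int kTableEntrySize = 10;     // assumed size of one table entry
static const int kNumberOfEntries = 4096;  // assumed number of entries

static uintptr_t EntryAddress(uintptr_t area_start, int id) {
  return area_start + static_cast<uintptr_t>(id) * kTableEntrySize;
}

static int EntryId(uintptr_t area_start, uintptr_t addr) {
  if (addr < area_start ||
      addr >= area_start + kNumberOfEntries * kTableEntrySize) {
    return -1;  // corresponds to kNotDeoptimizationEntry
  }
  assert((addr - area_start) % kTableEntrySize == 0);
  return static_cast<int>((addr - area_start) / kTableEntrySize);
}

int main() {
  uintptr_t base = 0x10000;  // pretend area_start()
  assert(EntryId(base, EntryAddress(base, 42)) == 42);
  return 0;
}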
@@ -1152,11 +1152,12 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
EXECUTABLE,
NULL);
ASSERT(chunk->area_size() >= desc.instr_size);
if (chunk == NULL) {
V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
}
memcpy(chunk->body(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->body(), desc.instr_size);
memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->area_start(), desc.instr_size);
return chunk;
}
@@ -49,7 +49,7 @@ void PromotionQueue::insert(HeapObject* target, int size) {
NewSpacePage* rear_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
ASSERT(!rear_page->prev_page()->is_anchor());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
ActivateGuardIfOnTheSamePage();
}
@@ -81,11 +81,6 @@ void PromotionQueue::ActivateGuardIfOnTheSamePage() {
}
int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
PretenureFlag pretenure) {
// Check for ASCII first since this is the common case.
@@ -119,7 +114,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -153,7 +148,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -345,7 +345,7 @@ class PromotionQueue {
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
ASSERT(!front_page->prev_page()->is_anchor());
front_ =
reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
}
*target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_));
@@ -484,9 +484,6 @@ class Heap {
// all available bytes. Check MaxHeapObjectSize() instead.
intptr_t Available();
// Returns the maximum object size in paged space.
inline int MaxObjectSizeInPagedSpace();
// Returns the size of all objects residing in the heap.
intptr_t SizeOfObjects();
@@ -135,7 +135,7 @@ class StaticVisitorBase : public AllStatic {
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
ASSERT(object_size < Page::kMaxHeapObjectSize);
ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
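
The specialization id above is just the base visitor id offset by the object size in words, relative to the smallest allowed object. A tiny sketch of that arithmetic; the constants are assumptions for the example, not necessarily V8's values:

// Sketch of the visitor-id specialization arithmetic shown above.
#include <cassert>

static const int kPointerSizeLog2 = 3;       // assumed 64-bit: 8-byte pointers
static const int kMinObjectSizeInWords = 2;  // assumed minimum object size

static int SpecializedId(int base, int object_size) {
  return base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords;
}

int main() {
  // A 32-byte object is 4 words, so it lands 2 slots past the base visitor id.
  assert(SpecializedId(/*base=*/10, /*object_size=*/32) == 12);
  return 0;
}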
@@ -355,6 +355,17 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
PAGE_READONLY | PAGE_GUARD)) {
return false;
}
return true;
}
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
@@ -411,6 +411,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
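
On the POSIX ports the new VirtualMemory::Guard simply forwards to OS::Guard for one commit page. The sketch below illustrates the underlying technique, revoking access to a single page with mprotect; it is a standalone illustration of the mechanism under that assumption, not V8's exact implementation.

// Illustrative POSIX guard-page sketch (not V8 code): reserve three pages,
// then revoke all access on the middle one so any touch faults immediately.
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

int main() {
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  char* block = static_cast<char*>(mmap(NULL, 3 * page,
                                        PROT_READ | PROT_WRITE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (block == MAP_FAILED) return 1;

  // Turn the middle page into a guard page, mirroring what OS::Guard() is for.
  if (mprotect(block + page, page, PROT_NONE) != 0) return 1;

  block[0] = 1;         // first page: still writable
  block[2 * page] = 1;  // last page: still writable
  // block[page] = 1;   // would crash: the guard page has no access
  std::printf("guard page installed at %p\n", static_cast<void*>(block + page));
  munmap(block, 3 * page);
  return 0;
}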
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
@@ -666,6 +666,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
@@ -429,6 +429,12 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
bool VirtualMemory::CommitRegion(void* address,
size_t size,
bool is_executable) {
@@ -295,6 +295,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
UNIMPLEMENTED();
return false;
}
class Thread::PlatformData : public Malloced {
public:
PlatformData() {
@@ -458,6 +458,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(GetRandomMmapAddr(),
size,
@@ -401,6 +401,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
@@ -1511,6 +1511,17 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
}
bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
PAGE_READONLY | PAGE_GUARD)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
@@ -356,6 +356,9 @@ class VirtualMemory {
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
// Creates a single guard page at the given address.
bool Guard(void* address);
void Release() {
ASSERT(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
@@ -1088,9 +1088,10 @@ Serializer::Serializer(SnapshotByteSink* sink)
external_reference_encoder_(new ExternalReferenceEncoder),
large_object_total_(0),
root_index_wave_front_(0) {
isolate_ = Isolate::Current();
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
ASSERT(Isolate::Current()->IsDefaultIsolate());
ASSERT(isolate_->IsDefaultIsolate());
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
......@@ -1642,8 +1643,8 @@ int Serializer::Allocate(int space, int size, bool* new_page) {
// serialized address.
CHECK(IsPowerOf2(Page::kPageSize));
int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
CHECK(size <= Page::kObjectAreaSize);
if (used_in_this_page + size > Page::kObjectAreaSize) {
CHECK(size <= SpaceAreaSize(space));
if (used_in_this_page + size > SpaceAreaSize(space)) {
*new_page = true;
fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
}
@@ -1654,4 +1655,13 @@ int Serializer::Allocate(int space, int size, bool* new_page) {
}
int Serializer::SpaceAreaSize(int space) {
if (space == CODE_SPACE) {
return isolate_->memory_allocator()->CodePageAreaSize();
} else {
return Page::kPageSize - Page::kObjectStartOffset;
}
}
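
Because code pages now expose a smaller usable area than other paged spaces, the serializer asks SpaceAreaSize(space) instead of the old fixed Page::kObjectAreaSize when deciding whether an object still fits on the current page. A rough sketch of that rounding logic follows; the page size and the per-space area sizes are placeholder values for the example only.

// Sketch of the page-fullness rounding used by Serializer::Allocate above.
#include <cassert>

static const int kPageSize = 1 << 20;                    // assumed, power of two
static const int kNonCodeAreaSize = kPageSize - 256;     // assumed non-code area
static const int kCodeAreaSize = kPageSize - 3 * 4096;   // assumed code area

static int RoundUp(int x, int multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

// Returns the new fullness after placing 'size' bytes in a space; starts a new
// page when the object would not fit in the remaining area of the current one.
static int Allocate(int fullness, int size, bool is_code_space, bool* new_page) {
  int area = is_code_space ? kCodeAreaSize : kNonCodeAreaSize;
  assert(size <= area);
  int used_in_this_page = fullness & (kPageSize - 1);
  *new_page = (used_in_this_page + size > area);
  if (*new_page) fullness = RoundUp(fullness, kPageSize);
  return fullness + size;
}

int main() {
  bool new_page = false;
  int fullness = Allocate(0, kCodeAreaSize, /*is_code_space=*/true, &new_page);
  assert(!new_page);
  Allocate(fullness, 8, /*is_code_space=*/true, &new_page);
  assert(new_page);  // the next object spills onto a fresh page
  return 0;
}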
} } // namespace v8::internal
@@ -556,6 +556,9 @@ class Serializer : public SerializerDeserializer {
return external_reference_encoder_->Encode(addr);
}
int SpaceAreaSize(int space);
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
// just numbered sequentially since relative addresses make no
@@ -166,10 +166,8 @@ Page* Page::Initialize(Heap* heap,
Page* page = reinterpret_cast<Page*>(chunk);
ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(Page::kObjectAreaSize);
owner->Free(page->ObjectAreaStart(),
static_cast<int>(page->ObjectAreaEnd() -
page->ObjectAreaStart()));
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
@@ -453,14 +453,14 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
Address page = Page::FromAddress(addr)->ObjectAreaStart();
Address page = Page::FromAddress(addr)->area_start();
return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}
// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
Address page = Page::FromAllocationTop(addr)->area_start();
return page + ((addr - page) / Map::kSize * Map::kSize);
}
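
MapStartAlign rounds an address up to the next Map-sized boundary past the page's area_start(), and MapEndAlign rounds down to the previous one. A small worked sketch with made-up sizes; kMapSize and the addresses are illustrative, and 'area_start' stands in for Page::area_start().

// Sketch of the Map alignment arithmetic above.
#include <cassert>
#include <cstdint>

static const uintptr_t kMapSize = 88;  // assumed size of a Map object

static uintptr_t MapStartAlign(uintptr_t area_start, uintptr_t addr) {
  // First Map boundary at or after addr.
  return area_start + ((addr - area_start + kMapSize - 1) / kMapSize) * kMapSize;
}

static uintptr_t MapEndAlign(uintptr_t area_start, uintptr_t addr) {
  // Last Map boundary at or before addr.
  return area_start + ((addr - area_start) / kMapSize) * kMapSize;
}

int main() {
  uintptr_t start = 1000;  // pretend area_start()
  assert(MapStartAlign(start, 1001) == start + kMapSize);
  assert(MapEndAlign(start, 1001) == start);
  assert(MapStartAlign(start, 1000) == start);
  return 0;
}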
@@ -523,8 +523,8 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
Page* page,
RegionCallback region_callback,
ObjectSlotCallback slot_callback) {
Address visitable_start = page->ObjectAreaStart();
Address end_of_page = page->ObjectAreaEnd();
Address visitable_start = page->area_start();
Address end_of_page = page->area_end();
Address visitable_end = visitable_start;
@@ -88,7 +88,7 @@ static MaybeObject* AllocateAfterFailures() {
static const int kLargeObjectSpaceFillerLength = 300000;
static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
kLargeObjectSpaceFillerLength);
ASSERT(kLargeObjectSpaceFillerSize > heap->MaxObjectSizeInPagedSpace());
ASSERT(kLargeObjectSpaceFillerSize > heap->old_pointer_space()->AreaSize());
while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
IsFailure());
@@ -214,11 +214,13 @@ TEST(CodeRange) {
while (total_allocated < 5 * code_range_size) {
if (current_allocated < code_range_size / 10) {
// Allocate a block.
// Geometrically distributed sizes, greater than Page::kMaxHeapObjectSize.
// Geometrically distributed sizes, greater than
// Page::kMaxNonCodeHeapObjectSize (which is greater than code page area).
// TODO(gc): instead of using 3, use some constant based on code_range_size
// and kMaxHeapObjectSize.
size_t requested = (Page::kMaxHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t requested =
(Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
Address base = code_range->AllocateRawMemory(requested, &allocated);
CHECK(base != NULL);
@@ -820,7 +820,7 @@ TEST(Iteration) {
FACTORY->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
int large_size = HEAP->MaxObjectSizeInPagedSpace() + 1;
int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
@@ -94,7 +94,7 @@ TEST(Promotion) {
// Allocate a fixed array in the new space.
int array_size =
(HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
(Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
(kPointerSize * 4);
Object* obj = HEAP->AllocateFixedArray(array_size)->ToObjectChecked();
@@ -125,7 +125,7 @@ TEST(NoPromotion) {
// Allocate a big Fixed array in the new space.
int max_size =
Min(HEAP->MaxObjectSizeInPagedSpace(), HEAP->MaxObjectSizeInNewSpace());
Min(Page::kMaxNonCodeHeapObjectSize, HEAP->MaxObjectSizeInNewSpace());
int length = (max_size - FixedArray::kHeaderSize) / (2*kPointerSize);
Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
@@ -558,7 +558,8 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
TEST(LinearAllocation) {
v8::V8::Initialize();
int new_space_max = 512 * KB;
int paged_space_max = Page::kMaxHeapObjectSize;
int paged_space_max = Page::kMaxNonCodeHeapObjectSize;
int code_space_max = HEAP->code_space()->AreaSize();
for (int size = 1000; size < 5 * MB; size += size >> 1) {
size &= ~8; // Round.
@@ -568,7 +569,7 @@ TEST(LinearAllocation) {
new_space_size,
paged_space_size, // Old pointer space.
paged_space_size, // Old data space.
HEAP->code_space()->RoundSizeDownToObjectAlignment(paged_space_size),
HEAP->code_space()->RoundSizeDownToObjectAlignment(code_space_max),
HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
size); // Large object space.
@@ -604,7 +605,7 @@ TEST(LinearAllocation) {
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > Page::kObjectAreaSize) {
page_fullness > HEAP->old_pointer_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
pointer_last = NULL;
}
@@ -624,7 +625,7 @@ TEST(LinearAllocation) {
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > Page::kObjectAreaSize) {
page_fullness > HEAP->old_data_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
data_last = NULL;
}
@@ -642,7 +643,7 @@ TEST(LinearAllocation) {
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kMapSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > Page::kObjectAreaSize) {
page_fullness > HEAP->map_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
map_last = NULL;
}
@@ -653,7 +654,7 @@ TEST(LinearAllocation) {
map_last = obj;
}
if (size > Page::kObjectAreaSize) {
if (size > Page::kMaxNonCodeHeapObjectSize) {
// Support for reserving space in large object space is not there yet,
// but using an always-allocate scope is fine for now.
AlwaysAllocateScope always;
@@ -191,9 +191,10 @@ TEST(NewSpace) {
HEAP->ReservedSemiSpaceSize()));
CHECK(new_space.HasBeenSetUp());
while (new_space.Available() >= Page::kMaxHeapObjectSize) {
while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
Object* obj =
new_space.AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
ToObjectUnchecked();
CHECK(new_space.Contains(HeapObject::cast(obj)));
}
@@ -223,7 +224,7 @@ TEST(OldSpace) {
CHECK(s->SetUp());
while (s->Available() > 0) {
s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
}
s->TearDown();