Commit fbc230e4 authored by vegorov@chromium.org

Ensure that executable pages are properly guarded.

Split executable memory chunks into two pieces: a header with all the metadata (protection: RW) and a body (protection: RWX). Separate the header from the body with a guard page, and add another guard page after the body.
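For orientation, a sketch of the resulting layout of an executable chunk (illustrative only, not part of the patch; the exact offsets come from OS::CommitPageSize() and the CodePage* helpers added to MemoryAllocator below):

    // Executable chunk after this change (illustrative):
    //
    //   chunk base ->  +---------------------------+
    //                  | header: chunk metadata    |  RW
    //                  +---------------------------+  <- CodePageGuardStartOffset()
    //                  | guard page                |  no access
    //                  +---------------------------+  <- CodePageAreaStartOffset() == area_start()
    //                  | body: generated code      |  RWX
    //                  +---------------------------+  <- CodePageAreaEndOffset() == area_end()
    //                  | guard page                |  no access
    //                  +---------------------------+  <- chunk base + size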

R=erik.corry@gmail.com
BUG=http://crbug.com/115151

Review URL: https://chromiumcodereview.appspot.com/9452002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10809 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1a356cff
......@@ -451,7 +451,7 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
return
-     static_cast<Address>(base->body()) + (id * table_entry_size_);
+     static_cast<Address>(base->area_start()) + (id * table_entry_size_);
}
......@@ -464,14 +464,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
if (base == NULL ||
-     addr < base->body() ||
-     addr >= base->body() +
+     addr < base->area_start() ||
+     addr >= base->area_start() +
(kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
-           static_cast<int>(addr - base->body()) % table_entry_size_);
- return static_cast<int>(addr - base->body()) / table_entry_size_;
+           static_cast<int>(addr - base->area_start()) % table_entry_size_);
+ return static_cast<int>(addr - base->area_start()) / table_entry_size_;
}
......@@ -1152,11 +1152,12 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
EXECUTABLE,
NULL);
+ ASSERT(chunk->area_size() >= desc.instr_size);
if (chunk == NULL) {
V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
}
- memcpy(chunk->body(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->body(), desc.instr_size);
+ memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+ CPU::FlushICache(chunk->area_start(), desc.instr_size);
return chunk;
}
......
......@@ -49,7 +49,7 @@ void PromotionQueue::insert(HeapObject* target, int size) {
NewSpacePage* rear_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
ASSERT(!rear_page->prev_page()->is_anchor());
- rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
+ rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
ActivateGuardIfOnTheSamePage();
}
......@@ -81,11 +81,6 @@ void PromotionQueue::ActivateGuardIfOnTheSamePage() {
}
- int Heap::MaxObjectSizeInPagedSpace() {
-   return Page::kMaxHeapObjectSize;
- }
MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
PretenureFlag pretenure) {
// Check for ASCII first since this is the common case.
......@@ -119,7 +114,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+ { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
......@@ -153,7 +148,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+ { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
......
......@@ -345,7 +345,7 @@ class PromotionQueue {
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
ASSERT(!front_page->prev_page()->is_anchor());
front_ =
-     reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
+     reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
}
*target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_));
......@@ -484,9 +484,6 @@ class Heap {
// all available bytes. Check MaxHeapObjectSize() instead.
intptr_t Available();
- // Returns the maximum object size in paged space.
- inline int MaxObjectSizeInPagedSpace();
// Returns of size of all objects residing in the heap.
intptr_t SizeOfObjects();
......
......@@ -135,7 +135,7 @@ class StaticVisitorBase : public AllStatic {
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
- ASSERT(object_size < Page::kMaxHeapObjectSize);
+ ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
......
......@@ -355,6 +355,17 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
+ bool VirtualMemory::Guard(void* address) {
+   if (NULL == VirtualAlloc(address,
+                            OS::CommitPageSize(),
+                            MEM_COMMIT,
+                            PAGE_READONLY | PAGE_GUARD)) {
+     return false;
+   }
+   return true;
+ }
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
......
......@@ -411,6 +411,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
+ bool VirtualMemory::Guard(void* address) {
+   OS::Guard(address, OS::CommitPageSize());
+   return true;
+ }
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
......
......@@ -666,6 +666,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
+ bool VirtualMemory::Guard(void* address) {
+   OS::Guard(address, OS::CommitPageSize());
+   return true;
+ }
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
......
......@@ -429,6 +429,12 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
}
+ bool VirtualMemory::Guard(void* address) {
+   OS::Guard(address, OS::CommitPageSize());
+   return true;
+ }
bool VirtualMemory::CommitRegion(void* address,
size_t size,
bool is_executable) {
......
......@@ -295,6 +295,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
+ bool VirtualMemory::Guard(void* address) {
+   UNIMPLEMENTED();
+   return false;
+ }
class Thread::PlatformData : public Malloced {
public:
PlatformData() {
......
......@@ -458,6 +458,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
+ bool VirtualMemory::Guard(void* address) {
+   OS::Guard(address, OS::CommitPageSize());
+   return true;
+ }
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(GetRandomMmapAddr(),
size,
......
......@@ -401,6 +401,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
+ bool VirtualMemory::Guard(void* address) {
+   OS::Guard(address, OS::CommitPageSize());
+   return true;
+ }
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
......
......@@ -1511,6 +1511,17 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
}
+ bool VirtualMemory::Guard(void* address) {
+   if (NULL == VirtualAlloc(address,
+                            OS::CommitPageSize(),
+                            MEM_COMMIT,
+                            PAGE_READONLY | PAGE_GUARD)) {
+     return false;
+   }
+   return true;
+ }
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
......
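All the POSIX ports above delegate to OS::Guard, whose definition is not part of this diff. As a hedged sketch, that helper presumably amounts to revoking all access rights on a single commit page so that any stray read, write, or execute faults immediately:

    // Hedged sketch of the OS::Guard helper the POSIX ports rely on
    // (an assumption; the real helper lives outside this diff):
    #include <sys/mman.h>

    static void GuardOnePage(void* address, size_t size) {
      // Drop read/write/execute permissions; any access now traps.
      mprotect(address, size, PROT_NONE);
    }

The Windows ports instead re-commit the page as PAGE_READONLY | PAGE_GUARD, which raises a one-shot guard-page exception on first access.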
......@@ -356,6 +356,9 @@ class VirtualMemory {
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
+ // Creates a single guard page at the given address.
+ bool Guard(void* address);
void Release() {
ASSERT(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
......
......@@ -1088,9 +1088,10 @@ Serializer::Serializer(SnapshotByteSink* sink)
external_reference_encoder_(new ExternalReferenceEncoder),
large_object_total_(0),
root_index_wave_front_(0) {
+ isolate_ = Isolate::Current();
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
- ASSERT(Isolate::Current()->IsDefaultIsolate());
+ ASSERT(isolate_->IsDefaultIsolate());
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
......@@ -1642,8 +1643,8 @@ int Serializer::Allocate(int space, int size, bool* new_page) {
// serialized address.
CHECK(IsPowerOf2(Page::kPageSize));
int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
- CHECK(size <= Page::kObjectAreaSize);
- if (used_in_this_page + size > Page::kObjectAreaSize) {
+ CHECK(size <= SpaceAreaSize(space));
+ if (used_in_this_page + size > SpaceAreaSize(space)) {
*new_page = true;
fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
}
......@@ -1654,4 +1655,13 @@ int Serializer::Allocate(int space, int size, bool* new_page) {
}
+ int Serializer::SpaceAreaSize(int space) {
+   if (space == CODE_SPACE) {
+     return isolate_->memory_allocator()->CodePageAreaSize();
+   } else {
+     return Page::kPageSize - Page::kObjectStartOffset;
+   }
+ }
} } // namespace v8::internal
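A small hedged restatement of the rollover check in Serializer::Allocate() that this helper feeds (standalone sketch with an assumed 1 MB Page::kPageSize; not code from the patch):

    // Why Allocate() must consult SpaceAreaSize(): a code page donates part
    // of its area to guard pages, so an offset that still fits in a data
    // space page can overflow CODE_SPACE. (Illustrative sketch.)
    static bool NeedsNewPage(int fullness, int size, int space_area_size) {
      const int kPageSize = 1 << 20;  // assumed page size
      int used_in_this_page = fullness & (kPageSize - 1);
      return used_in_this_page + size > space_area_size;
    }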
......@@ -556,6 +556,9 @@ class Serializer : public SerializerDeserializer {
return external_reference_encoder_->Encode(addr);
}
+ int SpaceAreaSize(int space);
+ Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
// just numbered sequentially since relative addresses make no
......
......@@ -166,10 +166,8 @@ Page* Page::Initialize(Heap* heap,
Page* page = reinterpret_cast<Page*>(chunk);
ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
ASSERT(chunk->owner() == owner);
- owner->IncreaseCapacity(Page::kObjectAreaSize);
- owner->Free(page->ObjectAreaStart(),
-             static_cast<int>(page->ObjectAreaEnd() -
-                              page->ObjectAreaStart()));
+ owner->IncreaseCapacity(page->area_size());
+ owner->Free(page->area_start(), page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
......
......@@ -103,7 +103,7 @@ class Isolate;
ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
#define ASSERT_OBJECT_SIZE(size) \
- ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
+ ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
......@@ -361,21 +361,15 @@ class MemoryChunk {
store_buffer_counter_ = counter;
}
- Address body() { return address() + kObjectStartOffset; }
- Address body_limit() { return address() + size(); }
- int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
bool Contains(Address addr) {
-   return addr >= body() && addr < address() + size();
+   return addr >= area_start() && addr < area_end();
}
// Checks whether addr can be a limit of addresses in this page.
// It's a limit if it's in the page, or if it's just after the
// last byte of the page.
bool ContainsLimit(Address addr) {
-   return addr >= body() && addr <= address() + size();
+   return addr >= area_start() && addr <= area_end();
}
enum MemoryChunkFlags {
......@@ -488,6 +482,7 @@ class MemoryChunk {
static const intptr_t kLiveBytesOffset =
kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
+ kPointerSize + kPointerSize +
kPointerSize + kPointerSize + kPointerSize + kIntSize;
static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
......@@ -594,12 +589,22 @@ class MemoryChunk {
ClearFlag(EVACUATION_CANDIDATE);
}
+ Address area_start() { return area_start_; }
+ Address area_end() { return area_end_; }
+ int area_size() {
+   return static_cast<int>(area_end() - area_start());
+ }
protected:
MemoryChunk* next_chunk_;
MemoryChunk* prev_chunk_;
size_t size_;
intptr_t flags_;
+ // Start and end of allocatable memory on this chunk.
+ Address area_start_;
+ Address area_end_;
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
// The identity of the owning space. This is tagged as a failure pointer, but
......@@ -618,6 +623,8 @@ class MemoryChunk {
static MemoryChunk* Initialize(Heap* heap,
Address base,
size_t size,
+ Address area_start,
+ Address area_end,
Executability executable,
Space* owner);
......@@ -657,12 +664,6 @@ class Page : public MemoryChunk {
inline void set_next_page(Page* page);
inline void set_prev_page(Page* page);
- // Returns the start address of the object area in this page.
- Address ObjectAreaStart() { return address() + kObjectStartOffset; }
- // Returns the end address (exclusive) of the object area in this page.
- Address ObjectAreaEnd() { return address() + Page::kPageSize; }
// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address a) {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
......@@ -685,21 +686,14 @@ class Page : public MemoryChunk {
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- // Page size mask.
- static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
// Object area size in bytes.
- static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
+ static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
// Maximum object size that fits in a page.
- static const int kMaxHeapObjectSize = kObjectAreaSize;
- static const int kFirstUsedCell =
-     (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
+ static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
- static const int kLastUsedCell =
-     ((kPageSize - kPointerSize)/kPointerSize) >>
-         Bitmap::kBitsPerCellLog2;
+ // Page size mask.
+ static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
inline void ClearGCFields();
......@@ -734,7 +728,7 @@ STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
class LargePage : public MemoryChunk {
public:
HeapObject* GetObject() {
-   return HeapObject::FromAddress(body());
+   return HeapObject::FromAddress(area_start());
}
inline LargePage* next_page() const {
......@@ -975,7 +969,7 @@ class MemoryAllocator {
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
-   return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
+   return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
}
#ifdef DEBUG
......@@ -1028,6 +1022,20 @@ class MemoryAllocator {
bool MemoryAllocationCallbackRegistered(
MemoryAllocationCallback callback);
+ static int CodePageGuardStartOffset();
+ static int CodePageGuardSize();
+ static int CodePageAreaStartOffset();
+ static int CodePageAreaEndOffset();
+ static int CodePageAreaSize() {
+   return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+ }
+ static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
private:
Isolate* isolate_;
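A hedged sketch of how these offsets could be defined, consistent with the header/guard/body/guard layout in the commit message (the real definitions are not shown in this extract; the RoundUp usage and rounding choices are assumptions):

    // Plausible definitions (assumptions, not the patch itself):
    int MemoryAllocator::CodePageGuardStartOffset() {
      // The leading guard page follows the RW header, rounded up to a full
      // OS commit page so its protection can be changed independently.
      return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
    }

    int MemoryAllocator::CodePageGuardSize() {
      return static_cast<int>(OS::CommitPageSize());
    }

    int MemoryAllocator::CodePageAreaStartOffset() {
      // The executable body begins right after the leading guard page.
      return CodePageGuardStartOffset() + CodePageGuardSize();
    }

    int MemoryAllocator::CodePageAreaEndOffset() {
      // The trailing guard page occupies the last commit page of the chunk.
      return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
    }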
......@@ -1380,7 +1388,7 @@ class FreeList BASE_EMBEDDED {
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+ static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
......@@ -1572,12 +1580,12 @@ class PagedSpace : public Space {
void IncreaseUnsweptFreeBytes(Page* p) {
ASSERT(ShouldBeSweptLazily(p));
- unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
+ unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
}
void DecreaseUnsweptFreeBytes(Page* p) {
ASSERT(ShouldBeSweptLazily(p));
- unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
+ unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
}
bool AdvanceSweeper(intptr_t bytes_to_sweep);
......@@ -1600,7 +1608,14 @@ class PagedSpace : public Space {
// Returns the number of total pages in this space.
int CountTotalPages();
+ // Return size of allocatable area on a page in this space.
+ inline int AreaSize() {
+   return area_size_;
+ }
protected:
+ int area_size_;
// Maximum capacity of this space.
intptr_t max_capacity_;
......@@ -1702,6 +1717,8 @@ class NewSpacePage : public MemoryChunk {
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
+ static const int kAreaSize = Page::kNonCodeObjectAreaSize;
inline NewSpacePage* next_page() const {
return static_cast<NewSpacePage*>(next_chunk());
}
......@@ -1814,22 +1831,22 @@ class SemiSpace : public Space {
// Returns the start address of the first page of the space.
Address space_start() {
ASSERT(anchor_.next_page() != &anchor_);
-   return anchor_.next_page()->body();
+   return anchor_.next_page()->area_start();
}
// Returns the start address of the current page of the space.
Address page_low() {
-   return current_page_->body();
+   return current_page_->area_start();
}
// Returns one past the end address of the space.
Address space_end() {
-   return anchor_.prev_page()->body_limit();
+   return anchor_.prev_page()->area_end();
}
// Returns one past the end address of the current page of the space.
Address page_high() {
-   return current_page_->body_limit();
+   return current_page_->area_end();
}
bool AdvancePage() {
......@@ -1965,7 +1982,7 @@ class SemiSpaceIterator : public ObjectIterator {
NewSpacePage* page = NewSpacePage::FromLimit(current_);
page = page->next_page();
ASSERT(!page->is_anchor());
-   current_ = page->body();
+   current_ = page->area_start();
if (current_ == limit_) return NULL;
}
......@@ -2073,7 +2090,7 @@ class NewSpace : public Space {
// Return the allocated bytes in the active semispace.
virtual intptr_t Size() {
-   return pages_used_ * Page::kObjectAreaSize +
+   return pages_used_ * NewSpacePage::kAreaSize +
static_cast<int>(top() - to_space_.page_low());
}
......@@ -2085,7 +2102,7 @@ class NewSpace : public Space {
// Return the current capacity of a semispace.
intptr_t EffectiveCapacity() {
SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
-   return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
+   return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
}
// Return the current capacity of a semispace.
......@@ -2302,7 +2319,7 @@ class OldSpace : public PagedSpace {
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
-   return page->ObjectAreaEnd();
+   return page->area_end();
}
public:
......@@ -2331,12 +2348,12 @@ class FixedSpace : public PagedSpace {
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name) {
-   page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+   page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
}
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
-   return page->ObjectAreaEnd() - page_extra_;
+   return page->area_end() - page_extra_;
}
int object_size_in_bytes() { return object_size_in_bytes_; }
......@@ -2387,7 +2404,7 @@ class MapSpace : public FixedSpace {
#endif
private:
- static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
+ static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
// Do map space compaction if there is a page gap.
int CompactionThreshold() {
......
......@@ -453,14 +453,14 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
- Address page = Page::FromAddress(addr)->ObjectAreaStart();
+ Address page = Page::FromAddress(addr)->area_start();
return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}
// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
- Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+ Address page = Page::FromAllocationTop(addr)->area_start();
return page + ((addr - page) / Map::kSize * Map::kSize);
}
......@@ -523,8 +523,8 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
Page* page,
RegionCallback region_callback,
ObjectSlotCallback slot_callback) {
- Address visitable_start = page->ObjectAreaStart();
- Address end_of_page = page->ObjectAreaEnd();
+ Address visitable_start = page->area_start();
+ Address end_of_page = page->area_end();
Address visitable_end = visitable_start;
......
......@@ -88,7 +88,7 @@ static MaybeObject* AllocateAfterFailures() {
static const int kLargeObjectSpaceFillerLength = 300000;
static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
kLargeObjectSpaceFillerLength);
- ASSERT(kLargeObjectSpaceFillerSize > heap->MaxObjectSizeInPagedSpace());
+ ASSERT(kLargeObjectSpaceFillerSize > heap->old_pointer_space()->AreaSize());
while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
IsFailure());
......@@ -214,10 +214,12 @@ TEST(CodeRange) {
while (total_allocated < 5 * code_range_size) {
if (current_allocated < code_range_size / 10) {
// Allocate a block.
- // Geometrically distributed sizes, greater than Page::kMaxHeapObjectSize.
+ // Geometrically distributed sizes, greater than
+ // Page::kMaxNonCodeHeapObjectSize (which is greater than code page area).
// TODO(gc): instead of using 3 use some constant based on code_range_size
// kMaxHeapObjectSize.
- size_t requested = (Page::kMaxHeapObjectSize << (Pseudorandom() % 3)) +
+ size_t requested =
+     (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
Address base = code_range->AllocateRawMemory(requested, &allocated);
......
......@@ -820,7 +820,7 @@ TEST(Iteration) {
FACTORY->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
- int large_size = HEAP->MaxObjectSizeInPagedSpace() + 1;
+ int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
......
......@@ -94,7 +94,7 @@ TEST(Promotion) {
// Allocate a fixed array in the new space.
int array_size =
-     (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
+     (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
(kPointerSize * 4);
Object* obj = HEAP->AllocateFixedArray(array_size)->ToObjectChecked();
......@@ -125,7 +125,7 @@ TEST(NoPromotion) {
// Allocate a big Fixed array in the new space.
int max_size =
-     Min(HEAP->MaxObjectSizeInPagedSpace(), HEAP->MaxObjectSizeInNewSpace());
+     Min(Page::kMaxNonCodeHeapObjectSize, HEAP->MaxObjectSizeInNewSpace());
int length = (max_size - FixedArray::kHeaderSize) / (2*kPointerSize);
Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
......
......@@ -558,7 +558,8 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
TEST(LinearAllocation) {
v8::V8::Initialize();
int new_space_max = 512 * KB;
- int paged_space_max = Page::kMaxHeapObjectSize;
+ int paged_space_max = Page::kMaxNonCodeHeapObjectSize;
+ int code_space_max = HEAP->code_space()->AreaSize();
for (int size = 1000; size < 5 * MB; size += size >> 1) {
size &= ~8; // Round.
......@@ -568,7 +569,7 @@ TEST(LinearAllocation) {
new_space_size,
paged_space_size, // Old pointer space.
paged_space_size, // Old data space.
- HEAP->code_space()->RoundSizeDownToObjectAlignment(paged_space_size),
+ HEAP->code_space()->RoundSizeDownToObjectAlignment(code_space_max),
HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
size); // Large object space.
......@@ -604,7 +605,7 @@ TEST(LinearAllocation) {
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
-     page_fullness > Page::kObjectAreaSize) {
+     page_fullness > HEAP->old_pointer_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
pointer_last = NULL;
}
......@@ -624,7 +625,7 @@ TEST(LinearAllocation) {
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
-     page_fullness > Page::kObjectAreaSize) {
+     page_fullness > HEAP->old_data_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
data_last = NULL;
}
......@@ -642,7 +643,7 @@ TEST(LinearAllocation) {
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kMapSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
-     page_fullness > Page::kObjectAreaSize) {
+     page_fullness > HEAP->map_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
map_last = NULL;
}
......@@ -653,7 +654,7 @@ TEST(LinearAllocation) {
map_last = obj;
}
- if (size > Page::kObjectAreaSize) {
+ if (size > Page::kMaxNonCodeHeapObjectSize) {
// Support for reserving space in large object space is not there yet,
// but using an always-allocate scope is fine for now.
AlwaysAllocateScope always;
......
......@@ -191,9 +191,10 @@ TEST(NewSpace) {
HEAP->ReservedSemiSpaceSize()));
CHECK(new_space.HasBeenSetUp());
- while (new_space.Available() >= Page::kMaxHeapObjectSize) {
+ while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
Object* obj =
-     new_space.AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
+     new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
+         ToObjectUnchecked();
CHECK(new_space.Contains(HeapObject::cast(obj)));
}
......@@ -223,7 +224,7 @@ TEST(OldSpace) {
CHECK(s->SetUp());
while (s->Available() > 0) {
- s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
+ s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
}
s->TearDown();
......