Commit f00631b7 authored by erik.corry@gmail.com

Reduce initial boot-up memory use.

This is another attempt at what http://codereview.chromium.org/9179012 was
trying to achieve. This time I am going for 80% of the benefit with around
5% of the complexity.

It works by reducing the size of the first page in each space. Unlike the
previous change, there is no attempt to grow pages; we simply allocate more
full-sized pages when we need more memory. For this reason the first pages are
not quite as small (compare
http://codereview.chromium.org/9179012/diff/1/src/snapshot.h with the changes
to spaces.cc in this CL): we want to be able to do a little bit of allocation
before we have to add a full-sized page to the space.
Review URL: https://chromiumcodereview.appspot.com/9950048

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11203 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 5798bc27
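
To make the mechanism easy to see before diving into the diff: PagedSpace::Expand()
now requests a reduced, per-space size for the very first page of a space, and a
full AreaSize() page for every later one. The standalone C++ sketch below is
illustrative only, not V8 code; the per-space constants mirror the
SizeOfFirstPage() hunk further down, while kFullPageArea and NextPageSize are
made-up stand-ins for PagedSpace::AreaSize() and the branch added to Expand().

// first_page_size_sketch.cc -- illustrative only, not V8 source.
#include <algorithm>
#include <cstdio>

namespace sketch {

const int KB = 1024;
const int kPointerSize = static_cast<int>(sizeof(void*));
const int kFullPageArea = 1024 * KB;  // made-up stand-in for PagedSpace::AreaSize()

enum SpaceIdentity {
  OLD_POINTER_SPACE, OLD_DATA_SPACE, MAP_SPACE, CELL_SPACE, CODE_SPACE
};

// First pages are smaller than full pages, but large enough that a little
// allocation can happen before a full-sized page has to be added.
int SizeOfFirstPage(SpaceIdentity identity) {
  int size = 0;
  switch (identity) {
    case OLD_POINTER_SPACE: size = 64 * kPointerSize * KB; break;
    case OLD_DATA_SPACE:    size = 192 * KB;               break;
    case MAP_SPACE:         size = 128 * KB;               break;
    case CELL_SPACE:        size = 96 * KB;                break;
    case CODE_SPACE:
      // On x64 code pages come from a reserved range and stay full-sized in
      // this CL; on 32-bit targets a smaller first code page is used.
      size = (kPointerSize == 8) ? kFullPageArea : 384 * KB;
      break;
  }
  return std::min(size, kFullPageArea);
}

// The Expand()-style decision: the reduced size applies only while the space
// has no pages at all; every later page is full-sized.
int NextPageSize(SpaceIdentity identity, bool space_is_empty) {
  return space_is_empty ? SizeOfFirstPage(identity) : kFullPageArea;
}

}  // namespace sketch

int main() {
  std::printf("first OLD_DATA_SPACE page:  %d KB\n",
              sketch::NextPageSize(sketch::OLD_DATA_SPACE, true) / sketch::KB);
  std::printf("later OLD_DATA_SPACE pages: %d KB\n",
              sketch::NextPageSize(sketch::OLD_DATA_SPACE, false) / sketch::KB);
  return 0;
}

Because the first page of a space can now be smaller than a full page, the
ASSERT in Page::Initialize is relaxed from == kPageSize to <= kPageSize in the
first hunk below.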
@@ -164,7 +164,7 @@ Page* Page::Initialize(Heap* heap,
                        Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+  ASSERT(chunk->size() <= static_cast<size_t>(kPageSize));
   ASSERT(chunk->owner() == owner);
   owner->IncreaseCapacity(page->area_size());
   owner->Free(page->area_start(), page->area_size());
@@ -565,11 +565,10 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
 }
 
 
-Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
+Page* MemoryAllocator::AllocatePage(intptr_t size,
+                                    PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
-                                     executable,
-                                     owner);
+  MemoryChunk* chunk = AllocateChunk(size, executable, owner);
 
   if (chunk == NULL) return NULL;
@@ -578,8 +577,8 @@ Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
 
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
-                                              Executability executable,
-                                              Space* owner) {
+                                              Space* owner,
+                                              Executability executable) {
   MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
@@ -833,7 +832,6 @@ MaybeObject* PagedSpace::FindObject(Address addr) {
 
 bool PagedSpace::CanExpand() {
   ASSERT(max_capacity_ % AreaSize() == 0);
-  ASSERT(Capacity() % AreaSize() == 0);
 
   if (Capacity() == max_capacity_) return false;
@@ -848,8 +846,14 @@ bool PagedSpace::CanExpand() {
 bool PagedSpace::Expand() {
   if (!CanExpand()) return false;
 
-  Page* p = heap()->isolate()->memory_allocator()->
-      AllocatePage(this, executable());
+  intptr_t size = AreaSize();
+  if (anchor_.next_page() == &anchor_) {
+    size = SizeOfFirstPage();
+  }
+
+  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
+      size, this, executable());
   if (p == NULL) return false;
 
   ASSERT(Capacity() <= max_capacity_);
@@ -860,6 +864,38 @@ bool PagedSpace::Expand() {
 }
 
 
+intptr_t PagedSpace::SizeOfFirstPage() {
+  int size = 0;
+  switch (identity()) {
+    case OLD_POINTER_SPACE:
+      size = 64 * kPointerSize * KB;
+      break;
+    case OLD_DATA_SPACE:
+      size = 192 * KB;
+      break;
+    case MAP_SPACE:
+      size = 128 * KB;
+      break;
+    case CELL_SPACE:
+      size = 96 * KB;
+      break;
+    case CODE_SPACE:
+      if (kPointerSize == 8) {
+        // On x64 we allocate code pages in a special way (from the reserved
+        // 2GByte area). That part of the code is not yet upgraded to handle
+        // small pages.
+        size = AreaSize();
+      } else {
+        size = 384 * KB;
+      }
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return Min(size, AreaSize());
+}
+
+
 int PagedSpace::CountTotalPages() {
   PageIterator it(this);
   int count = 0;
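(Back-of-the-envelope arithmetic on those constants, mine rather than the CL's:
with kPointerSize == 4 the five first pages come to 64*4 + 192 + 128 + 96 + 384
= 1056 KB, and with kPointerSize == 8 to roughly 64*8 + 192 + 128 + 96 + 1024 =
1952 KB, since the code space stays full-sized on x64, compared with roughly
5 MB when every paged space starts with a full 1 MB page. That is in line with
the lowered BootUpMemoryUse limits in the test hunk near the end of this CL.)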
@@ -903,7 +939,6 @@ void PagedSpace::ReleasePage(Page* page) {
   }
 
   ASSERT(Capacity() > 0);
-  ASSERT(Capacity() % AreaSize() == 0);
   accounting_stats_.ShrinkSpace(AreaSize());
 }
@@ -1042,6 +1077,7 @@ bool NewSpace::SetUp(int reserved_semispace_capacity,
   if (!to_space_.Commit()) {
     return false;
   }
+  ASSERT(!from_space_.is_committed());  // No need to use memory yet.
 
   start_ = chunk_base_;
   address_mask_ = ~(2 * reserved_semispace_capacity - 1);
@@ -2581,7 +2617,7 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
   }
 
   LargePage* page = heap()->isolate()->memory_allocator()->
-      AllocateLargePage(object_size, executable, this);
+      AllocateLargePage(object_size, this, executable);
   if (page == NULL) return Failure::RetryAfterGC(identity());
 
   ASSERT(page->area_size() >= object_size);
@@ -637,8 +637,10 @@ class MemoryChunk {
   friend class MemoryAllocator;
 };
 
+STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+
 
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
 //
@@ -950,11 +952,11 @@ class MemoryAllocator {
   void TearDown();
 
-  Page* AllocatePage(PagedSpace* owner, Executability executable);
+  Page* AllocatePage(
+      intptr_t size, PagedSpace* owner, Executability executable);
 
-  LargePage* AllocateLargePage(intptr_t object_size,
-                               Executability executable,
-                               Space* owner);
+  LargePage* AllocateLargePage(
+      intptr_t object_size, Space* owner, Executability executable);
 
   void Free(MemoryChunk* chunk);
@@ -1625,6 +1627,8 @@ class PagedSpace : public Space {
   // Maximum capacity of this space.
   intptr_t max_capacity_;
 
+  intptr_t SizeOfFirstPage();
+
   // Accounting information for this space.
   AllocationStats accounting_stats_;
@@ -534,15 +534,15 @@ TEST(BootUpMemoryUse) {
   intptr_t booted_memory = MemoryInUse();
   if (sizeof(initial_memory) == 8) {
     if (v8::internal::Snapshot::IsEnabled()) {
-      CHECK_LE(booted_memory - initial_memory, 6686 * 1024);  // 6476.
+      CHECK_LE(booted_memory - initial_memory, 3500 * 1024);  // 3396.
     } else {
-      CHECK_LE(booted_memory - initial_memory, 6809 * 1024);  // 6628.
+      CHECK_LE(booted_memory - initial_memory, 3500 * 1024);  // 3432.
     }
   } else {
     if (v8::internal::Snapshot::IsEnabled()) {
-      CHECK_LE(booted_memory - initial_memory, 6532 * 1024);  // 6388.
+      CHECK_LE(booted_memory - initial_memory, 2600 * 1024);  // 2484.
     } else {
-      CHECK_LE(booted_memory - initial_memory, 6940 * 1024);  // 6456
+      CHECK_LE(booted_memory - initial_memory, 2950 * 1024);  // 2844
     }
   }
 }
@@ -140,8 +140,8 @@ TEST(MemoryAllocator) {
                           heap->MaxReserved(),
                           OLD_POINTER_SPACE,
                           NOT_EXECUTABLE);
-  Page* first_page =
-      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+  Page* first_page = memory_allocator->AllocatePage(
+      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
 
   first_page->InsertAfter(faked_space.anchor()->prev_page());
   CHECK(first_page->is_valid());
@@ -153,8 +153,8 @@ TEST(MemoryAllocator) {
   }
 
   // Again, we should get n or n - 1 pages.
-  Page* other =
-      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+  Page* other = memory_allocator->AllocatePage(
+      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
   CHECK(other->is_valid());
   total_pages++;
   other->InsertAfter(first_page);