Commit 419ea5fc authored by erik.corry@gmail.com

Reduce boot-up memory use of V8.

This is a recommit of http://codereview.chromium.org/9179012
after fixing what turned out to be unrelated out-of-memory
errors. That change was itself a rebase of
http://codereview.chromium.org/9017009/.

Review URL: https://chromiumcodereview.appspot.com/9289047

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10542 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f2eda210
......@@ -1150,6 +1150,7 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
MemoryChunk* chunk =
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
desc.instr_size,
EXECUTABLE,
NULL);
if (chunk == NULL) {
......
......@@ -582,10 +582,14 @@ void Heap::ReserveSpace(
PagedSpace* map_space = Heap::map_space();
PagedSpace* cell_space = Heap::cell_space();
LargeObjectSpace* lo_space = Heap::lo_space();
bool one_old_space_gc_has_been_performed = false;
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
bool old_space_gc_performed;
while (gc_performed && counter++ < kThreshold) {
old_space_gc_performed = false;
gc_performed = false;
if (!new_space->ReserveSpace(new_space_size)) {
Heap::CollectGarbage(NEW_SPACE);
......@@ -594,22 +598,27 @@ void Heap::ReserveSpace(
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
Heap::CollectGarbage(OLD_POINTER_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
if (!(old_data_space->ReserveSpace(data_space_size))) {
Heap::CollectGarbage(OLD_DATA_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
if (!(code_space->ReserveSpace(code_space_size))) {
Heap::CollectGarbage(CODE_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
if (!(map_space->ReserveSpace(map_space_size))) {
Heap::CollectGarbage(MAP_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
if (!(cell_space->ReserveSpace(cell_space_size))) {
Heap::CollectGarbage(CELL_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for a series of
// large-object allocations that are only just larger than the page size.
......@@ -619,15 +628,22 @@ void Heap::ReserveSpace(
// allocation in the other spaces.
large_object_size += cell_space_size + map_space_size + code_space_size +
data_space_size + pointer_space_size;
if (!(lo_space->ReserveSpace(large_object_size))) {
// If we already did one GC in order to make space in old space, there is
// no sense in doing another one. We will attempt to force through the
// large object space allocation, which comes directly from the OS,
// regardless of any soft limit.
if (!one_old_space_gc_has_been_performed &&
!(lo_space->ReserveSpace(large_object_size))) {
Heap::CollectGarbage(LO_SPACE);
gc_performed = true;
}
if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
}
if (gc_performed) {
// Failed to reserve the space after several attempts.
V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
}
}
......
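For orientation, here is a minimal standalone sketch of the control flow the new `one_old_space_gc_has_been_performed` flag introduces: keep retrying reservations with GC up to a threshold, but once any old-space GC has run, skip the extra collection for the large-object space, since its pages come straight from the OS regardless of the soft limit. The types and names below are illustrative stand-ins, not the real V8 classes.

```cpp
// Illustrative sketch only: "Space" and "CollectGarbage" are stand-ins.
#include <cstdio>
#include <vector>

struct Space {
  const char* name;
  bool is_old_space;       // Old/code/map/cell spaces count as "old" here.
  int failures_remaining;  // How many times ReserveSpace fails before success.
  bool ReserveSpace(int /*bytes*/) {
    if (failures_remaining > 0) { --failures_remaining; return false; }
    return true;
  }
};

static void CollectGarbage(const Space& space) {
  std::printf("GC triggered by %s\n", space.name);
}

// Returns true if all reservations eventually succeeded. (V8 instead calls
// FatalProcessOutOfMemory when the threshold is exhausted.)
bool ReserveAllSpaces(std::vector<Space>& spaces, Space& lo_space, int bytes) {
  static const int kThreshold = 20;
  bool one_old_space_gc_has_been_performed = false;
  bool gc_performed = true;
  int counter = 0;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    bool old_space_gc_performed = false;
    for (Space& space : spaces) {
      if (!space.ReserveSpace(bytes)) {
        CollectGarbage(space);
        gc_performed = true;
        if (space.is_old_space) old_space_gc_performed = true;
      }
    }
    // Once an old-space GC has run, a further GC just for the large-object
    // space is pointless: that allocation will be forced through directly
    // from the OS, so we do not even re-check the reservation.
    if (!one_old_space_gc_has_been_performed && !lo_space.ReserveSpace(bytes)) {
      CollectGarbage(lo_space);
      gc_performed = true;
    }
    if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
  }
  return !gc_performed;
}
```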
......@@ -287,7 +287,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
// It's difficult to filter out slots recorded for large objects.
if (chunk->owner()->identity() == LO_SPACE &&
chunk->size() > static_cast<size_t>(Page::kPageSize) &&
chunk->size() > Page::kPageSize &&
is_compacting) {
chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
......
......@@ -2919,7 +2919,8 @@ static void SweepPrecisely(PagedSpace* space,
for ( ; live_objects != 0; live_objects--) {
Address free_end = object_address + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
space->Free(free_start, static_cast<int>(free_end - free_start));
space->AddToFreeLists(free_start,
static_cast<int>(free_end - free_start));
}
HeapObject* live_object = HeapObject::FromAddress(free_end);
ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
......@@ -2945,7 +2946,8 @@ static void SweepPrecisely(PagedSpace* space,
cells[cell_index] = 0;
}
if (free_start != p->ObjectAreaEnd()) {
space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
space->AddToFreeLists(free_start,
static_cast<int>(p->ObjectAreaEnd() - free_start));
}
p->ResetLiveBytes();
}
......@@ -3238,7 +3240,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Page* p = evacuation_candidates_[i];
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
space->AddToFreeLists(
p->ObjectAreaStart(),
static_cast<int>(p->ObjectAreaEnd() - p->ObjectAreaStart()));
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
......@@ -3555,8 +3559,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
}
size_t size = block_address - p->ObjectAreaStart();
if (cell_index == last_cell_index) {
freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
static_cast<int>(size)));
freed_bytes += static_cast<int>(space->AddToFreeLists(
p->ObjectAreaStart(), static_cast<int>(size)));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
......@@ -3565,8 +3569,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
// Free the first free space.
size = free_end - p->ObjectAreaStart();
freed_bytes += space->Free(p->ObjectAreaStart(),
static_cast<int>(size));
freed_bytes += space->AddToFreeLists(p->ObjectAreaStart(),
static_cast<int>(size));
// The start of the current free area is represented in undigested form by
// the address of the last 32-word section that contained a live object and
// the marking bitmap for that cell, which describes where the live object
......@@ -3595,8 +3599,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
// so now we need to find the start of the first live object at the
// end of the free space.
free_end = StartOfLiveObject(block_address, cell);
freed_bytes += space->Free(free_start,
static_cast<int>(free_end - free_start));
freed_bytes += space->AddToFreeLists(
free_start, static_cast<int>(free_end - free_start));
}
}
// Update our undigested record of where the current free area started.
......@@ -3610,8 +3614,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
// Handle the free space at the end of the page.
if (block_address - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
freed_bytes += space->Free(free_start,
static_cast<int>(block_address - free_start));
freed_bytes += space->AddToFreeLists(
free_start, static_cast<int>(block_address - free_start));
}
p->ResetLiveBytes();
......
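The conservative sweeper's comments above refer to cells of the marking bitmap: each 32-bit cell covers a 32-word section of the page, and set bits record where live objects were marked. As a rough illustration only (it assumes one mark bit per object start and ignores objects whose mark lies in an earlier cell, so it is a simplification of what StartOfLiveObject does), the first live word described by a cell can be located from the lowest set bit; everything before it in the swept region is free space that can be handed to AddToFreeLists as one block.

```cpp
#include <cassert>
#include <cstdint>

const int kPointerSize = sizeof(void*);  // Word size covered by one mark bit.

// Illustrative helper, not V8's StartOfLiveObject: given the address of the
// first word a bitmap cell covers and the cell's 32 mark bits, return the
// address of the first word that holds a (simplified) live-object mark.
uintptr_t FirstLiveWordInCell(uintptr_t cell_base, uint32_t cell_bits) {
  assert(cell_bits != 0);  // Caller guarantees the cell marks something live.
  int word = 0;
  while ((cell_bits & 1u) == 0) {  // Scan for the lowest set bit.
    cell_bits >>= 1;
    ++word;
  }
  return cell_base + static_cast<uintptr_t>(word) * kPointerSize;
}

// The free block preceding it would then be released roughly like:
//   int size = static_cast<int>(FirstLiveWordInCell(base, bits) - free_start);
//   space->AddToFreeLists(free_start, size);   // hypothetical call site
```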
......@@ -612,6 +612,7 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
pages_[LO_SPACE].Add(address);
}
last_object_address_ = address;
ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
return address;
}
......@@ -622,7 +623,12 @@ HeapObject* Deserializer::GetAddressFromEnd(int space) {
int offset = source_->GetInt();
ASSERT(!SpaceIsLarge(space));
offset <<= kObjectAlignmentBits;
return HeapObject::FromAddress(high_water_[space] - offset);
Address address = high_water_[space] - offset;
// This assert will fail if kMinimumSpaceSizes is too small for a space,
// because we rely on the fact that all allocation is linear when the VM
// is very young.
ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
return HeapObject::FromAddress(address);
}
......
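The new assert documents an assumption the deserializer makes: a back-reference is an aligned-word offset counted back from the current high-water mark of a space, so as long as early allocation is strictly linear the decoded address can never fall before the page's object area. A hedged, self-contained sketch of just the decoding arithmetic (the alignment constant is an assumption for 64-bit builds, and the encoding details are simplified):

```cpp
#include <cassert>
#include <cstdint>

const int kObjectAlignmentBits = 3;  // 8-byte object alignment (assumption).

// Decode a back-reference: 'encoded' counts aligned words back from the
// space's high-water mark (the next address that would be allocated).
uintptr_t DecodeBackReference(uintptr_t high_water, int encoded,
                              uintptr_t object_area_start) {
  uintptr_t offset = static_cast<uintptr_t>(encoded) << kObjectAlignmentBits;
  uintptr_t address = high_water - offset;
  // Mirrors the new ASSERT: with linear allocation the target must still lie
  // inside the page's object area.
  assert(address >= object_area_start);
  return address;
}
```

For example, with a high-water mark of 0x10000 and an encoded value of 4, the reference points 32 bytes back, to 0xFFE0.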
......@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "isolate.h"
#include "spaces.h"
#ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_
......@@ -86,6 +87,7 @@ class Snapshot {
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};
} } // namespace v8::internal
#endif // V8_SNAPSHOT_H_
......@@ -164,12 +164,12 @@ Page* Page::Initialize(Heap* heap,
Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
ASSERT(chunk->size() <= kPageSize);
ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(Page::kObjectAreaSize);
owner->Free(page->ObjectAreaStart(),
static_cast<int>(page->ObjectAreaEnd() -
page->ObjectAreaStart()));
int object_bytes =
static_cast<int>(page->ObjectAreaEnd() - page->ObjectAreaStart());
owner->IncreaseCapacity(object_bytes);
owner->AddToFreeLists(page->ObjectAreaStart(), object_bytes);
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
......@@ -257,6 +257,7 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
if (new_top > allocation_info_.limit) return NULL;
allocation_info_.top = new_top;
ASSERT(new_top >= Page::FromAllocationTop(new_top)->ObjectAreaStart());
return HeapObject::FromAddress(current_top);
}
......
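The `AllocateLinearly` hunk above is the classic bump-pointer fast path; the new assert only pins down that the bumped pointer stays inside the page's object area now that object areas can be smaller than a full page. A minimal sketch of the pattern, with illustrative types rather than the V8 classes:

```cpp
#include <cstddef>
#include <cstdint>

struct AllocationInfo {
  uintptr_t top;    // Next free address.
  uintptr_t limit;  // One past the last usable address.
};

// Bump-pointer allocation: succeed only if the whole object fits below the
// limit; otherwise return 0 and let the caller fall back to the free lists.
uintptr_t AllocateLinearly(AllocationInfo* info, size_t size_in_bytes) {
  uintptr_t current_top = info->top;
  uintptr_t new_top = current_top + size_in_bytes;
  if (new_top > info->limit) return 0;  // Slow path: free lists or GC.
  info->top = new_top;
  return current_top;  // The new object lives at the old top.
}
```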
......@@ -496,7 +496,6 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
Address map_aligned_end = MapEndAlign(end);
ASSERT(map_aligned_start == start);
ASSERT(map_aligned_end == end);
FindPointersToNewSpaceInMaps(map_aligned_start,
map_aligned_end,
......@@ -524,52 +523,57 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
RegionCallback region_callback,
ObjectSlotCallback slot_callback) {
Address visitable_start = page->ObjectAreaStart();
Address end_of_page = page->ObjectAreaEnd();
Address visitable_end = visitable_start;
Object* free_space_map = heap_->free_space_map();
Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
while (visitable_end < end_of_page) {
Object* o = *reinterpret_cast<Object**>(visitable_end);
// Skip fillers but not things that look like fillers in the special
// garbage section which can contain anything.
if (o == free_space_map ||
o == two_pointer_filler_map ||
(visitable_end == space->top() && visitable_end != space->limit())) {
if (visitable_start != visitable_end) {
// After calling this the special garbage section may have moved.
(this->*region_callback)(visitable_start,
visitable_end,
slot_callback);
if (visitable_end >= space->top() && visitable_end < space->limit()) {
visitable_end = space->limit();
visitable_start = visitable_end;
continue;
while (true) { // While the page grows (doesn't normally happen).
Address end_of_page = page->ObjectAreaEnd();
while (visitable_end < end_of_page) {
Object* o = *reinterpret_cast<Object**>(visitable_end);
// Skip fillers but not things that look like fillers in the special
// garbage section which can contain anything.
if (o == free_space_map ||
o == two_pointer_filler_map ||
(visitable_end == space->top() && visitable_end != space->limit())) {
if (visitable_start != visitable_end) {
// After calling this the special garbage section may have moved.
(this->*region_callback)(visitable_start,
visitable_end,
slot_callback);
if (visitable_end >= space->top() && visitable_end < space->limit()) {
visitable_end = space->limit();
visitable_start = visitable_end;
continue;
}
}
if (visitable_end == space->top() && visitable_end != space->limit()) {
visitable_start = visitable_end = space->limit();
} else {
// At this point we are either at the start of a filler or we are at
// the point where the space->top() used to be before the
// visit_pointer_region call above. Either way we can skip the
// object at the current spot: We don't promise to visit objects
// allocated during heap traversal, and if space->top() moved then it
// must be because an object was allocated at this point.
visitable_start =
visitable_end + HeapObject::FromAddress(visitable_end)->Size();
visitable_end = visitable_start;
}
}
if (visitable_end == space->top() && visitable_end != space->limit()) {
visitable_start = visitable_end = space->limit();
} else {
// At this point we are either at the start of a filler or we are at
// the point where the space->top() used to be before the
// visit_pointer_region call above. Either way we can skip the
// object at the current spot: We don't promise to visit objects
// allocated during heap traversal, and if space->top() moved then it
// must be because an object was allocated at this point.
visitable_start =
visitable_end + HeapObject::FromAddress(visitable_end)->Size();
visitable_end = visitable_start;
ASSERT(o != free_space_map);
ASSERT(o != two_pointer_filler_map);
ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
visitable_end += kPointerSize;
}
} else {
ASSERT(o != free_space_map);
ASSERT(o != two_pointer_filler_map);
ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
visitable_end += kPointerSize;
}
ASSERT(visitable_end >= end_of_page);
// If the page did not grow we are done.
if (end_of_page == page->ObjectAreaEnd()) break;
}
ASSERT(visitable_end == end_of_page);
ASSERT(visitable_end == page->ObjectAreaEnd());
if (visitable_start != visitable_end) {
(this->*region_callback)(visitable_start,
visitable_end,
......
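The rewritten store-buffer loop above scans a page word by word, flushing an accumulated visitable region whenever it hits a filler, and is now wrapped in an outer `while (true)` so that if a region callback causes the page's object area to grow, the scan simply resumes up to the new end. A rough sketch of that shape follows; the predicate and callback are toys, fillers are skipped one word at a time for brevity, and the real code additionally special-cases the linear-allocation gap between space->top() and space->limit().

```cpp
#include <cstdint>
#include <functional>

const int kPointerSize = sizeof(void*);

// Toy page: the object area may grow while we scan (end can move).
struct ToyPage {
  uintptr_t start;
  uintptr_t end;
  uintptr_t ObjectAreaStart() const { return start; }
  uintptr_t ObjectAreaEnd() const { return end; }
};

void ScanPage(ToyPage* page,
              const std::function<bool(uintptr_t)>& is_filler,
              const std::function<void(uintptr_t, uintptr_t)>& visit_region) {
  uintptr_t visitable_start = page->ObjectAreaStart();
  uintptr_t visitable_end = visitable_start;
  while (true) {  // Re-enter if the page grew under us (rare).
    uintptr_t end_of_page = page->ObjectAreaEnd();
    while (visitable_end < end_of_page) {
      if (is_filler(visitable_end)) {
        // Flush the accumulated region, then step over the filler word.
        if (visitable_start != visitable_end) {
          visit_region(visitable_start, visitable_end);
        }
        visitable_start = visitable_end + kPointerSize;
        visitable_end = visitable_start;
      } else {
        visitable_end += kPointerSize;
      }
    }
    if (end_of_page == page->ObjectAreaEnd()) break;  // Page did not grow.
  }
  if (visitable_start != visitable_end) {
    visit_region(visitable_start, visitable_end);  // Trailing region.
  }
}
```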
......@@ -153,11 +153,9 @@ int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
}
// Returns the smallest power of two which is >= x. If you pass in a
// number that is already a power of two, it is returned as is.
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
// figure 3-3, page 48, where the function is called clp2.
inline uint32_t RoundUpToPowerOf2(uint32_t x) {
template<typename int_type>
inline int_type RoundUpToPowerOf2(int_type x_argument) {
uintptr_t x = static_cast<uintptr_t>(x_argument);
ASSERT(x <= 0x80000000u);
x = x - 1;
x = x | (x >> 1);
......@@ -165,7 +163,7 @@ inline uint32_t RoundUpToPowerOf2(uint32_t x) {
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
return x + 1;
return static_cast<int_type>(x + 1);
}
......
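The bit-smearing in RoundUpToPowerOf2 OR-s shifted copies of `x - 1` so that every bit below its highest set bit becomes one, producing 2^k - 1; adding one then yields the next power of two, and the initial `x - 1` is what makes an exact power of two come back unchanged. A small self-contained illustration, fixed to uint32_t rather than the templated version above:

```cpp
#include <cassert>
#include <cstdint>

// Same clp2 trick as in the diff, spelled out for uint32_t only.
uint32_t RoundUpToPowerOf2(uint32_t x) {
  assert(x <= 0x80000000u);
  x = x - 1;     // e.g. 33 -> 32  (0b100000)
  x |= x >> 1;   //       -> 48   (0b110000)
  x |= x >> 2;   //       -> 60   (0b111100)
  x |= x >> 4;   //       -> 63   (0b111111)
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;  //       -> 64
}

int main() {
  assert(RoundUpToPowerOf2(1) == 1);
  assert(RoundUpToPowerOf2(33) == 64);
  assert(RoundUpToPowerOf2(64) == 64);  // Powers of two are unchanged.
  return 0;
}
```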
......@@ -1236,17 +1236,14 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
obj = iterator.next()) {
size_of_objects_2 += obj->Size();
}
// Delta must be within 5% of the larger result.
// TODO(gc): Tighten this up by distinguishing between byte
// arrays that are real and those that merely mark free space
// on the heap.
// Delta must be within 1% of the larger result.
if (size_of_objects_1 > size_of_objects_2) {
intptr_t delta = size_of_objects_1 - size_of_objects_2;
PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
"Iterator: %" V8_PTR_PREFIX "d, "
"delta: %" V8_PTR_PREFIX "d\n",
size_of_objects_1, size_of_objects_2, delta);
CHECK_GT(size_of_objects_1 / 20, delta);
CHECK_GT(size_of_objects_1 / 100, delta);
} else {
intptr_t delta = size_of_objects_2 - size_of_objects_1;
PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
......
......@@ -534,15 +534,15 @@ TEST(BootUpMemoryUse) {
intptr_t booted_memory = MemoryInUse();
if (sizeof(initial_memory) == 8) {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(booted_memory - initial_memory, 6654 * 1024); // 6444.
CHECK_LE(booted_memory - initial_memory, 3050 * 1024); // 2984.
} else {
CHECK_LE(booted_memory - initial_memory, 6777 * 1024); // 6596.
CHECK_LE(booted_memory - initial_memory, 3050 * 1024); // 3008.
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(booted_memory - initial_memory, 6500 * 1024); // 6356.
CHECK_LE(booted_memory - initial_memory, 2000 * 1024); // 1940.
} else {
CHECK_LE(booted_memory - initial_memory, 6654 * 1024); // 6424
CHECK_LE(booted_memory - initial_memory, 2000 * 1024); // 1948
}
}
}
......
......@@ -140,8 +140,8 @@ TEST(MemoryAllocator) {
heap->MaxReserved(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
Page* first_page =
memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
Page* first_page = memory_allocator->AllocatePage(
Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
first_page->InsertAfter(faked_space.anchor()->prev_page());
CHECK(first_page->is_valid());
......@@ -154,7 +154,8 @@ TEST(MemoryAllocator) {
// Again, we should get n or n - 1 pages.
Page* other =
memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
memory_allocator->AllocatePage(
Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
CHECK(other->is_valid());
total_pages++;
other->InsertAfter(first_page);
......