Commit 419ea5fc authored by erik.corry@gmail.com

Reduce boot-up memory use of V8.

This is a recommit of http://codereview.chromium.org/9179012
after fixing what turned out to be unrelated out-of-memory
errors.
That was a rebase of http://codereview.chromium.org/9017009/
Review URL: https://chromiumcodereview.appspot.com/9289047

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10542 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f2eda210
@@ -1150,6 +1150,7 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
   MemoryChunk* chunk =
       Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
+                                                            desc.instr_size,
                                                             EXECUTABLE,
                                                             NULL);
   if (chunk == NULL) {
...
@@ -582,10 +582,14 @@ void Heap::ReserveSpace(
   PagedSpace* map_space = Heap::map_space();
   PagedSpace* cell_space = Heap::cell_space();
   LargeObjectSpace* lo_space = Heap::lo_space();
+  bool one_old_space_gc_has_been_performed = false;
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
+  bool old_space_gc_performed;
   while (gc_performed && counter++ < kThreshold) {
+    old_space_gc_performed = false;
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
       Heap::CollectGarbage(NEW_SPACE);
@@ -594,22 +598,27 @@ void Heap::ReserveSpace(
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
       Heap::CollectGarbage(OLD_POINTER_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
       Heap::CollectGarbage(OLD_DATA_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
       Heap::CollectGarbage(CODE_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
       Heap::CollectGarbage(MAP_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
       Heap::CollectGarbage(CELL_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
     // large-object allocations that are only just larger than the page size.
@@ -619,15 +628,22 @@ void Heap::ReserveSpace(
     // allocation in the other spaces.
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
-    if (!(lo_space->ReserveSpace(large_object_size))) {
+    // If we already did one GC in order to make space in old space, there is
+    // no sense in doing another one.  We will attempt to force through the
+    // large object space allocation, which comes directly from the OS,
+    // regardless of any soft limit.
+    if (!one_old_space_gc_has_been_performed &&
+        !(lo_space->ReserveSpace(large_object_size))) {
       Heap::CollectGarbage(LO_SPACE);
       gc_performed = true;
     }
+    if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
   }
   if (gc_performed) {
     // Failed to reserve the space after several attempts.
     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
   }
 }
...
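Note (illustrative, not part of the patch): the control flow added above can be seen in isolation below. The sketch uses made-up FakeSpace/ReserveAll names and a toy "GC"; it only demonstrates the intent stated in the new comment, namely that once a GC has already been run to make room in an old space, a dedicated large-object-space GC is skipped and the large-object reservation is forced through.

#include <cstdio>

struct FakeSpace {
  int free_bytes;
  // Pretend reservation: succeeds if enough bytes are free.
  bool Reserve(int bytes) { return bytes <= free_bytes; }
  // Pretend GC: frees a fixed amount.
  void CollectGarbage() { free_bytes += 64; }
};

bool ReserveAll(FakeSpace* old_space, FakeSpace* lo_space,
                int old_bytes, int lo_bytes) {
  bool one_old_space_gc_has_been_performed = false;
  bool gc_performed = true;
  for (int counter = 0; gc_performed && counter < 20; counter++) {
    gc_performed = false;
    bool old_space_gc_performed = false;
    if (!old_space->Reserve(old_bytes)) {
      old_space->CollectGarbage();
      gc_performed = true;
      old_space_gc_performed = true;
    }
    // Skip a dedicated large-object GC once an old-space GC has already run;
    // large-object pages come straight from the OS, so the reservation is
    // attempted regardless of any soft limit on later iterations.
    if (!one_old_space_gc_has_been_performed && !lo_space->Reserve(lo_bytes)) {
      lo_space->CollectGarbage();
      gc_performed = true;
    }
    if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
  }
  return !gc_performed;
}

int main() {
  FakeSpace old_space = {100};
  FakeSpace lo_space = {100};
  std::printf("reserved: %d\n", ReserveAll(&old_space, &lo_space, 150, 500));
}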
@@ -287,7 +287,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
   // It's difficult to filter out slots recorded for large objects.
   if (chunk->owner()->identity() == LO_SPACE &&
-      chunk->size() > static_cast<size_t>(Page::kPageSize) &&
+      chunk->size() > Page::kPageSize &&
       is_compacting) {
     chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
   }
...
@@ -2919,7 +2919,8 @@ static void SweepPrecisely(PagedSpace* space,
   for ( ; live_objects != 0; live_objects--) {
     Address free_end = object_address + offsets[live_index++] * kPointerSize;
     if (free_end != free_start) {
-      space->Free(free_start, static_cast<int>(free_end - free_start));
+      space->AddToFreeLists(free_start,
+                            static_cast<int>(free_end - free_start));
     }
     HeapObject* live_object = HeapObject::FromAddress(free_end);
     ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -2945,7 +2946,8 @@ static void SweepPrecisely(PagedSpace* space,
     cells[cell_index] = 0;
   }
   if (free_start != p->ObjectAreaEnd()) {
-    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
+    space->AddToFreeLists(free_start,
+                          static_cast<int>(p->ObjectAreaEnd() - free_start));
   }
   p->ResetLiveBytes();
 }
@@ -3238,7 +3240,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     Page* p = evacuation_candidates_[i];
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
+    space->AddToFreeLists(
+        p->ObjectAreaStart(),
+        static_cast<int>(p->ObjectAreaEnd() - p->ObjectAreaStart()));
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ClearEvacuationCandidate();
@@ -3555,8 +3559,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   }
   size_t size = block_address - p->ObjectAreaStart();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
-                                                static_cast<int>(size)));
+    freed_bytes += static_cast<int>(space->AddToFreeLists(
+        p->ObjectAreaStart(), static_cast<int>(size)));
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -3565,8 +3569,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->ObjectAreaStart();
-  freed_bytes += space->Free(p->ObjectAreaStart(),
-                             static_cast<int>(size));
+  freed_bytes += space->AddToFreeLists(p->ObjectAreaStart(),
+                                       static_cast<int>(size));
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object
@@ -3595,8 +3599,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
       // so now we need to find the start of the first live object at the
       // end of the free space.
       free_end = StartOfLiveObject(block_address, cell);
-      freed_bytes += space->Free(free_start,
-                                 static_cast<int>(free_end - free_start));
+      freed_bytes += space->AddToFreeLists(
+          free_start, static_cast<int>(free_end - free_start));
     }
   }
   // Update our undigested record of where the current free area started.
@@ -3610,8 +3614,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->Free(free_start,
-                               static_cast<int>(block_address - free_start));
+    freed_bytes += space->AddToFreeLists(
+        free_start, static_cast<int>(block_address - free_start));
   }
   p->ResetLiveBytes();
...
@@ -612,6 +612,7 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
     pages_[LO_SPACE].Add(address);
   }
   last_object_address_ = address;
+  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
   return address;
 }
@@ -622,7 +623,12 @@ HeapObject* Deserializer::GetAddressFromEnd(int space) {
   int offset = source_->GetInt();
   ASSERT(!SpaceIsLarge(space));
   offset <<= kObjectAlignmentBits;
-  return HeapObject::FromAddress(high_water_[space] - offset);
+  Address address = high_water_[space] - offset;
+  // This assert will fail if kMinimumSpaceSizes is too small for a space,
+  // because we rely on the fact that all allocation is linear when the VM
+  // is very young.
+  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
+  return HeapObject::FromAddress(address);
 }
...
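Note (illustrative, not part of the patch): the new assert checks that an object decoded as a back-reference from the snapshot still lies inside the page's object area, which holds as long as allocation has been purely linear. A minimal sketch of that decoding with hypothetical constants and addresses:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kObjectAlignmentBits = 2;   // hypothetical word alignment
  uintptr_t object_area_start = 0x10000;      // start of the page's object area
  uintptr_t high_water = 0x10040;             // next free address in the space
  int encoded_offset = 4;                     // back-reference, in aligned words
  uintptr_t offset = static_cast<uintptr_t>(encoded_offset)
                     << kObjectAlignmentBits;
  uintptr_t address = high_water - offset;    // 0x10030
  // The check added in the patch: the decoded address must not fall before
  // the object area of its page.
  assert(address >= object_area_start);
  assert(address == 0x10030);
  return 0;
}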
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "isolate.h"
+#include "spaces.h"
 #ifndef V8_SNAPSHOT_H_
 #define V8_SNAPSHOT_H_
@@ -86,6 +87,7 @@ class Snapshot {
   DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
 };
 } }  // namespace v8::internal
 #endif  // V8_SNAPSHOT_H_
@@ -164,12 +164,12 @@ Page* Page::Initialize(Heap* heap,
                        Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+  ASSERT(chunk->size() <= kPageSize);
   ASSERT(chunk->owner() == owner);
-  owner->IncreaseCapacity(Page::kObjectAreaSize);
-  owner->Free(page->ObjectAreaStart(),
-              static_cast<int>(page->ObjectAreaEnd() -
-                               page->ObjectAreaStart()));
+  int object_bytes =
+      static_cast<int>(page->ObjectAreaEnd() - page->ObjectAreaStart());
+  owner->IncreaseCapacity(object_bytes);
+  owner->AddToFreeLists(page->ObjectAreaStart(), object_bytes);
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
@@ -257,6 +257,7 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
   if (new_top > allocation_info_.limit) return NULL;
   allocation_info_.top = new_top;
+  ASSERT(new_top >= Page::FromAllocationTop(new_top)->ObjectAreaStart());
   return HeapObject::FromAddress(current_top);
 }
...
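Note (illustrative, not part of the patch): Page::Initialize now derives the usable object area from the page itself rather than the fixed Page::kObjectAreaSize, so a boot-time page that is smaller than kPageSize is accounted correctly in capacity and on the free lists. A toy sketch of that accounting with hypothetical FakePage/FakeSpace types:

#include <cassert>
#include <cstdint>

struct FakePage {
  uintptr_t area_start;
  uintptr_t area_end;
  uintptr_t ObjectAreaStart() const { return area_start; }
  uintptr_t ObjectAreaEnd() const { return area_end; }
};

struct FakeSpace {
  int capacity = 0;
  int free_bytes = 0;
  void IncreaseCapacity(int bytes) { capacity += bytes; }
  void AddToFreeLists(uintptr_t /*start*/, int bytes) { free_bytes += bytes; }
};

void InitializePage(FakeSpace* owner, const FakePage& page) {
  // Use the page's actual object area, not a fixed per-page constant.
  int object_bytes =
      static_cast<int>(page.ObjectAreaEnd() - page.ObjectAreaStart());
  owner->IncreaseCapacity(object_bytes);
  owner->AddToFreeLists(page.ObjectAreaStart(), object_bytes);
}

int main() {
  FakeSpace space;
  FakePage small_boot_page = {0x1000, 0x3000};  // 8 KB object area
  InitializePage(&space, small_boot_page);
  assert(space.capacity == 0x2000 && space.free_bytes == 0x2000);
  return 0;
}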
This diff is collapsed.
This diff is collapsed.
@@ -496,7 +496,6 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
   Address map_aligned_end = MapEndAlign(end);
   ASSERT(map_aligned_start == start);
-  ASSERT(map_aligned_end == end);
   FindPointersToNewSpaceInMaps(map_aligned_start,
                                map_aligned_end,
...@@ -524,52 +523,57 @@ void StoreBuffer::FindPointersToNewSpaceOnPage( ...@@ -524,52 +523,57 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
RegionCallback region_callback, RegionCallback region_callback,
ObjectSlotCallback slot_callback) { ObjectSlotCallback slot_callback) {
Address visitable_start = page->ObjectAreaStart(); Address visitable_start = page->ObjectAreaStart();
Address end_of_page = page->ObjectAreaEnd();
Address visitable_end = visitable_start; Address visitable_end = visitable_start;
Object* free_space_map = heap_->free_space_map(); Object* free_space_map = heap_->free_space_map();
Object* two_pointer_filler_map = heap_->two_pointer_filler_map(); Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
while (visitable_end < end_of_page) { while (true) { // While the page grows (doesn't normally happen).
Object* o = *reinterpret_cast<Object**>(visitable_end); Address end_of_page = page->ObjectAreaEnd();
// Skip fillers but not things that look like fillers in the special while (visitable_end < end_of_page) {
// garbage section which can contain anything. Object* o = *reinterpret_cast<Object**>(visitable_end);
if (o == free_space_map || // Skip fillers but not things that look like fillers in the special
o == two_pointer_filler_map || // garbage section which can contain anything.
(visitable_end == space->top() && visitable_end != space->limit())) { if (o == free_space_map ||
if (visitable_start != visitable_end) { o == two_pointer_filler_map ||
// After calling this the special garbage section may have moved. (visitable_end == space->top() && visitable_end != space->limit())) {
(this->*region_callback)(visitable_start, if (visitable_start != visitable_end) {
visitable_end, // After calling this the special garbage section may have moved.
slot_callback); (this->*region_callback)(visitable_start,
if (visitable_end >= space->top() && visitable_end < space->limit()) { visitable_end,
visitable_end = space->limit(); slot_callback);
visitable_start = visitable_end; if (visitable_end >= space->top() && visitable_end < space->limit()) {
continue; visitable_end = space->limit();
visitable_start = visitable_end;
continue;
}
}
if (visitable_end == space->top() && visitable_end != space->limit()) {
visitable_start = visitable_end = space->limit();
} else {
// At this point we are either at the start of a filler or we are at
// the point where the space->top() used to be before the
// visit_pointer_region call above. Either way we can skip the
// object at the current spot: We don't promise to visit objects
// allocated during heap traversal, and if space->top() moved then it
// must be because an object was allocated at this point.
visitable_start =
visitable_end + HeapObject::FromAddress(visitable_end)->Size();
visitable_end = visitable_start;
} }
}
if (visitable_end == space->top() && visitable_end != space->limit()) {
visitable_start = visitable_end = space->limit();
} else { } else {
// At this point we are either at the start of a filler or we are at ASSERT(o != free_space_map);
// the point where the space->top() used to be before the ASSERT(o != two_pointer_filler_map);
// visit_pointer_region call above. Either way we can skip the ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
// object at the current spot: We don't promise to visit objects visitable_end += kPointerSize;
// allocated during heap traversal, and if space->top() moved then it
// must be because an object was allocated at this point.
visitable_start =
visitable_end + HeapObject::FromAddress(visitable_end)->Size();
visitable_end = visitable_start;
} }
} else {
ASSERT(o != free_space_map);
ASSERT(o != two_pointer_filler_map);
ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
visitable_end += kPointerSize;
} }
ASSERT(visitable_end >= end_of_page);
// If the page did not grow we are done.
if (end_of_page == page->ObjectAreaEnd()) break;
} }
ASSERT(visitable_end == end_of_page); ASSERT(visitable_end == page->ObjectAreaEnd());
if (visitable_start != visitable_end) { if (visitable_start != visitable_end) {
(this->*region_callback)(visitable_start, (this->*region_callback)(visitable_start,
visitable_end, visitable_end,
......
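Note (illustrative, not part of the patch): the new outer loop re-reads the page's object-area end after every pass and repeats only if the area grew while it was being scanned. The same "re-check the end" pattern on a growable container, as a self-contained sketch:

#include <cstdio>
#include <vector>

// Visit every element, tolerating growth of the container during the visit.
void VisitAll(std::vector<int>* page, void (*callback)(int)) {
  size_t visited = 0;
  while (true) {  // While the page grows (doesn't normally happen).
    size_t end_of_page = page->size();
    while (visited < end_of_page) {
      callback((*page)[visited]);
      ++visited;
    }
    // If the page did not grow we are done.
    if (end_of_page == page->size()) break;
  }
}

int main() {
  std::vector<int> page = {1, 2, 3};
  VisitAll(&page, [](int v) { std::printf("%d\n", v); });
}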
@@ -153,11 +153,9 @@ int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
 }
-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 3-3, page 48, where the function is called clp2.
-inline uint32_t RoundUpToPowerOf2(uint32_t x) {
+template<typename int_type>
+inline int RoundUpToPowerOf2(int_type x_argument) {
+  uintptr_t x = static_cast<uintptr_t>(x_argument);
   ASSERT(x <= 0x80000000u);
   x = x - 1;
   x = x | (x >> 1);
@@ -165,7 +163,7 @@ inline uint32_t RoundUpToPowerOf2(uint32_t x) {
   x = x | (x >> 4);
   x = x | (x >> 8);
   x = x | (x >> 16);
-  return x + 1;
+  return static_cast<int_type>(x + 1);
 }
...
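Note (illustrative, not part of the patch): the deleted comment credited the bit-smearing trick to "Hacker's Delight" (fig. 3-3, where it is called clp2). Subtract one, smear the highest set bit into every lower position, then add one; a standalone uint32_t version with a few sanity checks:

#include <cassert>
#include <cstdint>

inline uint32_t RoundUpToPowerOf2(uint32_t x) {
  assert(x >= 1 && x <= 0x80000000u);  // 0 and values above 2^31 are excluded
  x = x - 1;
  x |= x >> 1;   // smear the top set bit downwards...
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;  // ...until all bits below it are set
  return x + 1;  // one past an all-ones suffix is a power of two
}

int main() {
  assert(RoundUpToPowerOf2(1) == 1);
  assert(RoundUpToPowerOf2(3) == 4);
  assert(RoundUpToPowerOf2(4) == 4);        // powers of two map to themselves
  assert(RoundUpToPowerOf2(4097) == 8192);
  return 0;
}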
@@ -1236,17 +1236,14 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
        obj = iterator.next()) {
     size_of_objects_2 += obj->Size();
   }
-  // Delta must be within 5% of the larger result.
-  // TODO(gc): Tighten this up by distinguishing between byte
-  // arrays that are real and those that merely mark free space
-  // on the heap.
+  // Delta must be within 1% of the larger result.
   if (size_of_objects_1 > size_of_objects_2) {
     intptr_t delta = size_of_objects_1 - size_of_objects_2;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_1 / 20, delta);
+    CHECK_GT(size_of_objects_1 / 100, delta);
   } else {
     intptr_t delta = size_of_objects_2 - size_of_objects_1;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
...
@@ -534,15 +534,15 @@ TEST(BootUpMemoryUse) {
   intptr_t booted_memory = MemoryInUse();
   if (sizeof(initial_memory) == 8) {
     if (v8::internal::Snapshot::IsEnabled()) {
-      CHECK_LE(booted_memory - initial_memory, 6654 * 1024);  // 6444.
+      CHECK_LE(booted_memory - initial_memory, 3050 * 1024);  // 2984.
     } else {
-      CHECK_LE(booted_memory - initial_memory, 6777 * 1024);  // 6596.
+      CHECK_LE(booted_memory - initial_memory, 3050 * 1024);  // 3008.
     }
   } else {
     if (v8::internal::Snapshot::IsEnabled()) {
-      CHECK_LE(booted_memory - initial_memory, 6500 * 1024);  // 6356.
+      CHECK_LE(booted_memory - initial_memory, 2000 * 1024);  // 1940.
     } else {
-      CHECK_LE(booted_memory - initial_memory, 6654 * 1024);  // 6424
+      CHECK_LE(booted_memory - initial_memory, 2000 * 1024);  // 1948
     }
   }
 }
...
@@ -140,8 +140,8 @@ TEST(MemoryAllocator) {
                          heap->MaxReserved(),
                          OLD_POINTER_SPACE,
                          NOT_EXECUTABLE);
-  Page* first_page =
-      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+  Page* first_page = memory_allocator->AllocatePage(
+      Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
   first_page->InsertAfter(faked_space.anchor()->prev_page());
   CHECK(first_page->is_valid());
@@ -154,7 +154,8 @@ TEST(MemoryAllocator) {
   // Again, we should get n or n - 1 pages.
   Page* other =
-      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+      memory_allocator->AllocatePage(
+          Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
   CHECK(other->is_valid());
   total_pages++;
   other->InsertAfter(first_page);
...