Commit 5a8d1764 authored by erik.corry@gmail.com

Refactoring of snapshots.  This simplifies the serialization code and
improves the speed of deserializing code.  The current startup
time improvement for V8 is around 6%, but code deserialization
is sped up disproportionately, and we will soon have more
code in the snapshot.
* Removed support for deserializing into large object space.
  The regular pages are 1Mbyte now and that is plenty.  This
  is a big simplification.
* Instead of reserving space for the snapshot we actually
  allocate it now.  This removes some special casing from
  the memory management and simplifies deserialization, since
  we are just bumping a pointer rather than calling the
  normal allocation routines during deserialization (see the
  first sketch after this list).
* Record in the snapshot how much space we need to boot up, and
  allocate it up front instead of just assuming that allocations
  in a new VM will always be linear.
* In the snapshot we always address an object as a negative
  offset from the current allocation point (illustrated in the
  first sketch after this list).  We used to sometimes address
  from the start of the deserialized data, but this is less
  useful now that we have good support for roots and
  repetitions in the deserialization data.
* Code objects were previously deserialized (like other
  objects) by alternating raw data (deserialized with memcpy)
  and pointers (to external references, other objects, etc.).
  Now we deserialize code objects with a single memcpy,
  followed by a series of skips and pointers that partially
  overwrite the code we memcopied out of the snapshot (see the
  second sketch after this list).  The skips are sometimes
  merged into the following instruction in the deserialization
  data to reduce dispatch time.
* Integers in the snapshot were stored in a variable-length
  format that gives a compact representation for small positive
  integers.  This is still the case, but the new encoding can
  be decoded without branches or conditional instructions,
  which is faster on a modern CPU (see the last sketch after
  this list).
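
To make the bump-pointer scheme and the negative-offset back-references
concrete, here is a minimal hypothetical sketch (the Reservation class
and its methods are invented for illustration and are not V8's actual
deserializer API): each space's size is recorded in the snapshot, the
reservation is allocated up front, every allocation during
deserialization just bumps a pointer, and a back-reference is resolved
as a negative offset from the current allocation point.

// Minimal sketch of bump-pointer allocation during deserialization.
// Hypothetical names; the real deserializer keeps one reservation per
// space (new, old pointer, old data, code, map, cell).
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

class Reservation {
 public:
  // The size comes from the snapshot, so the buffer never needs to grow.
  explicit Reservation(size_t size) : buffer_(size), top_(0) {}

  // Allocation is just bumping a pointer: no free lists, no GC checks.
  uint8_t* Allocate(size_t bytes) {
    assert(top_ + bytes <= buffer_.size());
    uint8_t* result = buffer_.data() + top_;
    top_ += bytes;
    return result;
  }

  // A back-reference in the snapshot is a negative offset from the
  // current allocation point, so resolving it is a single subtraction.
  uint8_t* ResolveBackReference(size_t offset_from_top) {
    assert(offset_from_top <= top_);
    return buffer_.data() + top_ - offset_from_top;
  }

 private:
  std::vector<uint8_t> buffer_;
  size_t top_;
};

int main() {
  Reservation code_space(1 << 20);       // Size recorded in the snapshot.
  uint8_t* first = code_space.Allocate(64);
  code_space.Allocate(32);
  // The first object is addressed as an offset back from the current top.
  assert(code_space.ResolveBackReference(64 + 32) == first);
  (void)first;
  return 0;
}

Recording the per-space sizes in the snapshot (the new .size file and
the Snapshot::*_space_used_ constants in the diff below) is what makes
this up-front allocation possible.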
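The new code-object path can be sketched as follows (again with
invented helper names, not the serializer's real record format): the
object body is copied with a single memcpy, and a list of skip/pointer
fixups then overwrites the embedded pointers in place.

// Sketch: deserialize a code object with one memcpy, then patch the
// pointer slots.  Each fixup says how many raw bytes to skip before
// overwriting the next pointer, mirroring the skip-then-pointer records
// in the deserialization data.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Fixup {
  size_t skip;       // Raw instruction bytes to leave untouched.
  uintptr_t target;  // Pointer value to write (external reference, object).
};

void DeserializeCode(uint8_t* dst, const uint8_t* snapshot, size_t size,
                     const std::vector<Fixup>& fixups) {
  // One memcpy for the whole object instead of alternating copy and patch.
  std::memcpy(dst, snapshot, size);
  size_t pos = 0;
  for (const Fixup& f : fixups) {
    pos += f.skip;                                        // Skip raw code.
    std::memcpy(dst + pos, &f.target, sizeof(f.target));  // Overwrite pointer.
    pos += sizeof(f.target);
  }
}

Merging a skip into the following pointer record, as described above,
just means the skip count rides along in the same deserialization
record as the pointer, so the dispatch loop handles one record instead
of two.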
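The branchless integer encoding can be sketched like this (an
illustration of the general idea rather than V8's exact byte format):
the low two bits of the encoded word store how many bytes were written,
the reader unconditionally loads four bytes, and a mask derived from
that length strips off the bytes that belong to the next record.

// Sketch of a variable-length integer format that decodes without
// branches or conditional instructions.  Small values take one byte.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

void PutInt(std::vector<uint8_t>* out, uint32_t value) {
  assert(value < (1u << 30));  // Two bits are reserved for the length.
  uint32_t shifted = value << 2;
  int bytes = (shifted > 0xFFFFFF) ? 4 : (shifted > 0xFFFF) ? 3
                                       : (shifted > 0xFF) ? 2 : 1;
  uint32_t encoded = shifted | (bytes - 1);
  for (int i = 0; i < bytes; i++) {
    out->push_back((encoded >> (i * 8)) & 0xFF);
  }
}

// Always loads four bytes (the stream is padded at the end), then masks
// away the bytes belonging to later records -- no branches needed.
uint32_t GetInt(const uint8_t* data, int* advance) {
  uint32_t word;
  std::memcpy(&word, data, sizeof(word));  // Assumes a little-endian host.
  int bytes = static_cast<int>(word & 3) + 1;
  uint32_t mask = 0xFFFFFFFFu >> ((4 - bytes) * 8);
  *advance = bytes;
  return (word & mask) >> 2;
}

int main() {
  std::vector<uint8_t> stream;
  PutInt(&stream, 5);
  PutInt(&stream, 70000);
  stream.resize(stream.size() + 4);  // Padding so the decoder may over-read.
  int advance = 0;
  const uint8_t* p = stream.data();
  assert(GetInt(p, &advance) == 5);
  p += advance;
  assert(GetInt(p, &advance) == 70000);
  return 0;
}

Because the length lives in the encoded word itself, the only
data-dependent work in the decoder is the variable advance, which does
not involve a mispredicted branch.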
Review URL: https://chromiumcodereview.appspot.com/10918067

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12505 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 77a7d9f5
......@@ -142,7 +142,9 @@ Handle<Code> CodeStub::GetCode() {
}
Activate(code);
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
ASSERT(!NeedsImmovableCode() ||
heap->lo_space()->Contains(code) ||
heap->code_space()->FirstPage()->Contains(code->address()));
return Handle<Code>(code, isolate);
}
......
......@@ -741,17 +741,6 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
}
LinearAllocationScope::LinearAllocationScope() {
HEAP->linear_allocation_scope_depth_++;
}
LinearAllocationScope::~LinearAllocationScope() {
HEAP->linear_allocation_scope_depth_--;
ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
}
#ifdef DEBUG
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
......
......@@ -417,6 +417,7 @@ void Heap::GarbageCollectionPrologue() {
store_buffer()->GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
intptr_t total = 0;
AllSpaces spaces;
......@@ -426,6 +427,17 @@ intptr_t Heap::SizeOfObjects() {
return total;
}
void Heap::RepairFreeListsAfterBoot() {
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
space->RepairFreeListsAfterBoot();
}
}
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
LiveObjectList::GCEpilogue();
......@@ -668,67 +680,42 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
void Heap::ReserveSpace(
int new_space_size,
int pointer_space_size,
int data_space_size,
int code_space_size,
int map_space_size,
int cell_space_size,
int large_object_size) {
NewSpace* new_space = Heap::new_space();
PagedSpace* old_pointer_space = Heap::old_pointer_space();
PagedSpace* old_data_space = Heap::old_data_space();
PagedSpace* code_space = Heap::code_space();
PagedSpace* map_space = Heap::map_space();
PagedSpace* cell_space = Heap::cell_space();
LargeObjectSpace* lo_space = Heap::lo_space();
intptr_t *sizes,
Address *locations_out) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
if (!new_space->ReserveSpace(new_space_size)) {
Heap::CollectGarbage(NEW_SPACE,
"failed to reserve space in the new space");
gc_performed = true;
}
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
"failed to reserve space in the old pointer space");
gc_performed = true;
}
if (!(old_data_space->ReserveSpace(data_space_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
"failed to reserve space in the old data space");
gc_performed = true;
}
if (!(code_space->ReserveSpace(code_space_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
"failed to reserve space in the code space");
gc_performed = true;
}
if (!(map_space->ReserveSpace(map_space_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
"failed to reserve space in the map space");
gc_performed = true;
}
if (!(cell_space->ReserveSpace(cell_space_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
"failed to reserve space in the cell space");
gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for a series of
// large-object allocations that are only just larger than the page size.
large_object_size *= 2;
// The ReserveSpace method on the large object space checks how much
// we can expand the old generation. This includes expansion caused by
// allocation in the other spaces.
large_object_size += cell_space_size + map_space_size + code_space_size +
data_space_size + pointer_space_size;
if (!(lo_space->ReserveSpace(large_object_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
"failed to reserve space in the large object space");
gc_performed = true;
ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
if (sizes[space] != 0) {
MaybeObject* allocation;
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRaw(sizes[space]);
} else {
allocation = paged_space(space)->AllocateRaw(sizes[space]);
}
FreeListNode* node;
if (!allocation->To<FreeListNode>(&node)) {
if (space == NEW_SPACE) {
Heap::CollectGarbage(NEW_SPACE,
"failed to reserve space in the new space");
} else {
AbortIncrementalMarkingAndCollectGarbage(
this,
static_cast<AllocationSpace>(space),
"failed to reserve space in paged space");
}
gc_performed = true;
break;
} else {
// Mark with a free list node, in case we have a GC before
// deserializing.
node->set_size(this, sizes[space]);
locations_out[space] = node->address();
}
}
}
}
......@@ -3600,17 +3587,27 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
MaybeObject* maybe_result;
// Large code objects and code objects which should stay at a fixed address
// are allocated in large object space.
if (obj_size > code_space()->AreaSize() || immovable) {
HeapObject* result;
bool force_lo_space = obj_size > code_space()->AreaSize();
if (force_lo_space) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
if (immovable && !force_lo_space &&
// Objects on the first page of each space are never moved.
!code_space_->FirstPage()->Contains(result->address())) {
// Discard the first code allocation, which was on a page where it could be
// moved.
CreateFillerObjectAt(result->address(), obj_size);
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
}
// Initialize the object
HeapObject::cast(result)->set_map_no_write_barrier(code_map());
result->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
......
......@@ -508,6 +508,24 @@ class Heap {
MapSpace* map_space() { return map_space_; }
CellSpace* cell_space() { return cell_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
PagedSpace* paged_space(int idx) {
switch (idx) {
case OLD_POINTER_SPACE:
return old_pointer_space();
case OLD_DATA_SPACE:
return old_data_space();
case MAP_SPACE:
return map_space();
case CELL_SPACE:
return cell_space();
case CODE_SPACE:
return code_space();
case NEW_SPACE:
case LO_SPACE:
UNREACHABLE();
}
return NULL;
}
bool always_allocate() { return always_allocate_scope_depth_ != 0; }
Address always_allocate_scope_depth_address() {
......@@ -657,6 +675,9 @@ class Heap {
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
// For use during bootup.
void RepairFreeListsAfterBoot();
// Allocates and fully initializes a String. There are two String
// encodings: ASCII and two byte. One should choose between the three string
// allocation functions based on the encoding of the string buffer used to
......@@ -1309,20 +1330,9 @@ class Heap {
// Commits from space if it is uncommitted.
void EnsureFromSpaceIsCommitted();
// Support for partial snapshots. After calling this we can allocate a
// certain number of bytes using only linear allocation (with a
// LinearAllocationScope and an AlwaysAllocateScope) without using freelists
// or causing a GC. It returns true if space was reserved or false if a GC is
// needed. For paged spaces the space requested must include the space wasted
// at the end of each page when allocating linearly.
void ReserveSpace(
int new_space_size,
int pointer_space_size,
int data_space_size,
int code_space_size,
int map_space_size,
int cell_space_size,
int large_object_size);
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
void ReserveSpace(intptr_t *sizes, Address* addresses);
//
// Support for the API.
......@@ -2131,7 +2141,6 @@ class Heap {
friend class GCTracer;
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class LinearAllocationScope;
friend class Page;
friend class Isolate;
friend class MarkCompactCollector;
......@@ -2198,13 +2207,6 @@ class AlwaysAllocateScope {
};
class LinearAllocationScope {
public:
inline LinearAllocationScope();
inline ~LinearAllocationScope();
};
#ifdef DEBUG
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
......
......@@ -991,9 +991,6 @@ class Isolate {
Factory* factory() { return reinterpret_cast<Factory*>(this); }
// SerializerDeserializer state.
static const int kPartialSnapshotCacheCapacity = 1400;
static const int kJSRegexpStaticOffsetsVectorSize = 128;
Address external_callback() {
......
......@@ -166,30 +166,37 @@ class CppByteSink : public PartialSnapshotSink {
}
void WriteSpaceUsed(
const char* prefix,
int new_space_used,
int pointer_space_used,
int data_space_used,
int code_space_used,
int map_space_used,
int cell_space_used,
int large_space_used) {
fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
int cell_space_used) {
fprintf(fp_,
"const int Snapshot::pointer_space_used_ = %d;\n",
"const int Snapshot::%snew_space_used_ = %d;\n",
prefix,
new_space_used);
fprintf(fp_,
"const int Snapshot::%spointer_space_used_ = %d;\n",
prefix,
pointer_space_used);
fprintf(fp_,
"const int Snapshot::data_space_used_ = %d;\n",
"const int Snapshot::%sdata_space_used_ = %d;\n",
prefix,
data_space_used);
fprintf(fp_,
"const int Snapshot::code_space_used_ = %d;\n",
"const int Snapshot::%scode_space_used_ = %d;\n",
prefix,
code_space_used);
fprintf(fp_, "const int Snapshot::map_space_used_ = %d;\n", map_space_used);
fprintf(fp_,
"const int Snapshot::cell_space_used_ = %d;\n",
cell_space_used);
"const int Snapshot::%smap_space_used_ = %d;\n",
prefix,
map_space_used);
fprintf(fp_,
"const int Snapshot::large_space_used_ = %d;\n",
large_space_used);
"const int Snapshot::%scell_space_used_ = %d;\n",
prefix,
cell_space_used);
}
void WritePartialSnapshot() {
......@@ -400,12 +407,20 @@ int main(int argc, char** argv) {
sink.WritePartialSnapshot();
sink.WriteSpaceUsed(
"context_",
partial_ser.CurrentAllocationAddress(i::NEW_SPACE),
partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
partial_ser.CurrentAllocationAddress(i::LO_SPACE));
partial_ser.CurrentAllocationAddress(i::CELL_SPACE));
sink.WriteSpaceUsed(
"",
ser.CurrentAllocationAddress(i::NEW_SPACE),
ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
ser.CurrentAllocationAddress(i::CODE_SPACE),
ser.CurrentAllocationAddress(i::MAP_SPACE),
ser.CurrentAllocationAddress(i::CELL_SPACE));
return 0;
}
......@@ -7909,7 +7909,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
// If the function is not optimizable or debugger is active continue using the
// code from the full compiler.
if (!function->shared()->code()->optimizable() ||
if (!FLAG_crankshaft ||
!function->shared()->code()->optimizable() ||
isolate->DebuggerHasBreakPoints()) {
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
......
This diff is collapsed.
This diff is collapsed.
......@@ -37,10 +37,47 @@
namespace v8 {
namespace internal {
bool Snapshot::Deserialize(const byte* content, int len) {
SnapshotByteSource source(content, len);
Deserializer deserializer(&source);
return V8::Initialize(&deserializer);
static void ReserveSpaceForSnapshot(Deserializer* deserializer,
const char* file_name) {
int file_name_length = StrLength(file_name) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
OS::SNPrintF(name, "%s.size", file_name);
FILE* fp = OS::FOpen(name.start(), "r");
CHECK_NE(NULL, fp);
int new_size, pointer_size, data_size, code_size, map_size, cell_size;
#ifdef _MSC_VER
// Avoid warning about unsafe fscanf from MSVC.
// Please note that this is only fine if %c and %s are not being used.
#define fscanf fscanf_s
#endif
CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size));
CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size));
CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size));
CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
#ifdef _MSC_VER
#undef fscanf
#endif
fclose(fp);
deserializer->set_reservation(NEW_SPACE, new_size);
deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size);
deserializer->set_reservation(OLD_DATA_SPACE, data_size);
deserializer->set_reservation(CODE_SPACE, code_size);
deserializer->set_reservation(MAP_SPACE, map_size);
deserializer->set_reservation(CELL_SPACE, cell_size);
name.Dispose();
}
void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
deserializer->set_reservation(NEW_SPACE, new_space_used_);
deserializer->set_reservation(OLD_POINTER_SPACE, pointer_space_used_);
deserializer->set_reservation(OLD_DATA_SPACE, data_space_used_);
deserializer->set_reservation(CODE_SPACE, code_space_used_);
deserializer->set_reservation(MAP_SPACE, map_space_used_);
deserializer->set_reservation(CELL_SPACE, cell_space_used_);
}
......@@ -49,12 +86,17 @@ bool Snapshot::Initialize(const char* snapshot_file) {
int len;
byte* str = ReadBytes(snapshot_file, &len);
if (!str) return false;
Deserialize(str, len);
SnapshotByteSource source(str, len);
Deserializer deserializer(&source);
ReserveSpaceForSnapshot(&deserializer, snapshot_file);
bool success = V8::Initialize(&deserializer);
DeleteArray(str);
return true;
return success;
} else if (size_ > 0) {
Deserialize(raw_data_, raw_size_);
return true;
SnapshotByteSource source(raw_data_, raw_size_);
Deserializer deserializer(&source);
ReserveSpaceForLinkedInSnapshot(&deserializer);
return V8::Initialize(&deserializer);
}
return false;
}
......@@ -69,17 +111,16 @@ Handle<Context> Snapshot::NewContextFromSnapshot() {
if (context_size_ == 0) {
return Handle<Context>();
}
HEAP->ReserveSpace(new_space_used_,
pointer_space_used_,
data_space_used_,
code_space_used_,
map_space_used_,
cell_space_used_,
large_space_used_);
SnapshotByteSource source(context_raw_data_,
context_raw_size_);
Deserializer deserializer(&source);
Object* root;
deserializer.set_reservation(NEW_SPACE, context_new_space_used_);
deserializer.set_reservation(OLD_POINTER_SPACE, context_pointer_space_used_);
deserializer.set_reservation(OLD_DATA_SPACE, context_data_space_used_);
deserializer.set_reservation(CODE_SPACE, context_code_space_used_);
deserializer.set_reservation(MAP_SPACE, context_map_space_used_);
deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
deserializer.DeserializePartial(&root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
......
......@@ -49,6 +49,12 @@ const int Snapshot::data_space_used_ = 0;
const int Snapshot::code_space_used_ = 0;
const int Snapshot::map_space_used_ = 0;
const int Snapshot::cell_space_used_ = 0;
const int Snapshot::large_space_used_ = 0;
const int Snapshot::context_new_space_used_ = 0;
const int Snapshot::context_pointer_space_used_ = 0;
const int Snapshot::context_data_space_used_ = 0;
const int Snapshot::context_code_space_used_ = 0;
const int Snapshot::context_map_space_used_ = 0;
const int Snapshot::context_cell_space_used_ = 0;
} } // namespace v8::internal
......@@ -77,13 +77,18 @@ class Snapshot {
static const int code_space_used_;
static const int map_space_used_;
static const int cell_space_used_;
static const int large_space_used_;
static const int context_new_space_used_;
static const int context_pointer_space_used_;
static const int context_data_space_used_;
static const int context_code_space_used_;
static const int context_map_space_used_;
static const int context_cell_space_used_;
static const int size_;
static const int raw_size_;
static const int context_size_;
static const int context_raw_size_;
static bool Deserialize(const byte* content, int len);
static void ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer);
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};
......
......@@ -269,6 +269,10 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
return object;
}
ASSERT(!heap()->linear_allocation() ||
(anchor_.next_chunk() == &anchor_ &&
anchor_.prev_chunk() == &anchor_));
object = free_list_.Allocate(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
......
......@@ -881,10 +881,10 @@ intptr_t PagedSpace::SizeOfFirstPage() {
size = 192 * KB;
break;
case MAP_SPACE:
size = 128 * KB;
size = 16 * kPointerSize * KB;
break;
case CELL_SPACE:
size = 96 * KB;
size = 16 * kPointerSize * KB;
break;
case CODE_SPACE:
if (kPointerSize == 8) {
......@@ -2258,11 +2258,40 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
Free(top(), old_linear_size);
SetTop(new_area->address(), new_area->address() + size_in_bytes);
Allocate(size_in_bytes);
return true;
}
static void RepairFreeList(Heap* heap, FreeListNode* n) {
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
*map_location = heap->free_space_map();
} else {
ASSERT(*map_location == heap->free_space_map());
}
n = n->next();
}
}
void FreeList::RepairLists(Heap* heap) {
RepairFreeList(heap, small_list_);
RepairFreeList(heap, medium_list_);
RepairFreeList(heap, large_list_);
RepairFreeList(heap, huge_list_);
}
// After we have booted, we have created a map which represents free space
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterBoot() {
free_list_.RepairLists(heap());
}
// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
......
......@@ -790,14 +790,6 @@ class Space : public Malloced {
virtual void Print() = 0;
#endif
// After calling this we can allocate a certain number of bytes using only
// linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
// without using freelists or causing a GC. This is used by partial
// snapshots. It returns true if space was reserved or false if a GC is
// needed. For paged spaces the space requested must include the space wasted
// at the end of each page when allocating linearly.
virtual bool ReserveSpace(int bytes) = 0;
private:
Heap* heap_;
AllocationSpace id_;
......@@ -1318,6 +1310,11 @@ class FreeListNode: public HeapObject {
inline void Zap();
static inline FreeListNode* cast(MaybeObject* maybe) {
ASSERT(!maybe->IsFailure());
return reinterpret_cast<FreeListNode*>(maybe);
}
private:
static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
......@@ -1380,6 +1377,9 @@ class FreeList BASE_EMBEDDED {
bool IsVeryLong();
#endif
// Used after booting the VM.
void RepairLists(Heap* heap);
struct SizeStats {
intptr_t Total() {
return small_size_ + medium_size_ + large_size_ + huge_size_;
......@@ -1460,6 +1460,10 @@ class PagedSpace : public Space {
// linear in the number of objects in the page. It may be slow.
MUST_USE_RESULT MaybeObject* FindObject(Address addr);
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
virtual void RepairFreeListsAfterBoot();
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact();
......
......@@ -1244,7 +1244,9 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
size_of_objects_2 += obj->Size();
if (!obj->IsFreeSpace()) {
size_of_objects_2 += obj->Size();
}
}
// Delta must be within 5% of the larger result.
// TODO(gc): Tighten this up by distinguishing between byte
......@@ -1273,7 +1275,6 @@ static void FillUpNewSpace(NewSpace* new_space) {
// that the scavenger does not undo the filling.
v8::HandleScope scope;
AlwaysAllocateScope always_allocate;
LinearAllocationScope allocate_linearly;
intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
for (intptr_t i = 0; i < number_of_fillers; i++) {
......@@ -1928,8 +1929,13 @@ TEST(ReleaseOverReservedPages) {
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
// Triggering a last-resort GC should cause all pages to be released
// to the OS so that other processes can seize the memory.
// Triggering a last-resort GC should cause all pages to be released to the
// OS so that other processes can seize the memory. If we get a failure here
// where there are 2 pages left instead of 1, then we should increase the
// size of the first page a little in SizeOfFirstPage in spaces.cc. The
// first page should be small in order to reduce memory used when the VM
// boots, but if the 20 small arrays don't fit on the first page then that's
// an indication that it is too small.
HEAP->CollectAllAvailableGarbage("triggered really hard");
CHECK_EQ(1, old_pointer_space->CountTotalPages());
}
......
......@@ -545,9 +545,9 @@ TEST(BootUpMemoryUse) {
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(delta, 2600 * 1024); // 2484.
CHECK_LE(delta, 2500 * 1024); // 2400.
} else {
CHECK_LE(delta, 2950 * 1024); // 2844
CHECK_LE(delta, 2860 * 1024); // 2760.
}
}
}
......
This diff is collapsed.