Commit 5a8d1764 authored by erik.corry@gmail.com

Refactoring of snapshots.  This simplifies and improves
the speed of deserializing code.  The current startup
time improvement for V8 is around 6%, but code deserialization
is sped up disproportionately, and we will soon have more
code in the snapshot.
* Removed support for deserializing into large object space.
  The regular pages are 1Mbyte now and that is plenty.  This
  is a big simplification.
* Instead of reserving space for the snapshot we actually
  allocate it now.  This removes some special casing from
  the memory management and simplifies deserialization, since
  we just bump a pointer rather than calling the normal
  allocation routines (a sketch of this bump-pointer scheme
  follows this list).
* Record in the snapshot how much memory we need to boot up,
  and allocate it up front instead of just assuming that
  allocations in a new VM will always be linear.
* In the snapshot we always address an object as a negative
  offset from the current allocation point.  We used to
  sometimes address from the start of the deserialized data,
  but this is less useful now that we have good support for
  roots and repetitions in the deserialization data.
* Code objects were previously deserialized (like other
  objects) by alternating raw data (deserialized with memcpy)
  and pointers (to external references, other objects, etc.).
  Now we deserialize code objects with a single memcpy,
  followed by a series of skips and pointers that partially
  overwrite the code we memcopied out of the snapshot.
  The skips are sometimes merged into the following
  instruction in the deserialization data to reduce dispatch
  time.
* Integers in the snapshot were stored in a variable length
  format that gives a compact representation for small positive
  integers.  This is still the case, but the new encoding can
  be decoded without branches or conditional instructions,
  which is faster on a modern CPU (a decoding sketch also
  follows this list).
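
The reserve-then-bump idea can be illustrated with a small standalone
sketch.  This is not V8 code: the names (ReservedChunk, BumpAllocate)
and the sizes are made up, and the real deserializer works on heap
spaces rather than a std::vector.  It only shows the shape of the
scheme: the snapshot records how many bytes each space needs, one
contiguous chunk per space is reserved up front, and every allocation
during deserialization is then a pointer bump.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// One reserved region per space: allocation is just advancing |next|.
struct ReservedChunk {
  uint8_t* next;   // current bump pointer
  uint8_t* limit;  // end of the reserved region
};

// Hand out |size| bytes from a chunk that was reserved up front; no free
// lists and no GC checks, because the reservation already guaranteed room.
static uint8_t* BumpAllocate(ReservedChunk* chunk, size_t size) {
  assert(chunk->next + size <= chunk->limit);
  uint8_t* result = chunk->next;
  chunk->next += size;
  return result;
}

int main() {
  // Pretend the snapshot header said this space needs 4 KB.
  std::vector<uint8_t> backing(4096);
  ReservedChunk chunk = { backing.data(), backing.data() + backing.size() };

  // "Deserialize" two objects by bumping the pointer.
  uint8_t* object_a = BumpAllocate(&chunk, 64);
  uint8_t* object_b = BumpAllocate(&chunk, 128);
  assert(object_b == object_a + 64);
  return 0;
}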
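The branch-free integer decoding in the last bullet can be sketched as
follows.  This is an illustration under stated assumptions, not the
exact byte layout of the V8 serializer: it assumes a little-endian
host, values below 2^22, a read buffer padded so that a four-byte load
never runs off the end, and made-up helper names (EncodeInt,
DecodeInt).  The value is stored shifted left by two with the low two
bits holding the byte count, so the reader can always load four bytes,
mask, and shift without any data-dependent branch.

#include <cassert>
#include <cstdint>
#include <cstring>

// Append |value| (must fit in 22 bits) to |out| and return the new write
// position.  One, two or three bytes are written, little-endian, with the
// byte count kept in the low two bits.
static uint8_t* EncodeInt(uint8_t* out, uint32_t value) {
  assert(value < (1u << 22));
  uint32_t bytes = 1;
  if (value >= (1u << 6)) bytes = 2;
  if (value >= (1u << 14)) bytes = 3;
  uint32_t encoded = (value << 2) | bytes;
  for (uint32_t i = 0; i < bytes; i++) {
    out[i] = static_cast<uint8_t>(encoded >> (8 * i));
  }
  return out + bytes;
}

// Decode one integer from |*pos| with no data-dependent branches: always
// load four bytes (assumes a little-endian host and a padded buffer), mask
// off the bytes belonging to the next item, then strip the two-bit tag.
static uint32_t DecodeInt(const uint8_t** pos) {
  uint32_t word;
  std::memcpy(&word, *pos, sizeof(word));
  uint32_t bytes = word & 3;                           // 1, 2 or 3
  uint32_t mask = 0xFFFFFFFFu >> (32 - (bytes << 3));  // keep 8, 16 or 24 bits
  *pos += bytes;
  return (word & mask) >> 2;
}

int main() {
  uint8_t buffer[32] = {0};  // zero padding keeps the 4-byte over-read safe
  uint8_t* end = buffer;
  const uint32_t samples[] = {0, 1, 63, 64, 1000, (1u << 22) - 1};
  for (uint32_t v : samples) end = EncodeInt(end, v);

  const uint8_t* read_pos = buffer;
  for (uint32_t v : samples) assert(DecodeInt(&read_pos) == v);
  return 0;
}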
Review URL: https://chromiumcodereview.appspot.com/10918067

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12505 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 77a7d9f5
@@ -142,7 +142,9 @@ Handle<Code> CodeStub::GetCode() {
   }
 
   Activate(code);
-  ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
+  ASSERT(!NeedsImmovableCode() ||
+         heap->lo_space()->Contains(code) ||
+         heap->code_space()->FirstPage()->Contains(code->address()));
   return Handle<Code>(code, isolate);
 }
......
@@ -741,17 +741,6 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
 }
 
 
-LinearAllocationScope::LinearAllocationScope() {
-  HEAP->linear_allocation_scope_depth_++;
-}
-
-
-LinearAllocationScope::~LinearAllocationScope() {
-  HEAP->linear_allocation_scope_depth_--;
-  ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
-}
-
-
 #ifdef DEBUG
 void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
   for (Object** current = start; current < end; current++) {
......
@@ -417,6 +417,7 @@ void Heap::GarbageCollectionPrologue() {
   store_buffer()->GCPrologue();
 }
 
+
 intptr_t Heap::SizeOfObjects() {
   intptr_t total = 0;
   AllSpaces spaces;
@@ -426,6 +427,17 @@ intptr_t Heap::SizeOfObjects() {
   return total;
 }
 
+
+void Heap::RepairFreeListsAfterBoot() {
+  PagedSpaces spaces;
+  for (PagedSpace* space = spaces.next();
+       space != NULL;
+       space = spaces.next()) {
+    space->RepairFreeListsAfterBoot();
+  }
+}
+
+
 void Heap::GarbageCollectionEpilogue() {
   store_buffer()->GCEpilogue();
   LiveObjectList::GCEpilogue();
@@ -668,67 +680,42 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
 void Heap::ReserveSpace(
-    int new_space_size,
-    int pointer_space_size,
-    int data_space_size,
-    int code_space_size,
-    int map_space_size,
-    int cell_space_size,
-    int large_object_size) {
-  NewSpace* new_space = Heap::new_space();
-  PagedSpace* old_pointer_space = Heap::old_pointer_space();
-  PagedSpace* old_data_space = Heap::old_data_space();
-  PagedSpace* code_space = Heap::code_space();
-  PagedSpace* map_space = Heap::map_space();
-  PagedSpace* cell_space = Heap::cell_space();
-  LargeObjectSpace* lo_space = Heap::lo_space();
+    intptr_t *sizes,
+    Address *locations_out) {
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
   while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
-    if (!new_space->ReserveSpace(new_space_size)) {
-      Heap::CollectGarbage(NEW_SPACE,
-                           "failed to reserve space in the new space");
-      gc_performed = true;
-    }
-    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
-      AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
-          "failed to reserve space in the old pointer space");
-      gc_performed = true;
-    }
-    if (!(old_data_space->ReserveSpace(data_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
-          "failed to reserve space in the old data space");
-      gc_performed = true;
-    }
-    if (!(code_space->ReserveSpace(code_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
-          "failed to reserve space in the code space");
-      gc_performed = true;
-    }
-    if (!(map_space->ReserveSpace(map_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
-          "failed to reserve space in the map space");
-      gc_performed = true;
-    }
-    if (!(cell_space->ReserveSpace(cell_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
-          "failed to reserve space in the cell space");
-      gc_performed = true;
-    }
-    // We add a slack-factor of 2 in order to have space for a series of
-    // large-object allocations that are only just larger than the page size.
-    large_object_size *= 2;
-    // The ReserveSpace method on the large object space checks how much
-    // we can expand the old generation.  This includes expansion caused by
-    // allocation in the other spaces.
-    large_object_size += cell_space_size + map_space_size + code_space_size +
-        data_space_size + pointer_space_size;
-    if (!(lo_space->ReserveSpace(large_object_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
-          "failed to reserve space in the large object space");
-      gc_performed = true;
+    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
+    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
+      if (sizes[space] != 0) {
+        MaybeObject* allocation;
+        if (space == NEW_SPACE) {
+          allocation = new_space()->AllocateRaw(sizes[space]);
+        } else {
+          allocation = paged_space(space)->AllocateRaw(sizes[space]);
+        }
+        FreeListNode* node;
+        if (!allocation->To<FreeListNode>(&node)) {
+          if (space == NEW_SPACE) {
+            Heap::CollectGarbage(NEW_SPACE,
+                                 "failed to reserve space in the new space");
+          } else {
+            AbortIncrementalMarkingAndCollectGarbage(
+                this,
+                static_cast<AllocationSpace>(space),
+                "failed to reserve space in paged space");
+          }
+          gc_performed = true;
+          break;
+        } else {
+          // Mark with a free list node, in case we have a GC before
+          // deserializing.
+          node->set_size(this, sizes[space]);
+          locations_out[space] = node->address();
+        }
+      }
     }
   }
@@ -3600,17 +3587,27 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   MaybeObject* maybe_result;
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
-  if (obj_size > code_space()->AreaSize() || immovable) {
+  HeapObject* result;
+  bool force_lo_space = obj_size > code_space()->AreaSize();
+  if (force_lo_space) {
     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
+  if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
 
-  Object* result;
-  if (!maybe_result->ToObject(&result)) return maybe_result;
+  if (immovable && !force_lo_space &&
+      // Objects on the first page of each space are never moved.
+      !code_space_->FirstPage()->Contains(result->address())) {
+    // Discard the first code allocation, which was on a page where it could be
+    // moved.
+    CreateFillerObjectAt(result->address(), obj_size);
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+    if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
+  }
 
   // Initialize the object
-  HeapObject::cast(result)->set_map_no_write_barrier(code_map());
+  result->set_map_no_write_barrier(code_map());
   Code* code = Code::cast(result);
   ASSERT(!isolate_->code_range()->exists() ||
          isolate_->code_range()->contains(code->address()));
......
@@ -508,6 +508,24 @@ class Heap {
   MapSpace* map_space() { return map_space_; }
   CellSpace* cell_space() { return cell_space_; }
   LargeObjectSpace* lo_space() { return lo_space_; }
+  PagedSpace* paged_space(int idx) {
+    switch (idx) {
+      case OLD_POINTER_SPACE:
+        return old_pointer_space();
+      case OLD_DATA_SPACE:
+        return old_data_space();
+      case MAP_SPACE:
+        return map_space();
+      case CELL_SPACE:
+        return cell_space();
+      case CODE_SPACE:
+        return code_space();
+      case NEW_SPACE:
+      case LO_SPACE:
+        UNREACHABLE();
+    }
+    return NULL;
+  }
 
   bool always_allocate() { return always_allocate_scope_depth_ != 0; }
   Address always_allocate_scope_depth_address() {
@@ -657,6 +675,9 @@ class Heap {
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
 
+  // For use during bootup.
+  void RepairFreeListsAfterBoot();
+
   // Allocates and fully initializes a String.  There are two String
   // encodings: ASCII and two byte.  One should choose between the three string
   // allocation functions based on the encoding of the string buffer used to
@@ -1309,20 +1330,9 @@ class Heap {
   // Commits from space if it is uncommitted.
   void EnsureFromSpaceIsCommitted();
 
-  // Support for partial snapshots.  After calling this we can allocate a
-  // certain number of bytes using only linear allocation (with a
-  // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
-  // or causing a GC.  It returns true of space was reserved or false if a GC is
-  // needed.  For paged spaces the space requested must include the space wasted
-  // at the end of each page when allocating linearly.
-  void ReserveSpace(
-    int new_space_size,
-    int pointer_space_size,
-    int data_space_size,
-    int code_space_size,
-    int map_space_size,
-    int cell_space_size,
-    int large_object_size);
+  // Support for partial snapshots.  After calling this we have a linear
+  // space to write objects in each space.
+  void ReserveSpace(intptr_t *sizes, Address* addresses);
 
   //
   // Support for the API.
@@ -2131,7 +2141,6 @@ class Heap {
   friend class GCTracer;
   friend class DisallowAllocationFailure;
   friend class AlwaysAllocateScope;
-  friend class LinearAllocationScope;
   friend class Page;
   friend class Isolate;
   friend class MarkCompactCollector;
@@ -2198,13 +2207,6 @@ class AlwaysAllocateScope {
 };
 
 
-class LinearAllocationScope {
- public:
-  inline LinearAllocationScope();
-  inline ~LinearAllocationScope();
-};
-
-
 #ifdef DEBUG
 // Visitor class to verify interior pointers in spaces that do not contain
 // or care about intergenerational references.  All heap object pointers have to
......
@@ -991,9 +991,6 @@ class Isolate {
   Factory* factory() { return reinterpret_cast<Factory*>(this); }
 
-  // SerializerDeserializer state.
-  static const int kPartialSnapshotCacheCapacity = 1400;
-
   static const int kJSRegexpStaticOffsetsVectorSize = 128;
 
   Address external_callback() {
......
@@ -166,30 +166,37 @@ class CppByteSink : public PartialSnapshotSink {
   }
 
   void WriteSpaceUsed(
+      const char* prefix,
       int new_space_used,
       int pointer_space_used,
       int data_space_used,
       int code_space_used,
       int map_space_used,
-      int cell_space_used,
-      int large_space_used) {
-    fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
+      int cell_space_used) {
     fprintf(fp_,
-            "const int Snapshot::pointer_space_used_ = %d;\n",
+            "const int Snapshot::%snew_space_used_ = %d;\n",
+            prefix,
+            new_space_used);
+    fprintf(fp_,
+            "const int Snapshot::%spointer_space_used_ = %d;\n",
+            prefix,
            pointer_space_used);
     fprintf(fp_,
-            "const int Snapshot::data_space_used_ = %d;\n",
+            "const int Snapshot::%sdata_space_used_ = %d;\n",
+            prefix,
            data_space_used);
     fprintf(fp_,
-            "const int Snapshot::code_space_used_ = %d;\n",
+            "const int Snapshot::%scode_space_used_ = %d;\n",
+            prefix,
            code_space_used);
-    fprintf(fp_, "const int Snapshot::map_space_used_ = %d;\n", map_space_used);
     fprintf(fp_,
-            "const int Snapshot::cell_space_used_ = %d;\n",
-            cell_space_used);
+            "const int Snapshot::%smap_space_used_ = %d;\n",
+            prefix,
+            map_space_used);
     fprintf(fp_,
-            "const int Snapshot::large_space_used_ = %d;\n",
-            large_space_used);
+            "const int Snapshot::%scell_space_used_ = %d;\n",
+            prefix,
+            cell_space_used);
   }
 
   void WritePartialSnapshot() {
@@ -400,12 +407,20 @@ int main(int argc, char** argv) {
   sink.WritePartialSnapshot();
 
   sink.WriteSpaceUsed(
+      "context_",
       partial_ser.CurrentAllocationAddress(i::NEW_SPACE),
       partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
       partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
       partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
       partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
-      partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
-      partial_ser.CurrentAllocationAddress(i::LO_SPACE));
+      partial_ser.CurrentAllocationAddress(i::CELL_SPACE));
+  sink.WriteSpaceUsed(
+      "",
+      ser.CurrentAllocationAddress(i::NEW_SPACE),
+      ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
+      ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
+      ser.CurrentAllocationAddress(i::CODE_SPACE),
+      ser.CurrentAllocationAddress(i::MAP_SPACE),
+      ser.CurrentAllocationAddress(i::CELL_SPACE));
   return 0;
 }
@@ -7909,7 +7909,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
 
   // If the function is not optimizable or debugger is active continue using the
   // code from the full compiler.
-  if (!function->shared()->code()->optimizable() ||
+  if (!FLAG_crankshaft ||
+      !function->shared()->code()->optimizable() ||
       isolate->DebuggerHasBreakPoints()) {
     if (FLAG_trace_opt) {
       PrintF("[failed to optimize ");
......
@@ -37,10 +37,47 @@
 namespace v8 {
 namespace internal {
 
-bool Snapshot::Deserialize(const byte* content, int len) {
-  SnapshotByteSource source(content, len);
-  Deserializer deserializer(&source);
-  return V8::Initialize(&deserializer);
+
+static void ReserveSpaceForSnapshot(Deserializer* deserializer,
+                                    const char* file_name) {
+  int file_name_length = StrLength(file_name) + 10;
+  Vector<char> name = Vector<char>::New(file_name_length + 1);
+  OS::SNPrintF(name, "%s.size", file_name);
+  FILE* fp = OS::FOpen(name.start(), "r");
+  CHECK_NE(NULL, fp);
+  int new_size, pointer_size, data_size, code_size, map_size, cell_size;
+#ifdef _MSC_VER
+  // Avoid warning about unsafe fscanf from MSVC.
+  // Please note that this is only fine if %c and %s are not being used.
+#define fscanf fscanf_s
+#endif
+  CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size));
+  CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size));
+  CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size));
+  CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
+  CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
+  CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
+#ifdef _MSC_VER
+#undef fscanf
+#endif
+  fclose(fp);
+  deserializer->set_reservation(NEW_SPACE, new_size);
+  deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size);
+  deserializer->set_reservation(OLD_DATA_SPACE, data_size);
+  deserializer->set_reservation(CODE_SPACE, code_size);
+  deserializer->set_reservation(MAP_SPACE, map_size);
+  deserializer->set_reservation(CELL_SPACE, cell_size);
+  name.Dispose();
+}
+
+
+void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
+  deserializer->set_reservation(NEW_SPACE, new_space_used_);
+  deserializer->set_reservation(OLD_POINTER_SPACE, pointer_space_used_);
+  deserializer->set_reservation(OLD_DATA_SPACE, data_space_used_);
+  deserializer->set_reservation(CODE_SPACE, code_space_used_);
+  deserializer->set_reservation(MAP_SPACE, map_space_used_);
+  deserializer->set_reservation(CELL_SPACE, cell_space_used_);
 }
@@ -49,12 +49,17 @@ bool Snapshot::Initialize(const char* snapshot_file) {
     int len;
     byte* str = ReadBytes(snapshot_file, &len);
     if (!str) return false;
-    Deserialize(str, len);
+    SnapshotByteSource source(str, len);
+    Deserializer deserializer(&source);
+    ReserveSpaceForSnapshot(&deserializer, snapshot_file);
+    bool success = V8::Initialize(&deserializer);
     DeleteArray(str);
-    return true;
+    return success;
   } else if (size_ > 0) {
-    Deserialize(raw_data_, raw_size_);
-    return true;
+    SnapshotByteSource source(raw_data_, raw_size_);
+    Deserializer deserializer(&source);
+    ReserveSpaceForLinkedInSnapshot(&deserializer);
+    return V8::Initialize(&deserializer);
   }
   return false;
 }
@@ -69,17 +69,16 @@ Handle<Context> Snapshot::NewContextFromSnapshot() {
   if (context_size_ == 0) {
     return Handle<Context>();
   }
-  HEAP->ReserveSpace(new_space_used_,
-                     pointer_space_used_,
-                     data_space_used_,
-                     code_space_used_,
-                     map_space_used_,
-                     cell_space_used_,
-                     large_space_used_);
   SnapshotByteSource source(context_raw_data_,
                             context_raw_size_);
   Deserializer deserializer(&source);
   Object* root;
+  deserializer.set_reservation(NEW_SPACE, context_new_space_used_);
+  deserializer.set_reservation(OLD_POINTER_SPACE, context_pointer_space_used_);
+  deserializer.set_reservation(OLD_DATA_SPACE, context_data_space_used_);
+  deserializer.set_reservation(CODE_SPACE, context_code_space_used_);
+  deserializer.set_reservation(MAP_SPACE, context_map_space_used_);
+  deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
   deserializer.DeserializePartial(&root);
   CHECK(root->IsContext());
   return Handle<Context>(Context::cast(root));
......
@@ -49,6 +49,12 @@ const int Snapshot::data_space_used_ = 0;
 const int Snapshot::code_space_used_ = 0;
 const int Snapshot::map_space_used_ = 0;
 const int Snapshot::cell_space_used_ = 0;
-const int Snapshot::large_space_used_ = 0;
+const int Snapshot::context_new_space_used_ = 0;
+const int Snapshot::context_pointer_space_used_ = 0;
+const int Snapshot::context_data_space_used_ = 0;
+const int Snapshot::context_code_space_used_ = 0;
+const int Snapshot::context_map_space_used_ = 0;
+const int Snapshot::context_cell_space_used_ = 0;
 
 } }  // namespace v8::internal
@@ -77,13 +77,18 @@ class Snapshot {
   static const int code_space_used_;
   static const int map_space_used_;
   static const int cell_space_used_;
-  static const int large_space_used_;
+  static const int context_new_space_used_;
+  static const int context_pointer_space_used_;
+  static const int context_data_space_used_;
+  static const int context_code_space_used_;
+  static const int context_map_space_used_;
+  static const int context_cell_space_used_;
   static const int size_;
   static const int raw_size_;
   static const int context_size_;
   static const int context_raw_size_;
 
-  static bool Deserialize(const byte* content, int len);
+  static void ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
 };
......
@@ -269,6 +269,10 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
     return object;
   }
 
+  ASSERT(!heap()->linear_allocation() ||
+         (anchor_.next_chunk() == &anchor_ &&
+          anchor_.prev_chunk() == &anchor_));
+
   object = free_list_.Allocate(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
......
@@ -881,10 +881,10 @@ intptr_t PagedSpace::SizeOfFirstPage() {
       size = 192 * KB;
       break;
     case MAP_SPACE:
-      size = 128 * KB;
+      size = 16 * kPointerSize * KB;
      break;
     case CELL_SPACE:
-      size = 96 * KB;
+      size = 16 * kPointerSize * KB;
      break;
     case CODE_SPACE:
       if (kPointerSize == 8) {
@@ -2258,11 +2258,40 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
   Free(top(), old_linear_size);
   SetTop(new_area->address(), new_area->address() + size_in_bytes);
-  Allocate(size_in_bytes);
   return true;
 }
 
 
+static void RepairFreeList(Heap* heap, FreeListNode* n) {
+  while (n != NULL) {
+    Map** map_location = reinterpret_cast<Map**>(n->address());
+    if (*map_location == NULL) {
+      *map_location = heap->free_space_map();
+    } else {
+      ASSERT(*map_location == heap->free_space_map());
+    }
+    n = n->next();
+  }
+}
+
+
+void FreeList::RepairLists(Heap* heap) {
+  RepairFreeList(heap, small_list_);
+  RepairFreeList(heap, medium_list_);
+  RepairFreeList(heap, large_list_);
+  RepairFreeList(heap, huge_list_);
+}
+
+
+// After we have booted, we have created a map which represents free space
+// on the heap.  If there was already a free list then the elements on it
+// were created with the wrong FreeSpaceMap (normally NULL), so we need to
+// fix them.
+void PagedSpace::RepairFreeListsAfterBoot() {
+  free_list_.RepairLists(heap());
+}
+
+
 // You have to call this last, since the implementation from PagedSpace
 // doesn't know that memory was 'promised' to large object space.
 bool LargeObjectSpace::ReserveSpace(int bytes) {
......
@@ -790,14 +790,6 @@ class Space : public Malloced {
   virtual void Print() = 0;
 #endif
 
-  // After calling this we can allocate a certain number of bytes using only
-  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
-  // without using freelists or causing a GC.  This is used by partial
-  // snapshots.  It returns true of space was reserved or false if a GC is
-  // needed.  For paged spaces the space requested must include the space wasted
-  // at the end of each when allocating linearly.
-  virtual bool ReserveSpace(int bytes) = 0;
-
  private:
   Heap* heap_;
   AllocationSpace id_;
@@ -1318,6 +1310,11 @@ class FreeListNode: public HeapObject {
   inline void Zap();
 
+  static inline FreeListNode* cast(MaybeObject* maybe) {
+    ASSERT(!maybe->IsFailure());
+    return reinterpret_cast<FreeListNode*>(maybe);
+  }
+
  private:
   static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
@@ -1380,6 +1377,9 @@ class FreeList BASE_EMBEDDED {
   bool IsVeryLong();
 #endif
 
+  // Used after booting the VM.
+  void RepairLists(Heap* heap);
+
   struct SizeStats {
     intptr_t Total() {
       return small_size_ + medium_size_ + large_size_ + huge_size_;
@@ -1460,6 +1460,10 @@ class PagedSpace : public Space {
   // linear in the number of objects in the page.  It may be slow.
   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
 
+  // During boot the free_space_map is created, and afterwards we may need
+  // to write it into the free list nodes that were already created.
+  virtual void RepairFreeListsAfterBoot();
+
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact();
......
@@ -1244,7 +1244,9 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
   for (HeapObject* obj = iterator.next();
        obj != NULL;
        obj = iterator.next()) {
-    size_of_objects_2 += obj->Size();
+    if (!obj->IsFreeSpace()) {
+      size_of_objects_2 += obj->Size();
+    }
   }
   // Delta must be within 5% of the larger result.
   // TODO(gc): Tighten this up by distinguishing between byte
@@ -1273,7 +1275,6 @@ static void FillUpNewSpace(NewSpace* new_space) {
   // that the scavenger does not undo the filling.
   v8::HandleScope scope;
   AlwaysAllocateScope always_allocate;
-  LinearAllocationScope allocate_linearly;
   intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
   intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
   for (intptr_t i = 0; i < number_of_fillers; i++) {
@@ -1928,8 +1929,13 @@ TEST(ReleaseOverReservedPages) {
   HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
   CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
 
-  // Triggering a last-resort GC should cause all pages to be released
-  // to the OS so that other processes can seize the memory.
+  // Triggering a last-resort GC should cause all pages to be released to the
+  // OS so that other processes can seize the memory.  If we get a failure here
+  // where there are 2 pages left instead of 1, then we should increase the
+  // size of the first page a little in SizeOfFirstPage in spaces.cc.  The
+  // first page should be small in order to reduce memory used when the VM
+  // boots, but if the 20 small arrays don't fit on the first page then that's
+  // an indication that it is too small.
   HEAP->CollectAllAvailableGarbage("triggered really hard");
   CHECK_EQ(1, old_pointer_space->CountTotalPages());
 }
......
@@ -545,9 +545,9 @@ TEST(BootUpMemoryUse) {
       }
     } else {
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(delta, 2600 * 1024);  // 2484.
+        CHECK_LE(delta, 2500 * 1024);  // 2400.
       } else {
-        CHECK_LE(delta, 2950 * 1024);  // 2844
+        CHECK_LE(delta, 2860 * 1024);  // 2760.
      }
    }
  }
......