Commit 5a8d1764 authored by erik.corry@gmail.com

Refactoring of snapshots.

This simplifies the snapshot format and improves the speed of
deserializing code.  The current startup time improvement for V8 is
around 6%, but code deserialization is sped up disproportionately,
and we will soon have more code in the snapshot.
* Removed support for deserializing into large object space.
  The regular pages are 1Mbyte now and that is plenty.  This
  is a big simplification.
* Instead of reserving space for the snapshot we actually
  allocate it now.  This removes some special casing from
  the memory management and simplifies deserialization since
  we are just bumping a pointer rather than calling the
  normal allocation routines during deserialization.
* Record in the snapshot how much space we need to boot up, and
  allocate that space up front instead of just assuming that
  allocations in a new VM will always be linear.
* In the snapshot we always address an object as a negative
  offset from the current allocation point.  We used to
  sometimes address from the start of the deserialized data,
  but this is less useful now that we have good support for
  roots and repetitions in the deserialization data.
* Code objects were previously deserialized (like other
  objects) by alternating raw data (deserialized with memcpy)
  and pointers (to external references, other objects, etc.).
  Now we deserialize code objects with a single memcpy,
  followed by a series of skips and pointers that partially
  overwrite the code we memcopied out of the snapshot.
  The skips are sometimes merged into the following
  instruction in the deserialization data to reduce dispatch
  time.
* Integers in the snapshot were stored in a variable length
  format that gives a compact representation for small positive
  integers.  This is still the case, but the new encoding can
  be decoded without branches or conditional instructions,
  which is faster on a modern CPU.  (See the sketch after this
  list.)
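As an illustration of the last point, here is a minimal, self-contained
sketch of the new encoding.  The helper names and signatures are
illustrative, not the V8 API (the real code lives in
SnapshotByteSink::PutInt and SnapshotByteSource::GetInt below):

  #include <cassert>
  #include <cstdint>
  #include <vector>

  // Encode: the value (at most 22 bits) is shifted left by two, and the
  // total byte count (1-3) is stored in the two low bits of the first byte.
  void PutInt(std::vector<uint8_t>* out, uint32_t value) {
    assert(value < (1u << 22));
    uint32_t encoded = value << 2;
    int bytes = 1;
    if (encoded > 0xff) bytes = 2;
    if (encoded > 0xffff) bytes = 3;
    encoded |= bytes;  // Length tag, always 1-3.
    for (int i = 0; i < bytes; i++) {
      out->push_back(static_cast<uint8_t>((encoded >> (8 * i)) & 0xff));
    }
  }

  // Decode without branches: always load four little-endian bytes, build a
  // mask from the length tag, and shift the tag back out.  Because this can
  // read up to three bytes past the integer, the serializer pads the end of
  // the stream with kNop bytes.
  uint32_t GetInt(const uint8_t* data, int* position) {
    const uint8_t* p = data + *position;
    uint32_t word = p[0] | (p[1] << 8) | (p[2] << 16) |
                    (static_cast<uint32_t>(p[3]) << 24);
    int bytes = word & 3;  // Length tag (1-3 by construction).
    *position += bytes;
    uint32_t mask = 0xffffffffu >> (32 - (bytes << 3));
    return (word & mask) >> 2;
  }

The decoder trades a few bytes of end-of-stream padding for the removal of
the data-dependent branch that the old one-continuation-bit-per-byte scheme
required.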
Review URL: https://chromiumcodereview.appspot.com/10918067

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12505 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 77a7d9f5
@@ -142,7 +142,9 @@ Handle<Code> CodeStub::GetCode() {
}
Activate(code);
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
ASSERT(!NeedsImmovableCode() ||
heap->lo_space()->Contains(code) ||
heap->code_space()->FirstPage()->Contains(code->address()));
return Handle<Code>(code, isolate);
}
@@ -741,17 +741,6 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
}
LinearAllocationScope::LinearAllocationScope() {
HEAP->linear_allocation_scope_depth_++;
}
LinearAllocationScope::~LinearAllocationScope() {
HEAP->linear_allocation_scope_depth_--;
ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
}
#ifdef DEBUG
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
@@ -417,6 +417,7 @@ void Heap::GarbageCollectionPrologue() {
store_buffer()->GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
intptr_t total = 0;
AllSpaces spaces;
@@ -426,6 +427,17 @@ intptr_t Heap::SizeOfObjects() {
return total;
}
void Heap::RepairFreeListsAfterBoot() {
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
space->RepairFreeListsAfterBoot();
}
}
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
LiveObjectList::GCEpilogue();
@@ -668,67 +680,42 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
void Heap::ReserveSpace(
int new_space_size,
int pointer_space_size,
int data_space_size,
int code_space_size,
int map_space_size,
int cell_space_size,
int large_object_size) {
NewSpace* new_space = Heap::new_space();
PagedSpace* old_pointer_space = Heap::old_pointer_space();
PagedSpace* old_data_space = Heap::old_data_space();
PagedSpace* code_space = Heap::code_space();
PagedSpace* map_space = Heap::map_space();
PagedSpace* cell_space = Heap::cell_space();
LargeObjectSpace* lo_space = Heap::lo_space();
intptr_t *sizes,
Address *locations_out) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
if (!new_space->ReserveSpace(new_space_size)) {
Heap::CollectGarbage(NEW_SPACE,
"failed to reserve space in the new space");
gc_performed = true;
}
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
"failed to reserve space in the old pointer space");
gc_performed = true;
}
if (!(old_data_space->ReserveSpace(data_space_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
"failed to reserve space in the old data space");
gc_performed = true;
}
if (!(code_space->ReserveSpace(code_space_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
"failed to reserve space in the code space");
gc_performed = true;
}
if (!(map_space->ReserveSpace(map_space_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
"failed to reserve space in the map space");
gc_performed = true;
}
if (!(cell_space->ReserveSpace(cell_space_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
"failed to reserve space in the cell space");
gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for a series of
// large-object allocations that are only just larger than the page size.
large_object_size *= 2;
// The ReserveSpace method on the large object space checks how much
// we can expand the old generation. This includes expansion caused by
// allocation in the other spaces.
large_object_size += cell_space_size + map_space_size + code_space_size +
data_space_size + pointer_space_size;
if (!(lo_space->ReserveSpace(large_object_size))) {
AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
"failed to reserve space in the large object space");
gc_performed = true;
ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
if (sizes[space] != 0) {
MaybeObject* allocation;
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRaw(sizes[space]);
} else {
allocation = paged_space(space)->AllocateRaw(sizes[space]);
}
FreeListNode* node;
if (!allocation->To<FreeListNode>(&node)) {
if (space == NEW_SPACE) {
Heap::CollectGarbage(NEW_SPACE,
"failed to reserve space in the new space");
} else {
AbortIncrementalMarkingAndCollectGarbage(
this,
static_cast<AllocationSpace>(space),
"failed to reserve space in paged space");
}
gc_performed = true;
break;
} else {
// Mark with a free list node, in case we have a GC before
// deserializing.
node->set_size(this, sizes[space]);
locations_out[space] = node->address();
}
}
}
}
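// Sketch of the intended call pattern for the new ReserveSpace (the local
// variable names here are illustrative): the caller passes one size per
// space, taken from the snapshot, and receives the start of each chunk:
//
//   intptr_t sizes[LAST_SPACE + 1];    // filled in from the snapshot
//   Address locations[LAST_SPACE + 1];
//   heap->ReserveSpace(sizes, locations);  // retries with GC until it fits
//
// Deserialization then bumps a pointer through each chunk instead of going
// through the normal allocation routines.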
@@ -3600,17 +3587,27 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
MaybeObject* maybe_result;
// Large code objects and code objects which should stay at a fixed address
// are allocated in large object space.
if (obj_size > code_space()->AreaSize() || immovable) {
HeapObject* result;
bool force_lo_space = obj_size > code_space()->AreaSize();
if (force_lo_space) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
if (immovable && !force_lo_space &&
// Objects on the first page of each space are never moved.
!code_space_->FirstPage()->Contains(result->address())) {
// Discard the first code allocation, which was on a page where it could be
// moved.
CreateFillerObjectAt(result->address(), obj_size);
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
}
// Initialize the object
HeapObject::cast(result)->set_map_no_write_barrier(code_map());
result->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
@@ -508,6 +508,24 @@ class Heap {
MapSpace* map_space() { return map_space_; }
CellSpace* cell_space() { return cell_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
PagedSpace* paged_space(int idx) {
switch (idx) {
case OLD_POINTER_SPACE:
return old_pointer_space();
case OLD_DATA_SPACE:
return old_data_space();
case MAP_SPACE:
return map_space();
case CELL_SPACE:
return cell_space();
case CODE_SPACE:
return code_space();
case NEW_SPACE:
case LO_SPACE:
UNREACHABLE();
}
return NULL;
}
bool always_allocate() { return always_allocate_scope_depth_ != 0; }
Address always_allocate_scope_depth_address() {
@@ -657,6 +675,9 @@ class Heap {
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
// For use during bootup.
void RepairFreeListsAfterBoot();
// Allocates and fully initializes a String. There are two String
// encodings: ASCII and two byte. One should choose between the three string
// allocation functions based on the encoding of the string buffer used to
@@ -1309,20 +1330,9 @@ class Heap {
// Commits from space if it is uncommitted.
void EnsureFromSpaceIsCommitted();
// Support for partial snapshots. After calling this we can allocate a
// certain number of bytes using only linear allocation (with a
// LinearAllocationScope and an AlwaysAllocateScope) without using freelists
// or causing a GC. It returns true if space was reserved or false if a GC is
// needed. For paged spaces the space requested must include the space wasted
// at the end of each page when allocating linearly.
void ReserveSpace(
int new_space_size,
int pointer_space_size,
int data_space_size,
int code_space_size,
int map_space_size,
int cell_space_size,
int large_object_size);
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
void ReserveSpace(intptr_t *sizes, Address* addresses);
//
// Support for the API.
@@ -2131,7 +2141,6 @@ class Heap {
friend class GCTracer;
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class LinearAllocationScope;
friend class Page;
friend class Isolate;
friend class MarkCompactCollector;
@@ -2198,13 +2207,6 @@ class AlwaysAllocateScope {
};
class LinearAllocationScope {
public:
inline LinearAllocationScope();
inline ~LinearAllocationScope();
};
#ifdef DEBUG
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
@@ -991,9 +991,6 @@ class Isolate {
Factory* factory() { return reinterpret_cast<Factory*>(this); }
// SerializerDeserializer state.
static const int kPartialSnapshotCacheCapacity = 1400;
static const int kJSRegexpStaticOffsetsVectorSize = 128;
Address external_callback() {
@@ -166,30 +166,37 @@ class CppByteSink : public PartialSnapshotSink {
}
void WriteSpaceUsed(
const char* prefix,
int new_space_used,
int pointer_space_used,
int data_space_used,
int code_space_used,
int map_space_used,
int cell_space_used,
int large_space_used) {
fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
int cell_space_used) {
fprintf(fp_,
"const int Snapshot::pointer_space_used_ = %d;\n",
"const int Snapshot::%snew_space_used_ = %d;\n",
prefix,
new_space_used);
fprintf(fp_,
"const int Snapshot::%spointer_space_used_ = %d;\n",
prefix,
pointer_space_used);
fprintf(fp_,
"const int Snapshot::data_space_used_ = %d;\n",
"const int Snapshot::%sdata_space_used_ = %d;\n",
prefix,
data_space_used);
fprintf(fp_,
"const int Snapshot::code_space_used_ = %d;\n",
"const int Snapshot::%scode_space_used_ = %d;\n",
prefix,
code_space_used);
fprintf(fp_, "const int Snapshot::map_space_used_ = %d;\n", map_space_used);
fprintf(fp_,
"const int Snapshot::cell_space_used_ = %d;\n",
cell_space_used);
"const int Snapshot::%smap_space_used_ = %d;\n",
prefix,
map_space_used);
fprintf(fp_,
"const int Snapshot::large_space_used_ = %d;\n",
large_space_used);
"const int Snapshot::%scell_space_used_ = %d;\n",
prefix,
cell_space_used);
}
void WritePartialSnapshot() {
@@ -400,12 +407,20 @@ int main(int argc, char** argv) {
sink.WritePartialSnapshot();
sink.WriteSpaceUsed(
"context_",
partial_ser.CurrentAllocationAddress(i::NEW_SPACE),
partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
partial_ser.CurrentAllocationAddress(i::LO_SPACE));
partial_ser.CurrentAllocationAddress(i::CELL_SPACE));
sink.WriteSpaceUsed(
"",
ser.CurrentAllocationAddress(i::NEW_SPACE),
ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
ser.CurrentAllocationAddress(i::CODE_SPACE),
ser.CurrentAllocationAddress(i::MAP_SPACE),
ser.CurrentAllocationAddress(i::CELL_SPACE));
return 0;
}
@@ -7909,7 +7909,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
// If the function is not optimizable or the debugger is active, continue
// using the code from the full compiler.
if (!function->shared()->code()->optimizable() ||
if (!FLAG_crankshaft ||
!function->shared()->code()->optimizable() ||
isolate->DebuggerHasBreakPoints()) {
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
@@ -599,109 +599,34 @@ Deserializer::Deserializer(SnapshotByteSource* source)
: isolate_(NULL),
source_(source),
external_reference_decoder_(NULL) {
}
// This routine both allocates a new object, and also keeps
// track of where objects have been allocated so that we can
// fix back references when deserializing.
Address Deserializer::Allocate(int space_index, Space* space, int size) {
Address address;
if (!SpaceIsLarge(space_index)) {
ASSERT(!SpaceIsPaged(space_index) ||
size <= Page::kPageSize - Page::kObjectStartOffset);
MaybeObject* maybe_new_allocation;
if (space_index == NEW_SPACE) {
maybe_new_allocation =
reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
} else {
maybe_new_allocation =
reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
}
ASSERT(!maybe_new_allocation->IsFailure());
Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
HeapObject* new_object = HeapObject::cast(new_allocation);
address = new_object->address();
high_water_[space_index] = address + size;
} else {
ASSERT(SpaceIsLarge(space_index));
LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
Object* new_allocation;
if (space_index == kLargeData || space_index == kLargeFixedArray) {
new_allocation =
lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
} else {
ASSERT_EQ(kLargeCode, space_index);
new_allocation =
lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
}
HeapObject* new_object = HeapObject::cast(new_allocation);
// Record all large objects in the same space.
address = new_object->address();
pages_[LO_SPACE].Add(address);
for (int i = 0; i < LAST_SPACE + 1; i++) {
reservations_[i] = kUninitializedReservation;
}
last_object_address_ = address;
return address;
}
// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
HeapObject* Deserializer::GetAddressFromEnd(int space) {
int offset = source_->GetInt();
ASSERT(!SpaceIsLarge(space));
offset <<= kObjectAlignmentBits;
return HeapObject::FromAddress(high_water_[space] - offset);
}
// This returns the address of an object that has been described in the
// snapshot as being offset bytes into a particular space.
HeapObject* Deserializer::GetAddressFromStart(int space) {
int offset = source_->GetInt();
if (SpaceIsLarge(space)) {
// Large spaces have one object per 'page'.
return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
}
offset <<= kObjectAlignmentBits;
if (space == NEW_SPACE) {
// New space has only one space - numbered 0.
return HeapObject::FromAddress(pages_[space][0] + offset);
}
ASSERT(SpaceIsPaged(space));
int page_of_pointee = offset >> kPageSizeBits;
Address object_address = pages_[space][page_of_pointee] +
(offset & Page::kPageAlignmentMask);
return HeapObject::FromAddress(object_address);
}
void Deserializer::Deserialize() {
isolate_ = Isolate::Current();
ASSERT(isolate_ != NULL);
{
// Don't GC while deserializing - just expand the heap.
AlwaysAllocateScope always_allocate;
// Don't use the free lists while deserializing.
LinearAllocationScope allocate_linearly;
// No active threads.
ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
// No active handles.
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder();
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
isolate_->heap()->set_native_contexts_list(
isolate_->heap()->undefined_value());
// Update data pointers to the external strings containing natives sources.
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
Object* source = isolate_->heap()->natives_source_cache()->get(i);
if (!source->IsUndefined()) {
ExternalAsciiString::cast(source)->update_data_cache();
}
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
// No active threads.
ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
// No active handles.
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder();
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
isolate_->heap()->set_native_contexts_list(
isolate_->heap()->undefined_value());
// Update data pointers to the external strings containing natives sources.
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
Object* source = isolate_->heap()->natives_source_cache()->get(i);
if (!source->IsUndefined()) {
ExternalAsciiString::cast(source)->update_data_cache();
}
}
@@ -713,10 +638,10 @@ void Deserializer::Deserialize() {
void Deserializer::DeserializePartial(Object** root) {
isolate_ = Isolate::Current();
// Don't GC while deserializing - just expand the heap.
AlwaysAllocateScope always_allocate;
// Don't use the free lists while deserializing.
LinearAllocationScope allocate_linearly;
for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
ASSERT(reservations_[i] != kUninitializedReservation);
}
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
if (external_reference_decoder_ == NULL) {
external_reference_decoder_ = new ExternalReferenceDecoder();
}
@@ -758,10 +683,9 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
void Deserializer::ReadObject(int space_number,
Space* space,
Object** write_back) {
int size = source_->GetInt() << kObjectAlignmentBits;
Address address = Allocate(space_number, space, size);
Address address = Allocate(space_number, size);
*write_back = HeapObject::FromAddress(address);
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
@@ -770,44 +694,19 @@ void Deserializer::ReadObject(int space_number,
}
ReadChunk(current, limit, space_number, address);
#ifdef DEBUG
bool is_codespace = (space == HEAP->code_space()) ||
((space == HEAP->lo_space()) && (space_number == kLargeCode));
bool is_codespace = (space_number == CODE_SPACE);
ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
#endif
}
// This macro is always used with a constant argument so it should all fold
// away to almost nothing in the generated code. It might be nicer to do this
// with the ternary operator but there are type issues with that.
#define ASSIGN_DEST_SPACE(space_number) \
Space* dest_space; \
if (space_number == NEW_SPACE) { \
dest_space = isolate->heap()->new_space(); \
} else if (space_number == OLD_POINTER_SPACE) { \
dest_space = isolate->heap()->old_pointer_space(); \
} else if (space_number == OLD_DATA_SPACE) { \
dest_space = isolate->heap()->old_data_space(); \
} else if (space_number == CODE_SPACE) { \
dest_space = isolate->heap()->code_space(); \
} else if (space_number == MAP_SPACE) { \
dest_space = isolate->heap()->map_space(); \
} else if (space_number == CELL_SPACE) { \
dest_space = isolate->heap()->cell_space(); \
} else { \
ASSERT(space_number >= LO_SPACE); \
dest_space = isolate->heap()->lo_space(); \
}
static const int kUnknownOffsetFromStart = -1;
void Deserializer::ReadChunk(Object** current,
Object** limit,
int source_space,
Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
bool write_barrier_needed = (current_object_address != NULL &&
source_space != NEW_SPACE &&
source_space != CELL_SPACE &&
@@ -823,21 +722,19 @@ void Deserializer::ReadChunk(Object** current,
ASSERT((within & ~kWhereToPointMask) == 0); \
ASSERT((space_number & ~kSpaceMask) == 0);
#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \
#define CASE_BODY(where, how, within, space_number_if_any) \
{ \
bool emit_write_barrier = false; \
bool current_was_incremented = false; \
int space_number = space_number_if_any == kAnyOldSpace ? \
(data & kSpaceMask) : space_number_if_any; \
if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
ASSIGN_DEST_SPACE(space_number) \
ReadObject(space_number, dest_space, current); \
ReadObject(space_number, current); \
emit_write_barrier = (space_number == NEW_SPACE); \
} else { \
Object* new_object = NULL; /* May not be a real Object pointer. */ \
if (where == kNewObject) { \
ASSIGN_DEST_SPACE(space_number) \
ReadObject(space_number, dest_space, &new_object); \
ReadObject(space_number, &new_object); \
} else if (where == kRootArray) { \
int root_id = source_->GetInt(); \
new_object = isolate->heap()->roots_array_start()[root_id]; \
@@ -848,6 +745,9 @@ void Deserializer::ReadChunk(Object** current,
[cache_index]; \
emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else if (where == kExternalReference) { \
int skip = source_->GetInt(); \
current = reinterpret_cast<Object**>(reinterpret_cast<Address>( \
current) + skip); \
int reference_id = source_->GetInt(); \
Address address = external_reference_decoder_-> \
Decode(reference_id); \
@@ -856,15 +756,12 @@ void Deserializer::ReadChunk(Object** current,
emit_write_barrier = (space_number == NEW_SPACE); \
new_object = GetAddressFromEnd(data & kSpaceMask); \
} else { \
ASSERT(where == kFromStart); \
if (offset_from_start == kUnknownOffsetFromStart) { \
emit_write_barrier = (space_number == NEW_SPACE); \
new_object = GetAddressFromStart(data & kSpaceMask); \
} else { \
Address object_address = pages_[space_number][0] + \
(offset_from_start << kObjectAlignmentBits); \
new_object = HeapObject::FromAddress(object_address); \
} \
ASSERT(where == kBackrefWithSkip); \
int skip = source_->GetInt(); \
current = reinterpret_cast<Object**>( \
reinterpret_cast<Address>(current) + skip); \
emit_write_barrier = (space_number == NEW_SPACE); \
new_object = GetAddressFromEnd(data & kSpaceMask); \
} \
if (within == kInnerPointer) { \
if (space_number != CODE_SPACE || new_object->IsCode()) { \
@@ -872,7 +769,7 @@ void Deserializer::ReadChunk(Object** current,
new_object = reinterpret_cast<Object*>( \
new_code_object->instruction_start()); \
} else { \
ASSERT(space_number == CODE_SPACE || space_number == kLargeCode);\
ASSERT(space_number == CODE_SPACE); \
JSGlobalPropertyCell* cell = \
JSGlobalPropertyCell::cast(new_object); \
new_object = reinterpret_cast<Object*>( \
@@ -904,47 +801,18 @@ void Deserializer::ReadChunk(Object** current,
break; \
} \
// This generates a case and a body for each space. The large object spaces are
// very rare in snapshots so they are grouped in one body.
#define ONE_PER_SPACE(where, how, within) \
CASE_STATEMENT(where, how, within, NEW_SPACE) \
CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \
CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
CASE_STATEMENT(where, how, within, CELL_SPACE) \
CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \
CASE_STATEMENT(where, how, within, kLargeData) \
CASE_STATEMENT(where, how, within, kLargeCode) \
CASE_STATEMENT(where, how, within, kLargeFixedArray) \
CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with five fall-through
// cases and one body.
#define ALL_SPACES(where, how, within) \
CASE_STATEMENT(where, how, within, NEW_SPACE) \
CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
CASE_BODY(where, how, within, NEW_SPACE) \
CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
CASE_STATEMENT(where, how, within, CELL_SPACE) \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
CASE_STATEMENT(where, how, within, kLargeData) \
CASE_STATEMENT(where, how, within, kLargeCode) \
CASE_STATEMENT(where, how, within, kLargeFixedArray) \
CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
#define ONE_PER_CODE_SPACE(where, how, within) \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
CASE_STATEMENT(where, how, within, kLargeCode) \
CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)
CASE_BODY(where, how, within, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
case byte_code: \
@@ -958,14 +826,48 @@ void Deserializer::ReadChunk(Object** current,
FOUR_CASES(byte_code + 8) \
FOUR_CASES(byte_code + 12)
#define COMMON_RAW_LENGTHS(f) \
f(1) \
f(2) \
f(3) \
f(4) \
f(5) \
f(6) \
f(7) \
f(8) \
f(9) \
f(10) \
f(11) \
f(12) \
f(13) \
f(14) \
f(15) \
f(16) \
f(17) \
f(18) \
f(19) \
f(20) \
f(21) \
f(22) \
f(23) \
f(24) \
f(25) \
f(26) \
f(27) \
f(28) \
f(29) \
f(30) \
f(31)
// We generate 31 cases and bodies that process special tags that combine
// the raw data tag and the length into one byte.
#define RAW_CASE(index, size) \
case kRawData + index: { \
byte* raw_data_out = reinterpret_cast<byte*>(current); \
source_->CopyRaw(raw_data_out, size); \
current = reinterpret_cast<Object**>(raw_data_out + size); \
break; \
#define RAW_CASE(index) \
case kRawData + index: { \
byte* raw_data_out = reinterpret_cast<byte*>(current); \
source_->CopyRaw(raw_data_out, index * kPointerSize); \
current = \
reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
break; \
}
COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
@@ -976,12 +878,11 @@ void Deserializer::ReadChunk(Object** current,
int size = source_->GetInt();
byte* raw_data_out = reinterpret_cast<byte*>(current);
source_->CopyRaw(raw_data_out, size);
current = reinterpret_cast<Object**>(raw_data_out + size);
break;
}
SIXTEEN_CASES(kRootArrayLowConstants)
SIXTEEN_CASES(kRootArrayHighConstants) {
SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance)
SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) {
int root_id = RootArrayConstantFromByteCode(data);
Object* object = isolate->heap()->roots_array_start()[root_id];
ASSERT(!isolate->heap()->InNewSpace(object));
@@ -989,6 +890,18 @@ void Deserializer::ReadChunk(Object** current,
break;
}
SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance)
SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) {
int root_id = RootArrayConstantFromByteCode(data);
int skip = source_->GetInt();
current = reinterpret_cast<Object**>(
reinterpret_cast<intptr_t>(current) + skip);
Object* object = isolate->heap()->roots_array_start()[root_id];
ASSERT(!isolate->heap()->InNewSpace(object));
*current++ = object;
break;
}
case kRepeat: {
int repeats = source_->GetInt();
Object* object = current[-1];
@@ -1000,10 +913,11 @@ void Deserializer::ReadChunk(Object** current,
STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
Heap::kOldSpaceRoots);
STATIC_ASSERT(kMaxRepeats == 12);
FOUR_CASES(kConstantRepeat)
FOUR_CASES(kConstantRepeat + 4)
FOUR_CASES(kConstantRepeat + 8) {
STATIC_ASSERT(kMaxRepeats == 13);
case kConstantRepeat:
FOUR_CASES(kConstantRepeat + 1)
FOUR_CASES(kConstantRepeat + 5)
FOUR_CASES(kConstantRepeat + 9) {
int repeats = RepeatsForCode(data);
Object* object = current[-1];
ASSERT(!isolate->heap()->InNewSpace(object));
@@ -1014,100 +928,80 @@ void Deserializer::ReadChunk(Object** current,
// Deserialize a new object and write a pointer to it to the current
// object.
ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
ALL_SPACES(kNewObject, kPlain, kStartOfObject)
// Support for direct instruction pointers in functions. It's an inner
// pointer because it points at the entry point, not at the start of the
// code object.
ONE_PER_CODE_SPACE(kNewObject, kPlain, kInnerPointer)
CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
// Deserialize a new code object and write a pointer to its first
// instruction to the current code object.
ONE_PER_SPACE(kNewObject, kFromCode, kInnerPointer)
ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
// Find a recently deserialized object using its offset from the current
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if V8_TARGET_ARCH_MIPS
// Deserialize a new object from pointer found in code and write
// a pointer to it to the current object. Required only for MIPS, and
// omitted on the other architectures because it is fully unrolled and
// would cause bloat.
ONE_PER_SPACE(kNewObject, kFromCode, kStartOfObject)
ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
// object. Required only for MIPS.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
// Find an already deserialized code object using its offset from
// the start and write a pointer to it to the current object.
// Required only for MIPS.
ALL_SPACES(kFromStart, kFromCode, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to its first instruction
// to the current code object or the instruction pointer in a function
// object.
ALL_SPACES(kBackref, kFromCode, kInnerPointer)
ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
ALL_SPACES(kBackref, kPlain, kInnerPointer)
// Find an already deserialized object using its offset from the start
// and write a pointer to it to the current object.
ALL_SPACES(kFromStart, kPlain, kStartOfObject)
ALL_SPACES(kFromStart, kPlain, kInnerPointer)
// Find an already deserialized code object using its offset from the
// start and write a pointer to its first instruction to the current code
// object.
ALL_SPACES(kFromStart, kFromCode, kInnerPointer)
ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
// Find an object in the roots array and write a pointer to it to the
// current object.
CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart)
CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
CASE_BODY(kPartialSnapshotCache,
kPlain,
kStartOfObject,
0,
kUnknownOffsetFromStart)
0)
// Find a code entry in the partial snapshots cache and
// write a pointer to it to the current object.
CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
CASE_BODY(kPartialSnapshotCache,
kPlain,
kInnerPointer,
0,
kUnknownOffsetFromStart)
0)
// Find an external reference and write a pointer to it to the current
// object.
CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
CASE_BODY(kExternalReference,
kPlain,
kStartOfObject,
0,
kUnknownOffsetFromStart)
0)
// Find an external reference and write a pointer to it in the current
// code object.
CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
CASE_BODY(kExternalReference,
kFromCode,
kStartOfObject,
0,
kUnknownOffsetFromStart)
0)
#undef CASE_STATEMENT
#undef CASE_BODY
#undef ONE_PER_SPACE
#undef ALL_SPACES
#undef ASSIGN_DEST_SPACE
case kNewPage: {
int space = source_->Get();
pages_[space].Add(last_object_address_);
if (space == CODE_SPACE) {
CPU::FlushICache(last_object_address_, Page::kPageSize);
}
break;
}
case kSkip: {
current++;
int size = source_->GetInt();
current = reinterpret_cast<Object**>(
reinterpret_cast<intptr_t>(current) + size);
break;
}
@@ -1132,18 +1026,20 @@ void Deserializer::ReadChunk(Object** current,
UNREACHABLE();
}
}
ASSERT_EQ(current, limit);
ASSERT_EQ(limit, current);
}
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
for (int shift = max_shift; shift > 0; shift -= 7) {
if (integer >= static_cast<uintptr_t>(1u) << shift) {
Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart");
}
}
PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
ASSERT(integer < 1 << 22);
integer <<= 2;
int bytes = 1;
if (integer > 0xff) bytes = 2;
if (integer > 0xffff) bytes = 3;
integer |= bytes;
Put(static_cast<int>(integer & 0xff), "IntPart1");
if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
}
@@ -1151,7 +1047,6 @@ Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),
current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder),
large_object_total_(0),
root_index_wave_front_(0) {
isolate_ = Isolate::Current();
// The serializer is meant to be used only to generate initial heap images
@@ -1184,6 +1079,7 @@ void StartupSerializer::SerializeStrongReferences() {
void PartialSerializer::Serialize(Object** object) {
this->VisitPointer(object);
Pad();
}
@@ -1198,14 +1094,14 @@ void Serializer::VisitPointers(Object** start, Object** end) {
if (reinterpret_cast<Address>(current) ==
isolate->heap()->store_buffer()->TopAddress()) {
sink_->Put(kSkip, "Skip");
sink_->PutInt(kPointerSize, "SkipOneWord");
} else if ((*current)->IsSmi()) {
sink_->Put(kRawData, "RawData");
sink_->PutInt(kPointerSize, "length");
sink_->Put(kRawData + 1, "Smi");
for (int i = 0; i < kPointerSize; i++) {
sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
}
} else {
SerializeObject(*current, kPlain, kStartOfObject);
SerializeObject(*current, kPlain, kStartOfObject, 0);
}
}
}
@@ -1292,58 +1188,50 @@ void Serializer::SerializeReferenceToPreviousObject(
int space,
int address,
HowToCode how_to_code,
WhereToPoint where_to_point) {
WhereToPoint where_to_point,
int skip) {
int offset = CurrentAllocationAddress(space) - address;
bool from_start = true;
if (SpaceIsPaged(space)) {
// For paged space it is simple to encode back from current allocation if
// the object is on the same page as the current allocation pointer.
if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
(address >> kPageSizeBits)) {
from_start = false;
address = offset;
}
} else if (space == NEW_SPACE) {
// For new space it is always simple to encode back from current allocation.
if (offset < address) {
from_start = false;
address = offset;
}
}
// If we are actually dealing with real offsets (and not a numbering of
// all objects) then we should shift out the bits that are always 0.
if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
if (from_start) {
sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
sink_->PutInt(address, "address");
} else {
// Shift out the bits that are always 0.
offset >>= kObjectAlignmentBits;
if (skip == 0) {
sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
sink_->PutInt(address, "address");
} else {
sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
"BackRefSerWithSkip");
sink_->PutInt(skip, "BackRefSkipDistance");
}
sink_->PutInt(offset, "offset");
}
void StartupSerializer::SerializeObject(
Object* o,
HowToCode how_to_code,
WhereToPoint where_to_point) {
WhereToPoint where_to_point,
int skip) {
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
int root_index;
if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
PutRoot(root_index, heap_object, how_to_code, where_to_point);
PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
return;
}
if (address_mapper_.IsMapped(heap_object)) {
int space = SpaceOfAlreadySerializedObject(heap_object);
int space = SpaceOfObject(heap_object);
int address = address_mapper_.MappedTo(heap_object);
SerializeReferenceToPreviousObject(space,
address,
how_to_code,
where_to_point);
where_to_point,
skip);
} else {
if (skip != 0) {
sink_->Put(kSkip, "FlushPendingSkip");
sink_->PutInt(skip, "SkipDistance");
}
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this,
heap_object,
@@ -1365,25 +1253,32 @@ void StartupSerializer::SerializeWeakReferences() {
Object* undefined = isolate->heap()->undefined_value();
VisitPointer(&undefined);
HEAP->IterateWeakRoots(this, VISIT_ALL);
Pad();
}
void Serializer::PutRoot(int root_index,
HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
SerializerDeserializer::WhereToPoint where_to_point) {
SerializerDeserializer::WhereToPoint where_to_point,
int skip) {
if (how_to_code == kPlain &&
where_to_point == kStartOfObject &&
root_index < kRootArrayNumberOfConstantEncodings &&
!HEAP->InNewSpace(object)) {
if (root_index < kRootArrayNumberOfLowConstantEncodings) {
sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant");
if (skip == 0) {
sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
"RootConstant");
} else {
sink_->Put(kRootArrayHighConstants + root_index -
kRootArrayNumberOfLowConstantEncodings,
"RootHiConstant");
sink_->Put(kRootArrayConstants + kHasSkipDistance + root_index,
"RootConstant");
sink_->PutInt(skip, "SkipInPutRoot");
}
} else {
if (skip != 0) {
sink_->Put(kSkip, "SkipFromPutRoot");
sink_->PutInt(skip, "SkipFromPutRootDistance");
}
sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
sink_->PutInt(root_index, "root_index");
}
@@ -1393,7 +1288,8 @@ void Serializer::PutRoot(int root_index,
void PartialSerializer::SerializeObject(
Object* o,
HowToCode how_to_code,
WhereToPoint where_to_point) {
WhereToPoint where_to_point,
int skip) {
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
@@ -1406,11 +1302,16 @@ void PartialSerializer::SerializeObject(
int root_index;
if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
PutRoot(root_index, heap_object, how_to_code, where_to_point);
PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
return;
}
if (ShouldBeInThePartialSnapshotCache(heap_object)) {
if (skip != 0) {
sink_->Put(kSkip, "SkipFromSerializeObject");
sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
}
int cache_index = PartialSnapshotCacheIndex(heap_object);
sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
"PartialSnapshotCache");
@@ -1427,13 +1328,18 @@ void PartialSerializer::SerializeObject(
ASSERT(!heap_object->IsSymbol());
if (address_mapper_.IsMapped(heap_object)) {
int space = SpaceOfAlreadySerializedObject(heap_object);
int space = SpaceOfObject(heap_object);
int address = address_mapper_.MappedTo(heap_object);
SerializeReferenceToPreviousObject(space,
address,
how_to_code,
where_to_point);
where_to_point,
skip);
} else {
if (skip != 0) {
sink_->Put(kSkip, "SkipFromSerializeObject");
sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
}
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this,
heap_object,
@@ -1457,16 +1363,11 @@ void Serializer::ObjectSerializer::Serialize() {
SnapshotPositionEvent(object_->address(), sink_->Position()));
// Mark this object as already serialized.
bool start_new_page;
int offset = serializer_->Allocate(space, size, &start_new_page);
int offset = serializer_->Allocate(space, size);
serializer_->address_mapper()->AddMapping(object_, offset);
if (start_new_page) {
sink_->Put(kNewPage, "NewPage");
sink_->PutSection(space, "NewPageSpace");
}
// Serialize the map (first word of the object).
serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);
serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0);
// Serialize the rest of the object.
CHECK_EQ(0, bytes_processed_so_far_);
@@ -1507,7 +1408,8 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
}
} else {
serializer_->SerializeObject(current_contents, kPlain, kStartOfObject);
serializer_->SerializeObject(
current_contents, kPlain, kStartOfObject, 0);
bytes_processed_so_far_ += kPointerSize;
current++;
}
@@ -1519,9 +1421,10 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
Object** current = rinfo->target_object_address();
OutputRawData(rinfo->target_address_address());
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
serializer_->SerializeObject(*current, representation, kStartOfObject);
serializer_->SerializeObject(*current, representation, kStartOfObject, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
@@ -1529,10 +1432,12 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
Address* end) {
Address references_start = reinterpret_cast<Address>(start);
OutputRawData(references_start);
int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
for (Address* current = start; current < end; current++) {
sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
sink_->PutInt(skip, "SkipB4ExternalRef");
skip = 0;
int reference_id = serializer_->EncodeExternalReference(*current);
sink_->PutInt(reference_id, "reference id");
}
@@ -1542,12 +1447,13 @@ void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
Address references_start = rinfo->target_address_address();
OutputRawData(references_start);
int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
Address* current = rinfo->target_reference_address();
int representation = rinfo->IsCodedSpecially() ?
kFromCode + kStartOfObject : kPlain + kStartOfObject;
sink_->Put(kExternalReference + representation, "ExternalRef");
sink_->PutInt(skip, "SkipB4ExternalRef");
int reference_id = serializer_->EncodeExternalReference(*current);
sink_->PutInt(reference_id, "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
@@ -1556,7 +1462,7 @@ void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
Address target_start = rinfo->target_address_address();
OutputRawData(target_start);
int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
Address target = rinfo->target_address();
uint32_t encoding = serializer_->EncodeExternalReference(target);
CHECK(target == NULL ? encoding == 0 : encoding != 0);
@@ -1568,6 +1474,7 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
representation = kStartOfObject + kPlain;
}
sink_->Put(kExternalReference + representation, "ExternalReference");
sink_->PutInt(skip, "SkipB4ExternalRef");
sink_->PutInt(encoding, "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
}
@@ -1576,17 +1483,17 @@ void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Address target_start = rinfo->target_address_address();
OutputRawData(target_start);
int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
serializer_->SerializeObject(target, kFromCode, kInnerPointer);
serializer_->SerializeObject(target, kFromCode, kInnerPointer, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
OutputRawData(entry_address);
serializer_->SerializeObject(target, kPlain, kInnerPointer);
int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
serializer_->SerializeObject(target, kPlain, kInnerPointer, skip);
bytes_processed_so_far_ += kPointerSize;
}
@@ -1595,8 +1502,8 @@ void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(rinfo->target_cell());
OutputRawData(rinfo->pc());
serializer_->SerializeObject(cell, kPlain, kInnerPointer);
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
serializer_->SerializeObject(cell, kPlain, kInnerPointer, skip);
}
@@ -1624,59 +1531,58 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
}
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
int Serializer::ObjectSerializer::OutputRawData(
Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
Address object_start = object_->address();
Address base = object_start + bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
int skipped = up_to_offset - bytes_processed_so_far_;
int to_skip = up_to_offset - bytes_processed_so_far_;
int bytes_to_output = to_skip;
bytes_processed_so_far_ += to_skip;
// This assert will fail if the reloc info gives us the target_address_address
// locations in a non-ascending order. Luckily that doesn't happen.
ASSERT(skipped >= 0);
if (skipped != 0) {
Address base = object_start + bytes_processed_so_far_;
#define RAW_CASE(index, length) \
if (skipped == length) { \
ASSERT(to_skip >= 0);
bool outputting_code = false;
if (to_skip != 0 && code_object_ && !code_has_been_output_) {
// Output the code all at once and fix later.
bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
outputting_code = true;
code_has_been_output_ = true;
}
if (bytes_to_output != 0 &&
(!code_object_ || outputting_code)) {
#define RAW_CASE(index) \
if (!outputting_code && bytes_to_output == index * kPointerSize && \
index * kPointerSize == to_skip) { \
sink_->PutSection(kRawData + index, "RawDataFixed"); \
to_skip = 0; /* This insn already skips. */ \
} else /* NOLINT */
COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
{ /* NOLINT */
// We always end up here if we are outputting the code of a code object.
sink_->Put(kRawData, "RawData");
sink_->PutInt(skipped, "length");
sink_->PutInt(bytes_to_output, "length");
}
for (int i = 0; i < skipped; i++) {
for (int i = 0; i < bytes_to_output; i++) {
unsigned int data = base[i];
sink_->PutSection(data, "Byte");
}
bytes_processed_so_far_ += skipped;
}
}
int Serializer::SpaceOfObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
if (HEAP->InSpace(object, s)) {
if (i == LO_SPACE) {
if (object->IsCode()) {
return kLargeCode;
} else if (object->IsFixedArray()) {
return kLargeFixedArray;
} else {
return kLargeData;
}
}
return i;
}
if (to_skip != 0 && return_skip == kIgnoringReturn) {
sink_->Put(kSkip, "Skip");
sink_->PutInt(to_skip, "SkipDistance");
to_skip = 0;
}
UNREACHABLE();
return 0;
return to_skip;
}
int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
int Serializer::SpaceOfObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
if (HEAP->InSpace(object, s)) {
ASSERT(i < kNumberOfSpaces);
return i;
}
}
@@ -1685,34 +1591,8 @@ int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
}
int Serializer::Allocate(int space, int size, bool* new_page) {
int Serializer::Allocate(int space, int size) {
CHECK(space >= 0 && space < kNumberOfSpaces);
if (SpaceIsLarge(space)) {
// In large object space we merely number the objects instead of trying to
// determine some sort of address.
*new_page = true;
large_object_total_ += size;
return fullness_[LO_SPACE]++;
}
*new_page = false;
if (fullness_[space] == 0) {
*new_page = true;
}
if (SpaceIsPaged(space)) {
// Paged spaces are a little special. We encode their addresses as if the
// pages were all contiguous and each page were filled up in the range
// 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous
// and allocation does not start at offset 0 in the page, but this scheme
// means the deserializer can get the page number quickly by shifting the
// serialized address.
CHECK(IsPowerOf2(Page::kPageSize));
int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
CHECK(size <= SpaceAreaSize(space));
if (used_in_this_page + size > SpaceAreaSize(space)) {
*new_page = true;
fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
}
}
int allocation_address = fullness_[space];
fullness_[space] = allocation_address + size;
return allocation_address;
@@ -1728,4 +1608,21 @@ int Serializer::SpaceAreaSize(int space) {
}
void Serializer::Pad() {
// The non-branching GetInt will read up to 3 bytes too far, so we need
// to pad the snapshot to make sure we don't read over the end.
for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
sink_->Put(kNop, "Padding");
}
}
bool SnapshotByteSource::AtEOF() {
if (0u + length_ - position_ > sizeof(uint32_t)) return false;
for (int x = position_; x < length_; x++) {
if (data_[x] != SerializerDeserializer::nop()) return false;
}
return true;
}
} } // namespace v8::internal
@@ -170,13 +170,27 @@ class SnapshotByteSource {
return data_[position_++];
}
int32_t GetUnalignedInt() {
#if defined(V8_HOST_CAN_READ_UNALIGNED) && __BYTE_ORDER == __LITTLE_ENDIAN
int32_t answer;
ASSERT(position_ + sizeof(answer) <= length_ + 0u);
answer = *reinterpret_cast<const int32_t*>(data_ + position_);
#else
int32_t answer = data_[position_];
answer |= data_[position_ + 1] << 8;
answer |= data_[position_ + 2] << 16;
answer |= data_[position_ + 3] << 24;
#endif
return answer;
}
void Advance(int by) { position_ += by; }
inline void CopyRaw(byte* to, int number_of_bytes);
inline int GetInt();
bool AtEOF() {
return position_ == length_;
}
bool AtEOF();
int position() { return position_; }
@@ -187,23 +201,6 @@ class SnapshotByteSource {
};
#define COMMON_RAW_LENGTHS(f) \
f(1, 1) \
f(2, 2) \
f(3, 3) \
f(4, 4) \
f(5, 5) \
f(6, 6) \
f(7, 7) \
f(8, 8) \
f(9, 12) \
f(10, 16) \
f(11, 20) \
f(12, 24) \
f(13, 28) \
f(14, 32) \
f(15, 36)
// The Serializer/Deserializer class is a common superclass for Serializer and
// Deserializer which is used to store common constants and methods used by
// both.
@@ -211,23 +208,24 @@ class SerializerDeserializer: public ObjectVisitor {
public:
static void Iterate(ObjectVisitor* visitor);
static int nop() { return kNop; }
protected:
// Where the pointed-to object can be found:
enum Where {
kNewObject = 0, // Object is next in snapshot.
// 1-8 One per space.
// 1-6 One per space.
kRootArray = 0x9, // Object is found in root array.
kPartialSnapshotCache = 0xa, // Object is in the cache.
kExternalReference = 0xb, // Pointer to an external reference.
kSkip = 0xc, // Skip a pointer sized cell.
// 0xd-0xf Free.
kBackref = 0x10, // Object is described relative to end.
// 0x11-0x18 One per space.
// 0x19-0x1f Free.
kFromStart = 0x20, // Object is described relative to start.
// 0x21-0x28 One per space.
// 0x29-0x2f Free.
// 0x30-0x3f Used by misc. tags below.
kSkip = 0xc, // Skip n bytes.
kNop = 0xd, // Does nothing, used to pad.
// 0xe-0xf Free.
kBackref = 0x10, // Object is described relative to end.
// 0x11-0x16 One per space.
kBackrefWithSkip = 0x18, // Object is described relative to end.
// 0x19-0x1e One per space.
// 0x20-0x3f Used by misc. tags below.
kPointedToMask = 0x3f
};
@@ -239,6 +237,13 @@ class SerializerDeserializer: public ObjectVisitor {
kHowToCodeMask = 0x40
};
// For kRootArrayConstants
enum WithSkip {
kNoSkipDistance = 0,
kHasSkipDistance = 0x40,
kWithSkipMask = 0x40
};
// Where to point within the object.
enum WhereToPoint {
kStartOfObject = 0,
@@ -247,9 +252,12 @@ class SerializerDeserializer: public ObjectVisitor {
};
// Misc.
// Raw data to be copied from the snapshot.
static const int kRawData = 0x30;
// Some common raw lengths: 0x31-0x3f
// Raw data to be copied from the snapshot. This byte code does not advance
// the current pointer, which is used for code objects, where we write the
// entire code in one memcpy, then fix up stuff with kSkip and other byte
// codes that overwrite data.
static const int kRawData = 0x20;
// Some common raw lengths: 0x21-0x3f. These autoadvance the current pointer.
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
@@ -259,64 +267,44 @@ class SerializerDeserializer: public ObjectVisitor {
// Used for the source code of the natives, which is in the executable, but
// is referred to from external strings in the snapshot.
static const int kNativesStringResource = 0x71;
static const int kNewPage = 0x72;
static const int kRepeat = 0x73;
static const int kConstantRepeat = 0x74;
// 0x74-0x7f Repeat last word (subtract 0x73 to get the count).
static const int kMaxRepeats = 0x7f - 0x73;
static const int kRepeat = 0x72;
static const int kConstantRepeat = 0x73;
// 0x73-0x7f Repeat last word (subtract 0x72 to get the count).
static const int kMaxRepeats = 0x7f - 0x72;
static int CodeForRepeats(int repeats) {
ASSERT(repeats >= 1 && repeats <= kMaxRepeats);
return 0x73 + repeats;
return 0x72 + repeats;
}
static int RepeatsForCode(int byte_code) {
ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f);
return byte_code - 0x73;
return byte_code - 0x72;
}
static const int kRootArrayLowConstants = 0xb0;
// 0xb0-0xbf Things from the first 16 elements of the root array.
static const int kRootArrayHighConstants = 0xf0;
// 0xf0-0xff Things from the next 16 elements of the root array.
static const int kRootArrayConstants = 0xa0;
// 0xa0-0xbf Things from the first 32 elements of the root array.
static const int kRootArrayNumberOfConstantEncodings = 0x20;
static const int kRootArrayNumberOfLowConstantEncodings = 0x10;
static int RootArrayConstantFromByteCode(int byte_code) {
int constant = (byte_code & 0xf) | ((byte_code & 0x40) >> 2);
ASSERT(constant >= 0 && constant < kRootArrayNumberOfConstantEncodings);
return constant;
return byte_code & 0x1f;
}
static const int kLargeData = LAST_SPACE;
static const int kLargeCode = kLargeData + 1;
static const int kLargeFixedArray = kLargeCode + 1;
static const int kNumberOfSpaces = kLargeFixedArray + 1;
static const int kNumberOfSpaces = LO_SPACE;
static const int kAnyOldSpace = -1;
// A bitmask for getting the space out of an instruction.
static const int kSpaceMask = 15;
static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
static inline bool SpaceIsPaged(int space) {
return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
}
static const int kSpaceMask = 7;
};
int SnapshotByteSource::GetInt() {
// A little unrolling to catch the really small ints.
int snapshot_byte = Get();
if ((snapshot_byte & 0x80) == 0) {
return snapshot_byte;
}
int accumulator = (snapshot_byte & 0x7f) << 7;
while (true) {
snapshot_byte = Get();
if ((snapshot_byte & 0x80) == 0) {
return accumulator | snapshot_byte;
}
accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
}
UNREACHABLE();
return accumulator;
// This way of variable-length encoding integers does not suffer from branch
// mispredictions.
uint32_t answer = GetUnalignedInt();
int bytes = answer & 3;
Advance(bytes);
uint32_t mask = 0xffffffffu;
mask >>= 32 - (bytes << 3);
answer &= mask;
answer >>= 2;
return answer;
}
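The encoder side is not shown in this hunk; the following standalone sketch reconstructs it from the decoder's invariants (EncodeInt/DecodeInt and the 22-bit value limit are assumptions, not V8 API, and the round-trip as written relies on a little-endian host). Note that the four-byte unaligned read can overrun the last encoded integer, which is presumably why this change also adds Serializer::Pad() further down:

#include <cassert>
#include <cstdint>
#include <cstring>

// Hedged sketch of the branch-free varint scheme implied by GetInt() above:
// the low 2 bits of the first byte hold the byte count (1-3) and the value
// is stored shifted left by 2, little-endian.
static int EncodeInt(uint32_t value, uint8_t* out) {
  assert(value < (1u << 22));            // 3 bytes minus the 2 tag bits.
  uint32_t bytes = 1;                    // never 0, so the decoder's shift
  if (value > 0x3f) bytes = 2;           // below is always well defined.
  if (value > 0x3fff) bytes = 3;
  uint32_t encoded = (value << 2) | bytes;
  for (uint32_t i = 0; i < bytes; i++) {
    out[i] = static_cast<uint8_t>(encoded >> (8 * i));
  }
  return static_cast<int>(bytes);
}

static uint32_t DecodeInt(const uint8_t* in, int* advance) {
  uint32_t answer;
  memcpy(&answer, in, sizeof(answer));   // 4-byte unaligned read; requires
                                         // padding after the last integer.
  int bytes = answer & 3;                // 1-3 by construction
  *advance = bytes;
  uint32_t mask = 0xffffffffu;
  mask >>= 32 - (bytes << 3);            // keep only 'bytes' bytes
  return (answer & mask) >> 2;           // drop the 2 tag bits
}

int main() {
  uint8_t buf[8] = {0};
  for (uint32_t v : {0u, 63u, 64u, 16383u, 16384u, (1u << 22) - 1}) {
    int used = EncodeInt(v, buf);
    int advance = 0;
    assert(DecodeInt(buf, &advance) == v && advance == used);
  }
  return 0;
}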
......@@ -340,6 +328,12 @@ class Deserializer: public SerializerDeserializer {
// Deserialize a single object and the objects reachable from it.
void DeserializePartial(Object** root);
void set_reservation(int space_number, uintptr_t reservation) {
ASSERT(space_number >= 0);
ASSERT(space_number <= LAST_SPACE);
reservations_[space_number] = reservation;
}
private:
virtual void VisitPointers(Object** start, Object** end);
......@@ -358,28 +352,36 @@ class Deserializer: public SerializerDeserializer {
// the heap.
void ReadChunk(
Object** start, Object** end, int space, Address object_address);
HeapObject* GetAddressFromStart(int space);
inline HeapObject* GetAddressFromEnd(int space);
Address Allocate(int space_number, Space* space, int size);
void ReadObject(int space_number, Space* space, Object** write_back);
void ReadObject(int space_number, Object** write_back);
// This routine allocates a new object and keeps track of where objects
// have been allocated, so that we can fix up back references when
// deserializing.
Address Allocate(int space_index, int size) {
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
return address;
}
// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
HeapObject* GetAddressFromEnd(int space) {
int offset = source_->GetInt();
offset <<= kObjectAlignmentBits;
return HeapObject::FromAddress(high_water_[space] - offset);
}
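Taken together, a self-contained model of the bump allocator and back-reference resolution (the addresses, sizes, and the 32-bit kObjectAlignmentBits value are illustrative assumptions):

#include <cassert>
#include <cstdint>

// Minimal model of the deserializer's bump allocation and back-references.
static const int kObjectAlignmentBitsSketch = 2;  // assumes 32-bit layout

struct SpaceModel {
  uintptr_t high_water;                  // next allocation point
  uintptr_t Allocate(int size) {         // bump-pointer allocation
    uintptr_t address = high_water;
    high_water += size;
    return address;
  }
  uintptr_t FromEnd(int offset) {        // back-reference: offset words back
    return high_water -
           (static_cast<uintptr_t>(offset) << kObjectAlignmentBitsSketch);
  }
};

int main() {
  SpaceModel space = {0x40000000u};
  space.Allocate(0x40);                  // first object
  uintptr_t b = space.Allocate(0x20);    // most recently allocated object
  // The snapshot would describe b as "0x20 bytes back", i.e. 8 words.
  int offset = static_cast<int>((space.high_water - b) >>
                                kObjectAlignmentBitsSketch);
  assert(offset == 8);
  assert(space.FromEnd(offset) == b);
  return 0;
}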
// Cached current isolate.
Isolate* isolate_;
// Keep track of the pages in the paged spaces.
// (In large object space we are keeping track of individual objects
// rather than pages.) In new space we just need the address of the
// first object and the others will flow from that.
List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
SnapshotByteSource* source_;
// This is the address of the next object that will be allocated in each
// space. It is used to calculate the addresses of back-references.
Address high_water_[LAST_SPACE + 1];
// This is the address of the most recent object that was allocated. It
// is used to set the location of the new page when we encounter a
// START_NEW_PAGE_SERIALIZATION tag.
Address last_object_address_;
intptr_t reservations_[LAST_SPACE + 1];
static const intptr_t kUninitializedReservation = -1;
ExternalReferenceDecoder* external_reference_decoder_;
......@@ -461,7 +463,7 @@ class Serializer : public SerializerDeserializer {
// You can call this after serialization to find out how much space was used
// in each space.
int CurrentAllocationAddress(int space) {
if (SpaceIsLarge(space)) return large_object_total_;
ASSERT(space < kNumberOfSpaces);
return fullness_[space];
}
......@@ -478,8 +480,11 @@ class Serializer : public SerializerDeserializer {
static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
static bool enabled() { return serialization_enabled_; }
SerializationAddressMapper* address_mapper() { return &address_mapper_; }
void PutRoot(
int index, HeapObject* object, HowToCode how, WhereToPoint where);
void PutRoot(int index,
HeapObject* object,
HowToCode how,
WhereToPoint where,
int skip);
protected:
static const int kInvalidRootIndex = -1;
......@@ -503,7 +508,9 @@ class Serializer : public SerializerDeserializer {
object_(HeapObject::cast(o)),
sink_(sink),
reference_representation_(how_to_code + where_to_point),
bytes_processed_so_far_(0) { }
bytes_processed_so_far_(0),
code_object_(o->IsCode()),
code_has_been_output_(false) { }
void Serialize();
void VisitPointers(Object** start, Object** end);
void VisitEmbeddedPointer(RelocInfo* target);
......@@ -523,34 +530,36 @@ class Serializer : public SerializerDeserializer {
}
private:
void OutputRawData(Address up_to);
enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
// This function outputs or skips the raw data between the last pointer and
// the current position. It can optionally return the number of bytes to
// skip instead of emitting a skip instruction itself, in case the skip can
// be merged into the next instruction.
int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
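For example, where the serializer would previously emit kSkip <n> followed by kBackref + space, the skip distance can now be folded into the reference itself as kBackrefWithSkip + space <n>, so the deserializer pays one dispatch instead of two.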
Serializer* serializer_;
HeapObject* object_;
SnapshotByteSink* sink_;
int reference_representation_;
int bytes_processed_so_far_;
bool code_object_;
bool code_has_been_output_;
};
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
WhereToPoint where_to_point) = 0;
WhereToPoint where_to_point,
int skip) = 0;
void SerializeReferenceToPreviousObject(
int space,
int address,
HowToCode how_to_code,
WhereToPoint where_to_point);
WhereToPoint where_to_point,
int skip);
void InitializeAllocators();
// This will return the space for an object. If the object is in large
// object space it may return kLargeCode or kLargeFixedArray in order
// to indicate to the deserializer what kind of large object allocation
// to make.
// This will return the space for an object.
static int SpaceOfObject(HeapObject* object);
// This just returns the space of the object. It will return LO_SPACE
// for all large objects since you can't check the type of the object
// once the map has been used for the serialization address.
static int SpaceOfAlreadySerializedObject(HeapObject* object);
int Allocate(int space, int size, bool* new_page_started);
int Allocate(int space, int size);
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
......@@ -559,9 +568,7 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
// just numbered sequentially since relative addresses make no
// sense in large object space.
// relative addresses for back references.
int fullness_[LAST_SPACE + 1];
SnapshotByteSink* sink_;
int current_root_index_;
......@@ -569,9 +576,9 @@ class Serializer : public SerializerDeserializer {
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
static bool too_late_to_enable_now_;
int large_object_total_;
SerializationAddressMapper address_mapper_;
intptr_t root_index_wave_front_;
void Pad();
friend class ObjectSerializer;
friend class Deserializer;
......@@ -594,7 +601,8 @@ class PartialSerializer : public Serializer {
virtual void Serialize(Object** o);
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
WhereToPoint where_to_point);
WhereToPoint where_to_point,
int skip);
protected:
virtual int PartialSnapshotCacheIndex(HeapObject* o);
......@@ -632,11 +640,13 @@ class StartupSerializer : public Serializer {
virtual void SerializeStrongReferences();
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
WhereToPoint where_to_point);
WhereToPoint where_to_point,
int skip);
void SerializeWeakReferences();
void Serialize() {
SerializeStrongReferences();
SerializeWeakReferences();
Pad();
}
private:
......
......@@ -37,10 +37,47 @@
namespace v8 {
namespace internal {
bool Snapshot::Deserialize(const byte* content, int len) {
SnapshotByteSource source(content, len);
Deserializer deserializer(&source);
return V8::Initialize(&deserializer);
static void ReserveSpaceForSnapshot(Deserializer* deserializer,
const char* file_name) {
int file_name_length = StrLength(file_name) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
OS::SNPrintF(name, "%s.size", file_name);
FILE* fp = OS::FOpen(name.start(), "r");
CHECK_NE(NULL, fp);
int new_size, pointer_size, data_size, code_size, map_size, cell_size;
#ifdef _MSC_VER
// Avoid warning about unsafe fscanf from MSVC.
// Please note that this is only fine if %c and %s are not being used.
#define fscanf fscanf_s
#endif
CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size));
CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size));
CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size));
CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
#ifdef _MSC_VER
#undef fscanf
#endif
fclose(fp);
deserializer->set_reservation(NEW_SPACE, new_size);
deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size);
deserializer->set_reservation(OLD_DATA_SPACE, data_size);
deserializer->set_reservation(CODE_SPACE, code_size);
deserializer->set_reservation(MAP_SPACE, map_size);
deserializer->set_reservation(CELL_SPACE, cell_size);
name.Dispose();
}
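For illustration, the file parsed here has the six-line format produced by FileByteSink::WriteSpaceUsed in the serialization tests below (all numbers hypothetical):

new 0
pointer 438424
data 316536
code 322944
map 14104
cell 32344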
void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
deserializer->set_reservation(NEW_SPACE, new_space_used_);
deserializer->set_reservation(OLD_POINTER_SPACE, pointer_space_used_);
deserializer->set_reservation(OLD_DATA_SPACE, data_space_used_);
deserializer->set_reservation(CODE_SPACE, code_space_used_);
deserializer->set_reservation(MAP_SPACE, map_space_used_);
deserializer->set_reservation(CELL_SPACE, cell_space_used_);
}
......@@ -49,12 +86,17 @@ bool Snapshot::Initialize(const char* snapshot_file) {
int len;
byte* str = ReadBytes(snapshot_file, &len);
if (!str) return false;
Deserialize(str, len);
SnapshotByteSource source(str, len);
Deserializer deserializer(&source);
ReserveSpaceForSnapshot(&deserializer, snapshot_file);
bool success = V8::Initialize(&deserializer);
DeleteArray(str);
return true;
return success;
} else if (size_ > 0) {
Deserialize(raw_data_, raw_size_);
return true;
SnapshotByteSource source(raw_data_, raw_size_);
Deserializer deserializer(&source);
ReserveSpaceForLinkedInSnapshot(&deserializer);
return V8::Initialize(&deserializer);
}
return false;
}
......@@ -69,17 +111,16 @@ Handle<Context> Snapshot::NewContextFromSnapshot() {
if (context_size_ == 0) {
return Handle<Context>();
}
HEAP->ReserveSpace(new_space_used_,
pointer_space_used_,
data_space_used_,
code_space_used_,
map_space_used_,
cell_space_used_,
large_space_used_);
SnapshotByteSource source(context_raw_data_,
context_raw_size_);
Deserializer deserializer(&source);
Object* root;
deserializer.set_reservation(NEW_SPACE, context_new_space_used_);
deserializer.set_reservation(OLD_POINTER_SPACE, context_pointer_space_used_);
deserializer.set_reservation(OLD_DATA_SPACE, context_data_space_used_);
deserializer.set_reservation(CODE_SPACE, context_code_space_used_);
deserializer.set_reservation(MAP_SPACE, context_map_space_used_);
deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
deserializer.DeserializePartial(&root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
......
......@@ -49,6 +49,12 @@ const int Snapshot::data_space_used_ = 0;
const int Snapshot::code_space_used_ = 0;
const int Snapshot::map_space_used_ = 0;
const int Snapshot::cell_space_used_ = 0;
const int Snapshot::large_space_used_ = 0;
const int Snapshot::context_new_space_used_ = 0;
const int Snapshot::context_pointer_space_used_ = 0;
const int Snapshot::context_data_space_used_ = 0;
const int Snapshot::context_code_space_used_ = 0;
const int Snapshot::context_map_space_used_ = 0;
const int Snapshot::context_cell_space_used_ = 0;
} } // namespace v8::internal
......@@ -77,13 +77,18 @@ class Snapshot {
static const int code_space_used_;
static const int map_space_used_;
static const int cell_space_used_;
static const int large_space_used_;
static const int context_new_space_used_;
static const int context_pointer_space_used_;
static const int context_data_space_used_;
static const int context_code_space_used_;
static const int context_map_space_used_;
static const int context_cell_space_used_;
static const int size_;
static const int raw_size_;
static const int context_size_;
static const int context_raw_size_;
static bool Deserialize(const byte* content, int len);
static void ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer);
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};
......
......@@ -269,6 +269,10 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
return object;
}
ASSERT(!heap()->linear_allocation() ||
(anchor_.next_chunk() == &anchor_ &&
anchor_.prev_chunk() == &anchor_));
object = free_list_.Allocate(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
......
......@@ -881,10 +881,10 @@ intptr_t PagedSpace::SizeOfFirstPage() {
size = 192 * KB;
break;
case MAP_SPACE:
size = 128 * KB;
size = 16 * kPointerSize * KB;
break;
case CELL_SPACE:
size = 96 * KB;
size = 16 * kPointerSize * KB;
break;
case CODE_SPACE:
if (kPointerSize == 8) {
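In other words, the first map and cell pages now scale with the word size: 16 * kPointerSize * KB is 64 KB when kPointerSize is 4 and 128 KB when it is 8, replacing the fixed 128 KB and 96 KB sizes above.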
......@@ -2258,11 +2258,40 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
Free(top(), old_linear_size);
SetTop(new_area->address(), new_area->address() + size_in_bytes);
Allocate(size_in_bytes);
return true;
}
static void RepairFreeList(Heap* heap, FreeListNode* n) {
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
*map_location = heap->free_space_map();
} else {
ASSERT(*map_location == heap->free_space_map());
}
n = n->next();
}
}
void FreeList::RepairLists(Heap* heap) {
RepairFreeList(heap, small_list_);
RepairFreeList(heap, medium_list_);
RepairFreeList(heap, large_list_);
RepairFreeList(heap, huge_list_);
}
// After we have booted, we have created a map which represents free space
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterBoot() {
free_list_.RepairLists(heap());
}
// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
......
......@@ -790,14 +790,6 @@ class Space : public Malloced {
virtual void Print() = 0;
#endif
// After calling this we can allocate a certain number of bytes using only
// linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
// without using freelists or causing a GC. This is used by partial
// snapshots. It returns true if space was reserved or false if a GC is
// needed. For paged spaces the space requested must include the space wasted
// at the end of each page when allocating linearly.
virtual bool ReserveSpace(int bytes) = 0;
private:
Heap* heap_;
AllocationSpace id_;
......@@ -1318,6 +1310,11 @@ class FreeListNode: public HeapObject {
inline void Zap();
static inline FreeListNode* cast(MaybeObject* maybe) {
ASSERT(!maybe->IsFailure());
return reinterpret_cast<FreeListNode*>(maybe);
}
private:
static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
......@@ -1380,6 +1377,9 @@ class FreeList BASE_EMBEDDED {
bool IsVeryLong();
#endif
// Used after booting the VM.
void RepairLists(Heap* heap);
struct SizeStats {
intptr_t Total() {
return small_size_ + medium_size_ + large_size_ + huge_size_;
......@@ -1460,6 +1460,10 @@ class PagedSpace : public Space {
// linear in the number of objects in the page. It may be slow.
MUST_USE_RESULT MaybeObject* FindObject(Address addr);
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
virtual void RepairFreeListsAfterBoot();
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact();
......
......@@ -1244,7 +1244,9 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
size_of_objects_2 += obj->Size();
if (!obj->IsFreeSpace()) {
size_of_objects_2 += obj->Size();
}
}
// Delta must be within 5% of the larger result.
// TODO(gc): Tighten this up by distinguishing between byte
......@@ -1273,7 +1275,6 @@ static void FillUpNewSpace(NewSpace* new_space) {
// that the scavenger does not undo the filling.
v8::HandleScope scope;
AlwaysAllocateScope always_allocate;
LinearAllocationScope allocate_linearly;
intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
for (intptr_t i = 0; i < number_of_fillers; i++) {
......@@ -1928,8 +1929,13 @@ TEST(ReleaseOverReservedPages) {
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
// Triggering a last-resort GC should cause all pages to be released
// to the OS so that other processes can seize the memory.
// Triggering a last-resort GC should cause all pages to be released to the
// OS so that other processes can seize the memory. If we get a failure here
// where there are 2 pages left instead of 1, then we should increase the
// size of the first page a little in SizeOfFirstPage in spaces.cc. The
// first page should be small in order to reduce memory used when the VM
// boots, but if the 20 small arrays don't fit on the first page then that's
// an indication that it is too small.
HEAP->CollectAllAvailableGarbage("triggered really hard");
CHECK_EQ(1, old_pointer_space->CountTotalPages());
}
......
......@@ -545,9 +545,9 @@ TEST(BootUpMemoryUse) {
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(delta, 2600 * 1024); // 2484.
CHECK_LE(delta, 2500 * 1024); // 2400.
} else {
CHECK_LE(delta, 2950 * 1024); // 2844
CHECK_LE(delta, 2860 * 1024); // 2760.
}
}
}
......
......@@ -196,8 +196,7 @@ class FileByteSink : public SnapshotByteSink {
int data_space_used,
int code_space_used,
int map_space_used,
int cell_space_used,
int large_space_used);
int cell_space_used);
private:
FILE* fp_;
......@@ -211,8 +210,7 @@ void FileByteSink::WriteSpaceUsed(
int data_space_used,
int code_space_used,
int map_space_used,
int cell_space_used,
int large_space_used) {
int cell_space_used) {
int file_name_length = StrLength(file_name_) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
OS::SNPrintF(name, "%s.size", file_name_);
......@@ -224,7 +222,6 @@ void FileByteSink::WriteSpaceUsed(
fprintf(fp, "code %d\n", code_space_used);
fprintf(fp, "map %d\n", map_space_used);
fprintf(fp, "cell %d\n", cell_space_used);
fprintf(fp, "large %d\n", large_space_used);
fclose(fp);
}
......@@ -233,6 +230,15 @@ static bool WriteToFile(const char* snapshot_file) {
FileByteSink file(snapshot_file);
StartupSerializer ser(&file);
ser.Serialize();
file.WriteSpaceUsed(
ser.CurrentAllocationAddress(NEW_SPACE),
ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
ser.CurrentAllocationAddress(OLD_DATA_SPACE),
ser.CurrentAllocationAddress(CODE_SPACE),
ser.CurrentAllocationAddress(MAP_SPACE),
ser.CurrentAllocationAddress(CELL_SPACE));
return true;
}
......@@ -384,7 +390,6 @@ TEST(PartialSerialization) {
env.Dispose();
FileByteSink startup_sink(startup_name.start());
startup_name.Dispose();
StartupSerializer startup_serializer(&startup_sink);
startup_serializer.SerializeStrongReferences();
......@@ -392,26 +397,35 @@ TEST(PartialSerialization) {
PartialSerializer p_ser(&startup_serializer, &partial_sink);
p_ser.Serialize(&raw_foo);
startup_serializer.SerializeWeakReferences();
partial_sink.WriteSpaceUsed(
p_ser.CurrentAllocationAddress(NEW_SPACE),
p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
p_ser.CurrentAllocationAddress(CODE_SPACE),
p_ser.CurrentAllocationAddress(MAP_SPACE),
p_ser.CurrentAllocationAddress(CELL_SPACE),
p_ser.CurrentAllocationAddress(LO_SPACE));
p_ser.CurrentAllocationAddress(CELL_SPACE));
startup_sink.WriteSpaceUsed(
startup_serializer.CurrentAllocationAddress(NEW_SPACE),
startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
startup_serializer.CurrentAllocationAddress(CODE_SPACE),
startup_serializer.CurrentAllocationAddress(MAP_SPACE),
startup_serializer.CurrentAllocationAddress(CELL_SPACE));
startup_name.Dispose();
}
}
static void ReserveSpaceForPartialSnapshot(const char* file_name) {
static void ReserveSpaceForSnapshot(Deserializer* deserializer,
const char* file_name) {
int file_name_length = StrLength(file_name) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
OS::SNPrintF(name, "%s.size", file_name);
FILE* fp = OS::FOpen(name.start(), "r");
name.Dispose();
int new_size, pointer_size, data_size, code_size, map_size, cell_size;
int large_size;
#ifdef _MSC_VER
// Avoid warning about unsafe fscanf from MSVC.
// Please note that this is only fine if %c and %s are not being used.
......@@ -423,18 +437,16 @@ static void ReserveSpaceForPartialSnapshot(const char* file_name) {
CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
CHECK_EQ(1, fscanf(fp, "large %d\n", &large_size));
#ifdef _MSC_VER
#undef fscanf
#endif
fclose(fp);
HEAP->ReserveSpace(new_size,
pointer_size,
data_size,
code_size,
map_size,
cell_size,
large_size);
deserializer->set_reservation(NEW_SPACE, new_size);
deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size);
deserializer->set_reservation(OLD_DATA_SPACE, data_size);
deserializer->set_reservation(CODE_SPACE, code_size);
deserializer->set_reservation(MAP_SPACE, map_size);
deserializer->set_reservation(CELL_SPACE, cell_size);
}
......@@ -448,7 +460,6 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
startup_name.Dispose();
const char* file_name = FLAG_testing_serialization_file;
ReserveSpaceForPartialSnapshot(file_name);
int snapshot_size = 0;
byte* snapshot = ReadBytes(file_name, &snapshot_size);
......@@ -457,18 +468,19 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
{
SnapshotByteSource source(snapshot, snapshot_size);
Deserializer deserializer(&source);
ReserveSpaceForSnapshot(&deserializer, file_name);
deserializer.DeserializePartial(&root);
CHECK(root->IsString());
}
v8::HandleScope handle_scope;
Handle<Object> root_handle(root);
ReserveSpaceForPartialSnapshot(file_name);
Object* root2;
{
SnapshotByteSource source(snapshot, snapshot_size);
Deserializer deserializer(&source);
ReserveSpaceForSnapshot(&deserializer, file_name);
deserializer.DeserializePartial(&root2);
CHECK(root2->IsString());
CHECK(*root_handle == root2);
......@@ -506,7 +518,6 @@ TEST(ContextSerialization) {
env.Dispose();
FileByteSink startup_sink(startup_name.start());
startup_name.Dispose();
StartupSerializer startup_serializer(&startup_sink);
startup_serializer.SerializeStrongReferences();
......@@ -514,14 +525,23 @@ TEST(ContextSerialization) {
PartialSerializer p_ser(&startup_serializer, &partial_sink);
p_ser.Serialize(&raw_context);
startup_serializer.SerializeWeakReferences();
partial_sink.WriteSpaceUsed(
p_ser.CurrentAllocationAddress(NEW_SPACE),
p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
p_ser.CurrentAllocationAddress(CODE_SPACE),
p_ser.CurrentAllocationAddress(MAP_SPACE),
p_ser.CurrentAllocationAddress(CELL_SPACE),
p_ser.CurrentAllocationAddress(LO_SPACE));
p_ser.CurrentAllocationAddress(CELL_SPACE));
startup_sink.WriteSpaceUsed(
startup_serializer.CurrentAllocationAddress(NEW_SPACE),
startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
startup_serializer.CurrentAllocationAddress(CODE_SPACE),
startup_serializer.CurrentAllocationAddress(MAP_SPACE),
startup_serializer.CurrentAllocationAddress(CELL_SPACE));
startup_name.Dispose();
}
}
......@@ -536,7 +556,6 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
startup_name.Dispose();
const char* file_name = FLAG_testing_serialization_file;
ReserveSpaceForPartialSnapshot(file_name);
int snapshot_size = 0;
byte* snapshot = ReadBytes(file_name, &snapshot_size);
......@@ -545,18 +564,19 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
{
SnapshotByteSource source(snapshot, snapshot_size);
Deserializer deserializer(&source);
ReserveSpaceForSnapshot(&deserializer, file_name);
deserializer.DeserializePartial(&root);
CHECK(root->IsContext());
}
v8::HandleScope handle_scope;
Handle<Object> root_handle(root);
ReserveSpaceForPartialSnapshot(file_name);
Object* root2;
{
SnapshotByteSource source(snapshot, snapshot_size);
Deserializer deserializer(&source);
ReserveSpaceForSnapshot(&deserializer, file_name);
deserializer.DeserializePartial(&root2);
CHECK(root2->IsContext());
CHECK(*root_handle != root2);
......@@ -565,120 +585,6 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
}
TEST(LinearAllocation) {
v8::V8::Initialize();
int new_space_max = 512 * KB;
int paged_space_max = Page::kMaxNonCodeHeapObjectSize;
int code_space_max = HEAP->code_space()->AreaSize();
for (int size = 1000; size < 5 * MB; size += size >> 1) {
size &= ~8; // Round.
int new_space_size = (size < new_space_max) ? size : new_space_max;
int paged_space_size = (size < paged_space_max) ? size : paged_space_max;
HEAP->ReserveSpace(
new_space_size,
paged_space_size, // Old pointer space.
paged_space_size, // Old data space.
HEAP->code_space()->RoundSizeDownToObjectAlignment(code_space_max),
HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
size); // Large object space.
LinearAllocationScope linear_allocation_scope;
DisallowAllocationFailure disallow_allocation_failure;
const int kSmallFixedArrayLength = 4;
const int kSmallFixedArraySize =
FixedArray::kHeaderSize + kSmallFixedArrayLength * kPointerSize;
const int kSmallStringLength = 16;
const int kSmallStringSize =
(SeqAsciiString::kHeaderSize + kSmallStringLength +
kObjectAlignmentMask) & ~kObjectAlignmentMask;
const int kMapSize = Map::kSize;
Object* new_last = NULL;
for (int i = 0;
i + kSmallFixedArraySize <= new_space_size;
i += kSmallFixedArraySize) {
Object* obj =
HEAP->AllocateFixedArray(kSmallFixedArrayLength)->ToObjectChecked();
if (new_last != NULL) {
CHECK(reinterpret_cast<char*>(obj) ==
reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
}
new_last = obj;
}
Object* pointer_last = NULL;
for (int i = 0;
i + kSmallFixedArraySize <= paged_space_size;
i += kSmallFixedArraySize) {
Object* obj = HEAP->AllocateFixedArray(kSmallFixedArrayLength,
TENURED)->ToObjectChecked();
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > HEAP->old_pointer_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
pointer_last = NULL;
}
if (pointer_last != NULL) {
CHECK(reinterpret_cast<char*>(obj) ==
reinterpret_cast<char*>(pointer_last) + kSmallFixedArraySize);
}
pointer_last = obj;
}
Object* data_last = NULL;
for (int i = 0;
i + kSmallStringSize <= paged_space_size;
i += kSmallStringSize) {
Object* obj = HEAP->AllocateRawAsciiString(kSmallStringLength,
TENURED)->ToObjectChecked();
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > HEAP->old_data_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
data_last = NULL;
}
if (data_last != NULL) {
CHECK(reinterpret_cast<char*>(obj) ==
reinterpret_cast<char*>(data_last) + kSmallStringSize);
}
data_last = obj;
}
Object* map_last = NULL;
for (int i = 0; i + kMapSize <= paged_space_size; i += kMapSize) {
Object* obj = HEAP->AllocateMap(JS_OBJECT_TYPE,
42 * kPointerSize)->ToObjectChecked();
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kMapSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > HEAP->map_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
map_last = NULL;
}
if (map_last != NULL) {
CHECK(reinterpret_cast<char*>(obj) ==
reinterpret_cast<char*>(map_last) + kMapSize);
}
map_last = obj;
}
if (size > Page::kMaxNonCodeHeapObjectSize) {
// Support for reserving space in large object space is not there yet,
// but using an always-allocate scope is fine for now.
AlwaysAllocateScope always;
int large_object_array_length =
(size - FixedArray::kHeaderSize) / kPointerSize;
Object* obj = HEAP->AllocateFixedArray(large_object_array_length,
TENURED)->ToObjectChecked();
CHECK(!obj->IsFailure());
}
}
}
TEST(TestThatAlwaysSucceeds) {
}
......