Commit 1257f35c authored by yangguo@chromium.org

Support large objects in the serializer/deserializer.

Large objects cannot be bump-allocated out of a single reserved chunk, so the
serializer numbers them in the order it encounters them and back-references
them by that index, while the deserializer allocates each one individually in
the large object space and records the resulting addresses in a list. The
snapshot now also carries an LO_SPACE reservation, and the variable-length
integer encoding is widened from 22 to 30 payload bits.

R=hpayer@chromium.org, mvstanton@chromium.org

Review URL: https://codereview.chromium.org/581223004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24204 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b790fd6b
......@@ -923,9 +923,12 @@ void Heap::ReserveSpace(int* sizes, Address* locations_out) {
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
if (sizes[space] != 0) {
for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
if (sizes[space] == 0) continue;
bool perform_gc = false;
if (space == LO_SPACE) {
perform_gc = !lo_space()->CanAllocateSize(sizes[space]);
} else {
AllocationResult allocation;
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRaw(sizes[space]);
......@@ -933,23 +936,27 @@ void Heap::ReserveSpace(int* sizes, Address* locations_out) {
allocation = paged_space(space)->AllocateRaw(sizes[space]);
}
FreeListNode* node;
if (!allocation.To(&node)) {
if (allocation.To(&node)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
node->set_size(this, sizes[space]);
DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
locations_out[space] = node->address();
} else {
perform_gc = true;
}
}
if (perform_gc) {
if (space == NEW_SPACE) {
Heap::CollectGarbage(NEW_SPACE,
"failed to reserve space in the new space");
} else {
AbortIncrementalMarkingAndCollectGarbage(
this, static_cast<AllocationSpace>(space),
"failed to reserve space in paged space");
"failed to reserve space in paged or large object space");
}
gc_performed = true;
break;
} else {
// Mark with a free list node, in case we have a GC before
// deserializing.
node->set_size(this, sizes[space]);
locations_out[space] = node->address();
}
break; // Abort for-loop over spaces and retry.
}
}
}
......
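For orientation, the reservation loop changed above now walks all serializer spaces including LO_SPACE: the new and paged spaces are still reserved by bump-allocating a free-list filler of the requested size, while the large object space is only checked against its remaining capacity (CanAllocateSize), because each large object is allocated individually during deserialization. Below is a minimal, self-contained sketch of the surrounding GC-and-retry protocol; it uses toy stand-in types rather than V8's heap classes and simplifies every reservation down to a capacity check.

// Toy model (not V8 code) of Heap::ReserveSpace's GC-and-retry protocol.
#include <cstdio>

namespace sketch {

const int kNumSpaces = 4;   // stand-in for Serializer::kNumberOfSpaces
const int kThreshold = 20;  // same retry limit as the patch

struct Space {
  int capacity;
  int live_bytes;     // survives a GC
  int garbage_bytes;  // reclaimed by a GC
  int Available() const { return capacity - live_bytes - garbage_bytes; }
  void CollectGarbage() { garbage_bytes = 0; }
};

// Returns true once every requested reservation fits; performs at most
// kThreshold garbage collections before giving up.
bool ReserveSpace(Space (&spaces)[kNumSpaces], const int (&sizes)[kNumSpaces]) {
  for (int attempt = 0; attempt < kThreshold; attempt++) {
    bool gc_performed = false;
    for (int space = 0; space < kNumSpaces; space++) {
      if (sizes[space] == 0) continue;
      if (spaces[space].Available() < sizes[space]) {
        spaces[space].CollectGarbage();  // free memory, then retry all spaces
        gc_performed = true;
        break;
      }
    }
    if (!gc_performed) return true;
  }
  return false;  // gave up after kThreshold collections
}

}  // namespace sketch

int main() {
  sketch::Space spaces[4] = {{64, 8, 40}, {128, 0, 0}, {256, 16, 0}, {1024, 0, 512}};
  int sizes[4] = {32, 0, 64, 600};
  std::printf("reserved: %s\n", sketch::ReserveSpace(spaces, sizes) ? "yes" : "no");
  return 0;
}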
......@@ -2021,6 +2021,7 @@ class Heap {
int gc_callbacks_depth_;
friend class AlwaysAllocateScope;
friend class Deserializer;
friend class Factory;
friend class GCCallbacksScope;
friend class GCTracer;
......
......@@ -2840,9 +2840,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
return AllocationResult::Retry(identity());
}
if (Size() + object_size > max_capacity_) {
return AllocationResult::Retry(identity());
}
if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
......
......@@ -2728,6 +2728,8 @@ class LargeObjectSpace : public Space {
MUST_USE_RESULT AllocationResult
AllocateRaw(int object_size, Executability executable);
bool CanAllocateSize(int size) { return Size() + size <= max_capacity_; }
// Available bytes for objects in this space.
inline intptr_t Available();
......
......@@ -84,10 +84,10 @@ class SnapshotWriter {
i::List<i::byte> startup_blob;
i::ListSnapshotSink sink(&startup_blob);
int spaces[] = {
i::NEW_SPACE, i::OLD_POINTER_SPACE, i::OLD_DATA_SPACE, i::CODE_SPACE,
i::MAP_SPACE, i::CELL_SPACE, i::PROPERTY_CELL_SPACE
};
int spaces[] = {i::NEW_SPACE, i::OLD_POINTER_SPACE,
i::OLD_DATA_SPACE, i::CODE_SPACE,
i::MAP_SPACE, i::CELL_SPACE,
i::PROPERTY_CELL_SPACE, i::LO_SPACE};
i::byte* snapshot_bytes = snapshot_data.begin();
sink.PutBlob(snapshot_bytes, snapshot_data.length(), "snapshot");
......@@ -197,6 +197,7 @@ class SnapshotWriter {
WriteSizeVar(ser, prefix, "map", i::MAP_SPACE);
WriteSizeVar(ser, prefix, "cell", i::CELL_SPACE);
WriteSizeVar(ser, prefix, "property_cell", i::PROPERTY_CELL_SPACE);
WriteSizeVar(ser, prefix, "lo", i::LO_SPACE);
fprintf(fp_, "\n");
}
......
......@@ -596,8 +596,9 @@ Deserializer::Deserializer(SnapshotByteSource* source)
: isolate_(NULL),
attached_objects_(NULL),
source_(source),
external_reference_decoder_(NULL) {
for (int i = 0; i < LAST_SPACE + 1; i++) {
external_reference_decoder_(NULL),
deserialized_large_objects_(0) {
for (int i = 0; i < kNumberOfSpaces; i++) {
reservations_[i] = kUninitializedReservation;
}
}
......@@ -615,7 +616,7 @@ void Deserializer::FlushICacheForNewCodeObjects() {
void Deserializer::Deserialize(Isolate* isolate) {
isolate_ = isolate;
DCHECK(isolate_ != NULL);
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
isolate_->heap()->ReserveSpace(reservations_, high_water_);
// No active threads.
DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
// No active handles.
......@@ -662,7 +663,8 @@ void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
DCHECK(reservations_[i] != kUninitializedReservation);
}
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
Heap* heap = isolate->heap();
heap->ReserveSpace(reservations_, high_water_);
if (external_reference_decoder_ == NULL) {
external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
}
......@@ -798,11 +800,40 @@ void Deserializer::ReadObject(int space_number,
*write_back = obj;
#ifdef DEBUG
bool is_codespace = (space_number == CODE_SPACE);
DCHECK(obj->IsCode() == is_codespace);
if (obj->IsCode()) {
DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
} else {
DCHECK(space_number != CODE_SPACE);
}
#endif
}
// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address Deserializer::Allocate(int space_index, int size) {
if (space_index == LO_SPACE) {
AlwaysAllocateScope scope(isolate_);
LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
Executability exec = static_cast<Executability>(source_->GetInt());
AllocationResult result = lo_space->AllocateRaw(size, exec);
HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
deserialized_large_objects_.Add(obj);
return obj->address();
} else {
DCHECK(space_index < kNumberOfPreallocatedSpaces);
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
return address;
}
}
void Deserializer::ReadChunk(Object** current,
Object** limit,
int source_space,
......@@ -931,9 +962,10 @@ void Deserializer::ReadChunk(Object** current,
CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
CASE_STATEMENT(where, how, within, CELL_SPACE) \
CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
CASE_STATEMENT(where, how, within, LO_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
......@@ -1184,12 +1216,11 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
sink_(sink),
external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0),
code_address_map_(NULL) {
code_address_map_(NULL),
seen_large_objects_index_(0) {
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
for (int i = 0; i < kNumberOfSpaces; i++) fullness_[i] = 0;
}
......@@ -1324,10 +1355,7 @@ void Serializer::SerializeReferenceToPreviousObject(HeapObject* heap_object,
WhereToPoint where_to_point,
int skip) {
int space = SpaceOfObject(heap_object);
int address = address_mapper_.MappedTo(heap_object);
int offset = CurrentAllocationAddress(space) - address;
// Shift out the bits that are always 0.
offset >>= kObjectAlignmentBits;
if (skip == 0) {
sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
} else {
......@@ -1335,7 +1363,17 @@ void Serializer::SerializeReferenceToPreviousObject(HeapObject* heap_object,
"BackRefSerWithSkip");
sink_->PutInt(skip, "BackRefSkipDistance");
}
if (space == LO_SPACE) {
int index = address_mapper_.MappedTo(heap_object);
sink_->PutInt(index, "large object index");
} else {
int address = address_mapper_.MappedTo(heap_object);
int offset = CurrentAllocationAddress(space) - address;
// Shift out the bits that are always 0.
offset >>= kObjectAlignmentBits;
sink_->PutInt(offset, "offset");
}
}
......@@ -1494,8 +1532,18 @@ void Serializer::ObjectSerializer::Serialize() {
}
// Mark this object as already serialized.
if (space == LO_SPACE) {
if (object_->IsCode()) {
sink_->PutInt(EXECUTABLE, "executable large object");
} else {
sink_->PutInt(NOT_EXECUTABLE, "not executable large object");
}
int index = serializer_->AllocateLargeObject(size);
serializer_->address_mapper()->AddMapping(object_, index);
} else {
int offset = serializer_->Allocate(space, size);
serializer_->address_mapper()->AddMapping(object_, offset);
}
// Serialize the map (first word of the object).
serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0);
......@@ -1747,8 +1795,14 @@ int Serializer::SpaceOfObject(HeapObject* object) {
}
int Serializer::AllocateLargeObject(int size) {
fullness_[LO_SPACE] += size;
return seen_large_objects_index_++;
}
int Serializer::Allocate(int space, int size) {
CHECK(space >= 0 && space < kNumberOfSpaces);
CHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
int allocation_address = fullness_[space];
fullness_[space] = allocation_address + size;
return allocation_address;
......@@ -1840,11 +1894,11 @@ void CodeSerializer::SerializeObject(Object* o, HowToCode how_to_code,
if (heap_object->IsCode()) {
Code* code_object = Code::cast(heap_object);
DCHECK(!code_object->is_optimized_code());
if (code_object->kind() == Code::BUILTIN) {
SerializeBuiltin(code_object, how_to_code, where_to_point, skip);
return;
}
if (code_object->IsCodeStubOrIC()) {
} else if (code_object->IsCodeStubOrIC()) {
SerializeCodeStub(code_object, how_to_code, where_to_point, skip);
return;
}
......@@ -1991,7 +2045,7 @@ Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
SnapshotByteSource payload(scd.Payload(), scd.PayloadLength());
Deserializer deserializer(&payload);
STATIC_ASSERT(NEW_SPACE == 0);
for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) {
for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
deserializer.set_reservation(i, scd.GetReservation(i));
}
......@@ -2041,7 +2095,7 @@ SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs)
SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
SetHeaderValue(kPayloadLengthOffset, payload->length());
STATIC_ASSERT(NEW_SPACE == 0);
for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) {
for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) {
SetHeaderValue(kReservationsOffset + i, cs->CurrentAllocationAddress(i));
}
......
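The central change in serialize.cc is visible in the hunks above: ordinary spaces keep encoding back references as a distance from the current allocation point, shifted right by kObjectAlignmentBits, while large objects are numbered in serialization order and back-referenced by that index. The following self-contained sketch illustrates both halves of the scheme; the types and helpers are simplified stand-ins, not V8's API.

// Toy illustration (not V8's API) of back-referencing: normal objects by
// offset from the end of the bump-allocated space, large objects by index.
#include <cstdint>
#include <map>
#include <vector>

namespace sketch {

const int kObjectAlignmentBits = 3;  // object sizes assumed 8-byte aligned

struct ToySerializer {
  int fullness = 0;                    // bump "address" of the normal space
  int next_large_index = 0;            // stand-in for seen_large_objects_index_
  std::map<const void*, int> mapping;  // object -> recorded offset or index

  void Serialized(const void* obj, int size, bool large) {
    mapping[obj] = large ? next_large_index++ : fullness;
    if (!large) fullness += size;
  }

  // Value that would be written to the sink for a back reference.
  int EncodeBackRef(const void* obj, bool large) const {
    int recorded = mapping.at(obj);
    if (large) return recorded;             // index, emitted as-is
    int offset = fullness - recorded;       // distance back from the end
    return offset >> kObjectAlignmentBits;  // low bits are always zero
  }
};

struct ToyDeserializer {
  uintptr_t high_water = 0x10000;        // bump pointer of the normal space
  std::vector<uintptr_t> large_objects;  // stand-in for deserialized_large_objects_

  uintptr_t Allocate(int size, bool large, uintptr_t large_address = 0) {
    if (large) {                               // each large object is allocated
      large_objects.push_back(large_address);  // individually by the heap
      return large_address;
    }
    uintptr_t address = high_water;  // normal spaces just bump a pointer
    high_water += size;
    return address;
  }

  uintptr_t DecodeBackRef(int value, bool large) const {
    if (large) return large_objects[value];
    return high_water - (static_cast<uintptr_t>(value) << kObjectAlignmentBits);
  }
};

}  // namespace sketch

Offsets would be meaningless for large objects, since each one lives on its own page outside the reserved chunks; that is why both sides of the patch track them positionally, the serializer via seen_large_objects_index_ and the deserializer via deserialized_large_objects_.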
......@@ -148,11 +148,15 @@ class SerializerDeserializer: public ObjectVisitor {
static int nop() { return kNop; }
// No reservation for large object space necessary.
static const int kNumberOfPreallocatedSpaces = LO_SPACE;
static const int kNumberOfSpaces = INVALID_SPACE;
protected:
// Where the pointed-to object can be found:
enum Where {
kNewObject = 0, // Object is next in snapshot.
// 1-6 One per space.
// 1-7 One per space.
kRootArray = 0x9, // Object is found in root array.
kPartialSnapshotCache = 0xa, // Object is in the cache.
kExternalReference = 0xb, // Pointer to an external reference.
......@@ -161,9 +165,9 @@ class SerializerDeserializer: public ObjectVisitor {
kAttachedReference = 0xe, // Object is described in an attached list.
kNop = 0xf, // Does nothing, used to pad.
kBackref = 0x10, // Object is described relative to end.
// 0x11-0x16 One per space.
// 0x11-0x17 One per space.
kBackrefWithSkip = 0x18, // Object is described relative to end.
// 0x19-0x1e One per space.
// 0x19-0x1f One per space.
// 0x20-0x3f Used by misc. tags below.
kPointedToMask = 0x3f
};
......@@ -225,11 +229,11 @@ class SerializerDeserializer: public ObjectVisitor {
return byte_code & 0x1f;
}
static const int kNumberOfSpaces = LO_SPACE;
static const int kAnyOldSpace = -1;
// A bitmask for getting the space out of an instruction.
static const int kSpaceMask = 7;
STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
};
......@@ -249,7 +253,7 @@ class Deserializer: public SerializerDeserializer {
void set_reservation(int space_number, int reservation) {
DCHECK(space_number >= 0);
DCHECK(space_number <= LAST_SPACE);
DCHECK(space_number < kNumberOfSpaces);
reservations_[space_number] = reservation;
}
......@@ -282,24 +286,18 @@ class Deserializer: public SerializerDeserializer {
void ReadChunk(
Object** start, Object** end, int space, Address object_address);
void ReadObject(int space_number, Object** write_back);
Address Allocate(int space_index, int size);
// Special handling for serialized code like hooking up internalized strings.
HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
Object* ProcessBackRefInSerializedCode(Object* obj);
// This routine both allocates a new object, and also keeps
// track of where objects have been allocated so that we can
// fix back references when deserializing.
Address Allocate(int space_index, int size) {
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
return address;
}
// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
HeapObject* GetAddressFromEnd(int space) {
int offset = source_->GetInt();
if (space == LO_SPACE) return deserialized_large_objects_[offset];
DCHECK(space < kNumberOfPreallocatedSpaces);
offset <<= kObjectAlignmentBits;
return HeapObject::FromAddress(high_water_[space] - offset);
}
......@@ -313,13 +311,15 @@ class Deserializer: public SerializerDeserializer {
SnapshotByteSource* source_;
// This is the address of the next object that will be allocated in each
// space. It is used to calculate the addresses of back-references.
Address high_water_[LAST_SPACE + 1];
Address high_water_[kNumberOfPreallocatedSpaces];
int reservations_[LAST_SPACE + 1];
int reservations_[kNumberOfSpaces];
static const intptr_t kUninitializedReservation = -1;
ExternalReferenceDecoder* external_reference_decoder_;
List<HeapObject*> deserialized_large_objects_;
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
......@@ -466,6 +466,7 @@ class Serializer : public SerializerDeserializer {
void InitializeAllocators();
// This will return the space for an object.
static int SpaceOfObject(HeapObject* object);
int AllocateLargeObject(int size);
int Allocate(int space, int size);
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
......@@ -480,7 +481,7 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references.
int fullness_[LAST_SPACE + 1];
int fullness_[kNumberOfSpaces];
SnapshotByteSink* sink_;
ExternalReferenceEncoder* external_reference_encoder_;
......@@ -497,6 +498,8 @@ class Serializer : public SerializerDeserializer {
private:
CodeAddressMap* code_address_map_;
// We map serialized large objects to indexes for back-referencing.
int seen_large_objects_index_;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
......@@ -691,8 +694,8 @@ class SerializedCodeData {
static const int kPayloadLengthOffset = 2;
static const int kReservationsOffset = 3;
static const int kNumSpaces = PROPERTY_CELL_SPACE - NEW_SPACE + 1;
static const int kHeaderEntries = kReservationsOffset + kNumSpaces;
static const int kHeaderEntries =
kReservationsOffset + SerializerDeserializer::kNumberOfSpaces;
static const int kHeaderSize = kHeaderEntries * kIntSize;
// Following the header, we store, in sequential order
......
......@@ -21,8 +21,8 @@ void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
deserializer->set_reservation(CODE_SPACE, code_space_used_);
deserializer->set_reservation(MAP_SPACE, map_space_used_);
deserializer->set_reservation(CELL_SPACE, cell_space_used_);
deserializer->set_reservation(PROPERTY_CELL_SPACE,
property_cell_space_used_);
deserializer->set_reservation(PROPERTY_CELL_SPACE, property_cell_space_used_);
deserializer->set_reservation(LO_SPACE, lo_space_used_);
}
......@@ -67,6 +67,7 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
deserializer.set_reservation(PROPERTY_CELL_SPACE,
context_property_cell_space_used_);
deserializer.set_reservation(LO_SPACE, context_lo_space_used_);
deserializer.DeserializePartial(isolate, &root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
......
......@@ -27,6 +27,7 @@ const int Snapshot::code_space_used_ = 0;
const int Snapshot::map_space_used_ = 0;
const int Snapshot::cell_space_used_ = 0;
const int Snapshot::property_cell_space_used_ = 0;
const int Snapshot::lo_space_used_ = 0;
const int Snapshot::context_new_space_used_ = 0;
const int Snapshot::context_pointer_space_used_ = 0;
......@@ -35,5 +36,5 @@ const int Snapshot::context_code_space_used_ = 0;
const int Snapshot::context_map_space_used_ = 0;
const int Snapshot::context_cell_space_used_ = 0;
const int Snapshot::context_property_cell_space_used_ = 0;
const int Snapshot::context_lo_space_used_ = 0;
} } // namespace v8::internal
......@@ -25,6 +25,7 @@ struct SnapshotImpl {
int map_space_used;
int cell_space_used;
int property_cell_space_used;
int lo_space_used;
const byte* context_data;
int context_size;
......@@ -35,6 +36,7 @@ struct SnapshotImpl {
int context_map_space_used;
int context_cell_space_used;
int context_property_cell_space_used;
int context_lo_space_used;
};
......@@ -66,6 +68,7 @@ bool Snapshot::Initialize(Isolate* isolate) {
deserializer.set_reservation(CELL_SPACE, snapshot_impl_->cell_space_used);
deserializer.set_reservation(PROPERTY_CELL_SPACE,
snapshot_impl_->property_cell_space_used);
deserializer.set_reservation(LO_SPACE, snapshot_impl_->lo_space_used);
bool success = isolate->Init(&deserializer);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
......@@ -97,6 +100,7 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
deserializer.set_reservation(PROPERTY_CELL_SPACE,
snapshot_impl_->
context_property_cell_space_used);
deserializer.set_reservation(LO_SPACE, snapshot_impl_->context_lo_space_used);
Object* root;
deserializer.DeserializePartial(isolate, &root);
CHECK(root->IsContext());
......@@ -123,6 +127,7 @@ void SetSnapshotFromFile(StartupData* snapshot_blob) {
snapshot_impl_->map_space_used = source.GetInt();
snapshot_impl_->cell_space_used = source.GetInt();
snapshot_impl_->property_cell_space_used = source.GetInt();
snapshot_impl_->lo_space_used = source.GetInt();
success &= source.GetBlob(&snapshot_impl_->context_data,
&snapshot_impl_->context_size);
......
......@@ -39,15 +39,17 @@ void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
DCHECK(integer < 1 << 22);
DCHECK(integer < 1 << 30);
integer <<= 2;
int bytes = 1;
if (integer > 0xff) bytes = 2;
if (integer > 0xffff) bytes = 3;
integer |= bytes;
if (integer > 0xffffff) bytes = 4;
integer |= (bytes - 1);
Put(static_cast<int>(integer & 0xff), "IntPart1");
if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
if (bytes > 3) Put(static_cast<int>((integer >> 24) & 0xff), "IntPart4");
}
void SnapshotByteSink::PutRaw(byte* data, int number_of_bytes,
......
......@@ -39,7 +39,7 @@ class SnapshotByteSource FINAL {
// This way of variable-length encoding integers does not suffer from branch
// mispredictions.
uint32_t answer = GetUnalignedInt();
int bytes = answer & 3;
int bytes = (answer & 3) + 1;
Advance(bytes);
uint32_t mask = 0xffffffffu;
mask >>= 32 - (bytes << 3);
......
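The encoding change above is small but easy to misread: the integer is still shifted left by two with the byte count kept in the low two bits, but the count is now stored as bytes - 1. Previously the low bits held the count itself (1-3), limiting payloads to 22 bits; freeing the value 3 to mean "four bytes" raises the limit to 30 bits, as the new DCHECK reflects. A self-contained sketch of the round trip, using stand-alone functions rather than the SnapshotByteSink/SnapshotByteSource classes:

// Sketch (not the V8 classes) of the 1..4 byte varint format after this
// change: value << 2, with (number_of_bytes - 1) stored in the low two bits.
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> PutInt(uint32_t value) {
  assert(value < (1u << 30));  // only 30 payload bits are available
  uint32_t encoded = value << 2;
  int bytes = 1;
  if (encoded > 0xff) bytes = 2;
  if (encoded > 0xffff) bytes = 3;
  if (encoded > 0xffffff) bytes = 4;
  encoded |= static_cast<uint32_t>(bytes - 1);  // 0..3 in the low two bits
  std::vector<uint8_t> out;
  for (int i = 0; i < bytes; i++) out.push_back((encoded >> (8 * i)) & 0xff);
  return out;
}

uint32_t GetInt(const uint8_t* data) {
  // The real reader loads four unaligned bytes and masks; assemble the same
  // little-endian word explicitly here.
  uint32_t raw = data[0] | (data[1] << 8) | (data[2] << 16) |
                 (static_cast<uint32_t>(data[3]) << 24);
  int bytes = (raw & 3) + 1;                        // decode the length field
  uint32_t mask = 0xffffffffu >> (32 - (bytes << 3));
  return (raw & mask) >> 2;                         // drop the length field
}

int main() {
  // 0x12345 needs three bytes; 0x1234567 only fits now that four are allowed.
  for (uint32_t v : {0u, 0x3fu, 0x12345u, 0x1234567u, (1u << 30) - 1}) {
    std::vector<uint8_t> buf = PutInt(v);
    buf.resize(4);  // GetInt always reads four bytes
    assert(GetInt(buf.data()) == v);
  }
  return 0;
}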
......@@ -48,6 +48,7 @@ class Snapshot {
static const int map_space_used_;
static const int cell_space_used_;
static const int property_cell_space_used_;
static const int lo_space_used_;
static const int context_new_space_used_;
static const int context_pointer_space_used_;
static const int context_data_space_used_;
......@@ -55,6 +56,7 @@ class Snapshot {
static const int context_map_space_used_;
static const int context_cell_space_used_;
static const int context_property_cell_space_used_;
static const int context_lo_space_used_;
static const int size_;
static const int raw_size_;
static const int context_size_;
......
......@@ -137,14 +137,10 @@ class FileByteSink : public SnapshotByteSink {
virtual int Position() {
return ftell(fp_);
}
void WriteSpaceUsed(
int new_space_used,
int pointer_space_used,
int data_space_used,
int code_space_used,
int map_space_used,
int cell_space_used,
int property_cell_space_used);
void WriteSpaceUsed(int new_space_used, int pointer_space_used,
int data_space_used, int code_space_used,
int map_space_used, int cell_space_used,
int property_cell_space_used, int lo_space_used);
private:
FILE* fp_;
......@@ -152,14 +148,11 @@ class FileByteSink : public SnapshotByteSink {
};
void FileByteSink::WriteSpaceUsed(
int new_space_used,
int pointer_space_used,
int data_space_used,
int code_space_used,
int map_space_used,
int cell_space_used,
int property_cell_space_used) {
void FileByteSink::WriteSpaceUsed(int new_space_used, int pointer_space_used,
int data_space_used, int code_space_used,
int map_space_used, int cell_space_used,
int property_cell_space_used,
int lo_space_used) {
int file_name_length = StrLength(file_name_) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
SNPrintF(name, "%s.size", file_name_);
......@@ -172,6 +165,7 @@ void FileByteSink::WriteSpaceUsed(
fprintf(fp, "map %d\n", map_space_used);
fprintf(fp, "cell %d\n", cell_space_used);
fprintf(fp, "property cell %d\n", property_cell_space_used);
fprintf(fp, "lo %d\n", lo_space_used);
fclose(fp);
}
......@@ -181,14 +175,14 @@ static bool WriteToFile(Isolate* isolate, const char* snapshot_file) {
StartupSerializer ser(isolate, &file);
ser.Serialize();
file.WriteSpaceUsed(
ser.CurrentAllocationAddress(NEW_SPACE),
file.WriteSpaceUsed(ser.CurrentAllocationAddress(NEW_SPACE),
ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
ser.CurrentAllocationAddress(OLD_DATA_SPACE),
ser.CurrentAllocationAddress(CODE_SPACE),
ser.CurrentAllocationAddress(MAP_SPACE),
ser.CurrentAllocationAddress(CELL_SPACE),
ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
ser.CurrentAllocationAddress(LO_SPACE));
return true;
}
......@@ -246,7 +240,7 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
FILE* fp = v8::base::OS::FOpen(name.start(), "r");
name.Dispose();
int new_size, pointer_size, data_size, code_size, map_size, cell_size,
property_cell_size;
property_cell_size, lo_size;
#ifdef _MSC_VER
// Avoid warning about unsafe fscanf from MSVC.
// Please note that this is only fine if %c and %s are not being used.
......@@ -259,6 +253,7 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
CHECK_EQ(1, fscanf(fp, "property cell %d\n", &property_cell_size));
CHECK_EQ(1, fscanf(fp, "lo %d\n", &lo_size));
#ifdef _MSC_VER
#undef fscanf
#endif
......@@ -270,6 +265,7 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
deserializer->set_reservation(MAP_SPACE, map_size);
deserializer->set_reservation(CELL_SPACE, cell_size);
deserializer->set_reservation(PROPERTY_CELL_SPACE, property_cell_size);
deserializer->set_reservation(LO_SPACE, lo_size);
}
......@@ -456,7 +452,8 @@ UNINITIALIZED_TEST(PartialSerialization) {
p_ser.CurrentAllocationAddress(CODE_SPACE),
p_ser.CurrentAllocationAddress(MAP_SPACE),
p_ser.CurrentAllocationAddress(CELL_SPACE),
p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
p_ser.CurrentAllocationAddress(LO_SPACE));
startup_sink.WriteSpaceUsed(
startup_serializer.CurrentAllocationAddress(NEW_SPACE),
......@@ -465,7 +462,8 @@ UNINITIALIZED_TEST(PartialSerialization) {
startup_serializer.CurrentAllocationAddress(CODE_SPACE),
startup_serializer.CurrentAllocationAddress(MAP_SPACE),
startup_serializer.CurrentAllocationAddress(CELL_SPACE),
startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
startup_serializer.CurrentAllocationAddress(LO_SPACE));
startup_name.Dispose();
}
v8_isolate->Exit();
......@@ -579,7 +577,8 @@ UNINITIALIZED_TEST(ContextSerialization) {
p_ser.CurrentAllocationAddress(CODE_SPACE),
p_ser.CurrentAllocationAddress(MAP_SPACE),
p_ser.CurrentAllocationAddress(CELL_SPACE),
p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
p_ser.CurrentAllocationAddress(LO_SPACE));
startup_sink.WriteSpaceUsed(
startup_serializer.CurrentAllocationAddress(NEW_SPACE),
......@@ -588,7 +587,8 @@ UNINITIALIZED_TEST(ContextSerialization) {
startup_serializer.CurrentAllocationAddress(CODE_SPACE),
startup_serializer.CurrentAllocationAddress(MAP_SPACE),
startup_serializer.CurrentAllocationAddress(CELL_SPACE),
startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
startup_serializer.CurrentAllocationAddress(LO_SPACE));
startup_name.Dispose();
}
v8_isolate->Dispose();
......@@ -786,6 +786,121 @@ TEST(SerializeToplevelInternalizedString) {
}
Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
Vector<const uint8_t> body,
Vector<const uint8_t> tail, int repeats) {
int source_length = head.length() + body.length() * repeats + tail.length();
uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
CopyChars(source, head.start(), head.length());
for (int i = 0; i < repeats; i++) {
CopyChars(source + head.length() + i * body.length(), body.start(),
body.length());
}
CopyChars(source + head.length() + repeats * body.length(), tail.start(),
tail.length());
return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
source_length);
}
TEST(SerializeToplevelLargeCodeObject) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
Vector<const uint8_t> source =
ConstructSource(STATIC_CHAR_VECTOR("var j=1; try { if (j) throw 1;"),
STATIC_CHAR_VECTOR("for(var i=0;i<1;i++)j++;"),
STATIC_CHAR_VECTOR("} catch (e) { j=7; } j"), 10000);
Handle<String> source_str =
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
source_str, Handle<String>(), 0, 0, false,
Handle<Context>(isolate->native_context()), NULL, &cache,
v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
CHECK(isolate->heap()->InSpace(orig->code(), LO_SPACE));
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
copy = Compiler::CompileScript(
source_str, Handle<String>(), 0, 0, false,
Handle<Context>(isolate->native_context()), NULL, &cache,
v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
}
CHECK_NE(*orig, *copy);
Handle<JSFunction> copy_fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
copy, isolate->native_context());
Handle<Object> copy_result =
Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
int result_int;
CHECK(copy_result->ToInt32(&result_int));
CHECK_EQ(7, result_int);
delete cache;
source.Dispose();
}
TEST(SerializeToplevelLargeString) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
Vector<const uint8_t> source = ConstructSource(
STATIC_CHAR_VECTOR("var s = \""), STATIC_CHAR_VECTOR("abcdef"),
STATIC_CHAR_VECTOR("\"; s"), 1000000);
Handle<String> source_str =
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
source_str, Handle<String>(), 0, 0, false,
Handle<Context>(isolate->native_context()), NULL, &cache,
v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
copy = Compiler::CompileScript(
source_str, Handle<String>(), 0, 0, false,
Handle<Context>(isolate->native_context()), NULL, &cache,
v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
}
CHECK_NE(*orig, *copy);
Handle<JSFunction> copy_fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
copy, isolate->native_context());
Handle<Object> copy_result =
Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
CHECK_EQ(6 * 1000000, Handle<String>::cast(copy_result)->length());
CHECK(isolate->heap()->InSpace(HeapObject::cast(*copy_result), LO_SPACE));
delete cache;
source.Dispose();
}
TEST(SerializeToplevelIsolates) {
FLAG_serialize_toplevel = true;
......