Commit 1257f35c authored by yangguo@chromium.org

Support large objects in the serializer/deserializer.

R=hpayer@chromium.org, mvstanton@chromium.org

Review URL: https://codereview.chromium.org/581223004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24204 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b790fd6b
......@@ -923,9 +923,12 @@ void Heap::ReserveSpace(int* sizes, Address* locations_out) {
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
if (sizes[space] != 0) {
for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
if (sizes[space] == 0) continue;
bool perform_gc = false;
if (space == LO_SPACE) {
perform_gc = !lo_space()->CanAllocateSize(sizes[space]);
} else {
AllocationResult allocation;
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRaw(sizes[space]);
......@@ -933,23 +936,27 @@ void Heap::ReserveSpace(int* sizes, Address* locations_out) {
allocation = paged_space(space)->AllocateRaw(sizes[space]);
}
FreeListNode* node;
if (!allocation.To(&node)) {
if (allocation.To(&node)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
node->set_size(this, sizes[space]);
DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
locations_out[space] = node->address();
} else {
perform_gc = true;
}
}
if (perform_gc) {
if (space == NEW_SPACE) {
Heap::CollectGarbage(NEW_SPACE,
"failed to reserve space in the new space");
} else {
AbortIncrementalMarkingAndCollectGarbage(
this, static_cast<AllocationSpace>(space),
"failed to reserve space in paged space");
"failed to reserve space in paged or large object space");
}
gc_performed = true;
break;
} else {
// Mark with a free list node, in case we have a GC before
// deserializing.
node->set_size(this, sizes[space]);
locations_out[space] = node->address();
}
break; // Abort for-loop over spaces and retry.
}
}
}
......
......@@ -2021,6 +2021,7 @@ class Heap {
int gc_callbacks_depth_;
friend class AlwaysAllocateScope;
friend class Deserializer;
friend class Factory;
friend class GCCallbacksScope;
friend class GCTracer;
......
......@@ -2840,9 +2840,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
return AllocationResult::Retry(identity());
}
if (Size() + object_size > max_capacity_) {
return AllocationResult::Retry(identity());
}
if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
......
......@@ -2728,6 +2728,8 @@ class LargeObjectSpace : public Space {
MUST_USE_RESULT AllocationResult
AllocateRaw(int object_size, Executability executable);
bool CanAllocateSize(int size) { return Size() + size <= max_capacity_; }
// Available bytes for objects in this space.
inline intptr_t Available();
......
......@@ -84,10 +84,10 @@ class SnapshotWriter {
i::List<i::byte> startup_blob;
i::ListSnapshotSink sink(&startup_blob);
int spaces[] = {
i::NEW_SPACE, i::OLD_POINTER_SPACE, i::OLD_DATA_SPACE, i::CODE_SPACE,
i::MAP_SPACE, i::CELL_SPACE, i::PROPERTY_CELL_SPACE
};
int spaces[] = {i::NEW_SPACE, i::OLD_POINTER_SPACE,
i::OLD_DATA_SPACE, i::CODE_SPACE,
i::MAP_SPACE, i::CELL_SPACE,
i::PROPERTY_CELL_SPACE, i::LO_SPACE};
i::byte* snapshot_bytes = snapshot_data.begin();
sink.PutBlob(snapshot_bytes, snapshot_data.length(), "snapshot");
......@@ -197,6 +197,7 @@ class SnapshotWriter {
WriteSizeVar(ser, prefix, "map", i::MAP_SPACE);
WriteSizeVar(ser, prefix, "cell", i::CELL_SPACE);
WriteSizeVar(ser, prefix, "property_cell", i::PROPERTY_CELL_SPACE);
WriteSizeVar(ser, prefix, "lo", i::LO_SPACE);
fprintf(fp_, "\n");
}
......
......@@ -596,8 +596,9 @@ Deserializer::Deserializer(SnapshotByteSource* source)
: isolate_(NULL),
attached_objects_(NULL),
source_(source),
external_reference_decoder_(NULL) {
for (int i = 0; i < LAST_SPACE + 1; i++) {
external_reference_decoder_(NULL),
deserialized_large_objects_(0) {
for (int i = 0; i < kNumberOfSpaces; i++) {
reservations_[i] = kUninitializedReservation;
}
}
......@@ -615,7 +616,7 @@ void Deserializer::FlushICacheForNewCodeObjects() {
void Deserializer::Deserialize(Isolate* isolate) {
isolate_ = isolate;
DCHECK(isolate_ != NULL);
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
isolate_->heap()->ReserveSpace(reservations_, high_water_);
// No active threads.
DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
// No active handles.
......@@ -662,7 +663,8 @@ void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
DCHECK(reservations_[i] != kUninitializedReservation);
}
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
Heap* heap = isolate->heap();
heap->ReserveSpace(reservations_, high_water_);
if (external_reference_decoder_ == NULL) {
external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
}
......@@ -798,11 +800,40 @@ void Deserializer::ReadObject(int space_number,
*write_back = obj;
#ifdef DEBUG
bool is_codespace = (space_number == CODE_SPACE);
DCHECK(obj->IsCode() == is_codespace);
if (obj->IsCode()) {
DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
} else {
DCHECK(space_number != CODE_SPACE);
}
#endif
}
// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offsets for back references, we
// reference large objects by index.
Address Deserializer::Allocate(int space_index, int size) {
if (space_index == LO_SPACE) {
AlwaysAllocateScope scope(isolate_);
LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
Executability exec = static_cast<Executability>(source_->GetInt());
AllocationResult result = lo_space->AllocateRaw(size, exec);
HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
deserialized_large_objects_.Add(obj);
return obj->address();
} else {
DCHECK(space_index < kNumberOfPreallocatedSpaces);
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
return address;
}
}
void Deserializer::ReadChunk(Object** current,
Object** limit,
int source_space,
......@@ -931,9 +962,10 @@ void Deserializer::ReadChunk(Object** current,
CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
CASE_STATEMENT(where, how, within, CELL_SPACE) \
CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
CASE_STATEMENT(where, how, within, LO_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
......@@ -1184,12 +1216,11 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
sink_(sink),
external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0),
code_address_map_(NULL) {
code_address_map_(NULL),
seen_large_objects_index_(0) {
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
for (int i = 0; i < kNumberOfSpaces; i++) fullness_[i] = 0;
}
......@@ -1324,10 +1355,7 @@ void Serializer::SerializeReferenceToPreviousObject(HeapObject* heap_object,
WhereToPoint where_to_point,
int skip) {
int space = SpaceOfObject(heap_object);
int address = address_mapper_.MappedTo(heap_object);
int offset = CurrentAllocationAddress(space) - address;
// Shift out the bits that are always 0.
offset >>= kObjectAlignmentBits;
if (skip == 0) {
sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
} else {
......@@ -1335,7 +1363,17 @@ void Serializer::SerializeReferenceToPreviousObject(HeapObject* heap_object,
"BackRefSerWithSkip");
sink_->PutInt(skip, "BackRefSkipDistance");
}
if (space == LO_SPACE) {
int index = address_mapper_.MappedTo(heap_object);
sink_->PutInt(index, "large object index");
} else {
int address = address_mapper_.MappedTo(heap_object);
int offset = CurrentAllocationAddress(space) - address;
// Shift out the bits that are always 0.
offset >>= kObjectAlignmentBits;
sink_->PutInt(offset, "offset");
}
}
......@@ -1494,8 +1532,18 @@ void Serializer::ObjectSerializer::Serialize() {
}
// Mark this object as already serialized.
if (space == LO_SPACE) {
if (object_->IsCode()) {
sink_->PutInt(EXECUTABLE, "executable large object");
} else {
sink_->PutInt(NOT_EXECUTABLE, "not executable large object");
}
int index = serializer_->AllocateLargeObject(size);
serializer_->address_mapper()->AddMapping(object_, index);
} else {
int offset = serializer_->Allocate(space, size);
serializer_->address_mapper()->AddMapping(object_, offset);
}
// Serialize the map (first word of the object).
serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0);
......@@ -1747,8 +1795,14 @@ int Serializer::SpaceOfObject(HeapObject* object) {
}
int Serializer::AllocateLargeObject(int size) {
fullness_[LO_SPACE] += size;
return seen_large_objects_index_++;
}
int Serializer::Allocate(int space, int size) {
CHECK(space >= 0 && space < kNumberOfSpaces);
CHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
int allocation_address = fullness_[space];
fullness_[space] = allocation_address + size;
return allocation_address;
......@@ -1840,11 +1894,11 @@ void CodeSerializer::SerializeObject(Object* o, HowToCode how_to_code,
if (heap_object->IsCode()) {
Code* code_object = Code::cast(heap_object);
DCHECK(!code_object->is_optimized_code());
if (code_object->kind() == Code::BUILTIN) {
SerializeBuiltin(code_object, how_to_code, where_to_point, skip);
return;
}
if (code_object->IsCodeStubOrIC()) {
} else if (code_object->IsCodeStubOrIC()) {
SerializeCodeStub(code_object, how_to_code, where_to_point, skip);
return;
}
......@@ -1991,7 +2045,7 @@ Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
SnapshotByteSource payload(scd.Payload(), scd.PayloadLength());
Deserializer deserializer(&payload);
STATIC_ASSERT(NEW_SPACE == 0);
for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) {
for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
deserializer.set_reservation(i, scd.GetReservation(i));
}
......@@ -2041,7 +2095,7 @@ SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs)
SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
SetHeaderValue(kPayloadLengthOffset, payload->length());
STATIC_ASSERT(NEW_SPACE == 0);
for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) {
for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) {
SetHeaderValue(kReservationsOffset + i, cs->CurrentAllocationAddress(i));
}
......
......@@ -148,11 +148,15 @@ class SerializerDeserializer: public ObjectVisitor {
static int nop() { return kNop; }
// No reservation for large object space necessary.
static const int kNumberOfPreallocatedSpaces = LO_SPACE;
static const int kNumberOfSpaces = INVALID_SPACE;
protected:
// Where the pointed-to object can be found:
enum Where {
kNewObject = 0, // Object is next in snapshot.
// 1-6 One per space.
// 1-7 One per space.
kRootArray = 0x9, // Object is found in root array.
kPartialSnapshotCache = 0xa, // Object is in the cache.
kExternalReference = 0xb, // Pointer to an external reference.
......@@ -161,9 +165,9 @@ class SerializerDeserializer: public ObjectVisitor {
kAttachedReference = 0xe, // Object is described in an attached list.
kNop = 0xf, // Does nothing, used to pad.
kBackref = 0x10, // Object is described relative to end.
// 0x11-0x16 One per space.
// 0x11-0x17 One per space.
kBackrefWithSkip = 0x18, // Object is described relative to end.
// 0x19-0x1e One per space.
// 0x19-0x1f One per space.
// 0x20-0x3f Used by misc. tags below.
kPointedToMask = 0x3f
};
......@@ -225,11 +229,11 @@ class SerializerDeserializer: public ObjectVisitor {
return byte_code & 0x1f;
}
static const int kNumberOfSpaces = LO_SPACE;
static const int kAnyOldSpace = -1;
// A bitmask for getting the space out of an instruction.
static const int kSpaceMask = 7;
STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
};
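With LO_SPACE now serializable, the per-space opcode ranges above grow by one slot (0x11-0x17 and 0x19-0x1f), which still fits into the three space bits covered by kSpaceMask. A minimal standalone sketch of that packing, ignoring the how_to_code/where_to_point contributions that live in higher bits; the constants are copied from the enum above, and LO_SPACE == 7 is an assumption about the AllocationSpace numbering used purely for illustration:

    #include <cassert>

    // Values taken from the Where enum and kSpaceMask above (not the real header).
    const int kBackref = 0x10;
    const int kBackrefWithSkip = 0x18;
    const int kSpaceMask = 7;
    const int LO_SPACE = 7;  // assumed space index, for illustration only

    int main() {
      // A back reference into the large object space is emitted as kBackref + space
      // (plus higher-order how/where bits, omitted here), so the space index can be
      // recovered by masking off the low three bits.
      int opcode = kBackref + LO_SPACE;               // == 0x17, last slot in 0x11-0x17
      assert((opcode & kSpaceMask) == LO_SPACE);
      assert((kBackrefWithSkip + LO_SPACE) == 0x1f);  // last slot in 0x19-0x1f
      return 0;
    }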
......@@ -249,7 +253,7 @@ class Deserializer: public SerializerDeserializer {
void set_reservation(int space_number, int reservation) {
DCHECK(space_number >= 0);
DCHECK(space_number <= LAST_SPACE);
DCHECK(space_number < kNumberOfSpaces);
reservations_[space_number] = reservation;
}
......@@ -282,24 +286,18 @@ class Deserializer: public SerializerDeserializer {
void ReadChunk(
Object** start, Object** end, int space, Address object_address);
void ReadObject(int space_number, Object** write_back);
Address Allocate(int space_index, int size);
// Special handling for serialized code like hooking up internalized strings.
HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
Object* ProcessBackRefInSerializedCode(Object* obj);
// This routine both allocates a new object, and also keeps
// track of where objects have been allocated so that we can
// fix back references when deserializing.
Address Allocate(int space_index, int size) {
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
return address;
}
// This returns the address of an object that has been described in the
// snapshot as being offset bytes back in a particular space.
HeapObject* GetAddressFromEnd(int space) {
int offset = source_->GetInt();
if (space == LO_SPACE) return deserialized_large_objects_[offset];
DCHECK(space < kNumberOfPreallocatedSpaces);
offset <<= kObjectAlignmentBits;
return HeapObject::FromAddress(high_water_[space] - offset);
}
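To make the back-reference arithmetic above concrete, here is a minimal self-contained sketch of the offset round trip for a pre-allocated space; the value of kObjectAlignmentBits and the addresses are assumptions chosen purely for illustration. LO_SPACE back references skip this arithmetic entirely and simply index into deserialized_large_objects_.

    #include <cassert>
    #include <cstdint>

    int main() {
      // Assumed alignment; the real kObjectAlignmentBits depends on the pointer size.
      const int kObjectAlignmentBits = 2;

      // Serializer side (see SerializeReferenceToPreviousObject): the object was
      // "allocated" at fullness 0x40, and fullness_[space] has since grown to 0x100.
      uint32_t address = 0x40;   // address_mapper_.MappedTo(heap_object)
      uint32_t current = 0x100;  // CurrentAllocationAddress(space)
      uint32_t offset = (current - address) >> kObjectAlignmentBits;  // written to the sink

      // Deserializer side (see GetAddressFromEnd): high_water_[space] mirrors the
      // serializer's fullness once the same amount of data has been deserialized.
      uint32_t high_water = 0x100;
      uint32_t recovered = high_water - (offset << kObjectAlignmentBits);
      assert(recovered == address);
      return 0;
    }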
......@@ -313,13 +311,15 @@ class Deserializer: public SerializerDeserializer {
SnapshotByteSource* source_;
// This is the address of the next object that will be allocated in each
// space. It is used to calculate the addresses of back-references.
Address high_water_[LAST_SPACE + 1];
Address high_water_[kNumberOfPreallocatedSpaces];
int reservations_[LAST_SPACE + 1];
int reservations_[kNumberOfSpaces];
static const intptr_t kUninitializedReservation = -1;
ExternalReferenceDecoder* external_reference_decoder_;
List<HeapObject*> deserialized_large_objects_;
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
......@@ -466,6 +466,7 @@ class Serializer : public SerializerDeserializer {
void InitializeAllocators();
// This will return the space for an object.
static int SpaceOfObject(HeapObject* object);
int AllocateLargeObject(int size);
int Allocate(int space, int size);
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
......@@ -480,7 +481,7 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references.
int fullness_[LAST_SPACE + 1];
int fullness_[kNumberOfSpaces];
SnapshotByteSink* sink_;
ExternalReferenceEncoder* external_reference_encoder_;
......@@ -497,6 +498,8 @@ class Serializer : public SerializerDeserializer {
private:
CodeAddressMap* code_address_map_;
// We map serialized large objects to indexes for back-referencing.
int seen_large_objects_index_;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
......@@ -691,8 +694,8 @@ class SerializedCodeData {
static const int kPayloadLengthOffset = 2;
static const int kReservationsOffset = 3;
static const int kNumSpaces = PROPERTY_CELL_SPACE - NEW_SPACE + 1;
static const int kHeaderEntries = kReservationsOffset + kNumSpaces;
static const int kHeaderEntries =
kReservationsOffset + SerializerDeserializer::kNumberOfSpaces;
static const int kHeaderSize = kHeaderEntries * kIntSize;
// Following the header, we store, in sequential order
......
......@@ -21,8 +21,8 @@ void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
deserializer->set_reservation(CODE_SPACE, code_space_used_);
deserializer->set_reservation(MAP_SPACE, map_space_used_);
deserializer->set_reservation(CELL_SPACE, cell_space_used_);
deserializer->set_reservation(PROPERTY_CELL_SPACE,
property_cell_space_used_);
deserializer->set_reservation(PROPERTY_CELL_SPACE, property_cell_space_used_);
deserializer->set_reservation(LO_SPACE, lo_space_used_);
}
......@@ -67,6 +67,7 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
deserializer.set_reservation(PROPERTY_CELL_SPACE,
context_property_cell_space_used_);
deserializer.set_reservation(LO_SPACE, context_lo_space_used_);
deserializer.DeserializePartial(isolate, &root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
......
......@@ -27,6 +27,7 @@ const int Snapshot::code_space_used_ = 0;
const int Snapshot::map_space_used_ = 0;
const int Snapshot::cell_space_used_ = 0;
const int Snapshot::property_cell_space_used_ = 0;
const int Snapshot::lo_space_used_ = 0;
const int Snapshot::context_new_space_used_ = 0;
const int Snapshot::context_pointer_space_used_ = 0;
......@@ -35,5 +36,5 @@ const int Snapshot::context_code_space_used_ = 0;
const int Snapshot::context_map_space_used_ = 0;
const int Snapshot::context_cell_space_used_ = 0;
const int Snapshot::context_property_cell_space_used_ = 0;
const int Snapshot::context_lo_space_used_ = 0;
} } // namespace v8::internal
......@@ -25,6 +25,7 @@ struct SnapshotImpl {
int map_space_used;
int cell_space_used;
int property_cell_space_used;
int lo_space_used;
const byte* context_data;
int context_size;
......@@ -35,6 +36,7 @@ struct SnapshotImpl {
int context_map_space_used;
int context_cell_space_used;
int context_property_cell_space_used;
int context_lo_space_used;
};
......@@ -66,6 +68,7 @@ bool Snapshot::Initialize(Isolate* isolate) {
deserializer.set_reservation(CELL_SPACE, snapshot_impl_->cell_space_used);
deserializer.set_reservation(PROPERTY_CELL_SPACE,
snapshot_impl_->property_cell_space_used);
deserializer.set_reservation(LO_SPACE, snapshot_impl_->lo_space_used);
bool success = isolate->Init(&deserializer);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
......@@ -97,6 +100,7 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
deserializer.set_reservation(PROPERTY_CELL_SPACE,
snapshot_impl_->
context_property_cell_space_used);
deserializer.set_reservation(LO_SPACE, snapshot_impl_->context_lo_space_used);
Object* root;
deserializer.DeserializePartial(isolate, &root);
CHECK(root->IsContext());
......@@ -123,6 +127,7 @@ void SetSnapshotFromFile(StartupData* snapshot_blob) {
snapshot_impl_->map_space_used = source.GetInt();
snapshot_impl_->cell_space_used = source.GetInt();
snapshot_impl_->property_cell_space_used = source.GetInt();
snapshot_impl_->lo_space_used = source.GetInt();
success &= source.GetBlob(&snapshot_impl_->context_data,
&snapshot_impl_->context_size);
......
......@@ -39,15 +39,17 @@ void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
DCHECK(integer < 1 << 22);
DCHECK(integer < 1 << 30);
integer <<= 2;
int bytes = 1;
if (integer > 0xff) bytes = 2;
if (integer > 0xffff) bytes = 3;
integer |= bytes;
if (integer > 0xffffff) bytes = 4;
integer |= (bytes - 1);
Put(static_cast<int>(integer & 0xff), "IntPart1");
if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
if (bytes > 3) Put(static_cast<int>((integer >> 24) & 0xff), "IntPart4");
}
void SnapshotByteSink::PutRaw(byte* data, int number_of_bytes,
......
......@@ -39,7 +39,7 @@ class SnapshotByteSource FINAL {
// This way of variable-length encoding integers does not suffer from branch
// mispredictions.
uint32_t answer = GetUnalignedInt();
int bytes = answer & 3;
int bytes = (answer & 3) + 1;
Advance(bytes);
uint32_t mask = 0xffffffffu;
mask >>= 32 - (bytes << 3);
......
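The two hunks above widen the length tag from the old 1..3-byte scheme (tag stores the byte count) to a 1..4-byte scheme (tag stores the byte count minus one), which is why the limit moves from 2^22 to 2^30. A minimal standalone round-trip sketch of the new scheme, using simplified stand-ins for SnapshotByteSink/SnapshotByteSource rather than the real classes:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Encode: shift the value left by 2 and store (bytes - 1) in the low two bits.
    void PutIntSketch(std::vector<uint8_t>* out, uint32_t value) {
      assert(value < (1u << 30));  // two bits are reserved for the length tag
      uint32_t encoded = value << 2;
      int bytes = 1;
      if (encoded > 0xff) bytes = 2;
      if (encoded > 0xffff) bytes = 3;
      if (encoded > 0xffffff) bytes = 4;
      encoded |= (bytes - 1);
      for (int i = 0; i < bytes; i++) out->push_back((encoded >> (8 * i)) & 0xff);
    }

    // Decode: the low two bits of the first byte give (bytes - 1). The real GetInt
    // reads four unaligned bytes and masks; reading exactly `bytes` bytes is
    // equivalent for this sketch.
    uint32_t GetIntSketch(const std::vector<uint8_t>& in, size_t* pos) {
      int bytes = (in[*pos] & 3) + 1;
      uint32_t encoded = 0;
      for (int i = 0; i < bytes; i++) {
        encoded |= static_cast<uint32_t>(in[*pos + i]) << (8 * i);
      }
      *pos += bytes;
      return encoded >> 2;
    }

    int main() {
      std::vector<uint8_t> sink;
      size_t pos = 0;
      PutIntSketch(&sink, 0x1234);  // encodes as 0xD1 0x48 (two bytes)
      assert(GetIntSketch(sink, &pos) == 0x1234);
      return 0;
    }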
......@@ -48,6 +48,7 @@ class Snapshot {
static const int map_space_used_;
static const int cell_space_used_;
static const int property_cell_space_used_;
static const int lo_space_used_;
static const int context_new_space_used_;
static const int context_pointer_space_used_;
static const int context_data_space_used_;
......@@ -55,6 +56,7 @@ class Snapshot {
static const int context_map_space_used_;
static const int context_cell_space_used_;
static const int context_property_cell_space_used_;
static const int context_lo_space_used_;
static const int size_;
static const int raw_size_;
static const int context_size_;
......