Commit 81577a79 authored by Leszek Swirski, committed by Commit Bot

[serializer] Change deferring to use forward refs

Now that we have forward references, we can replace the body deferring
mechanism with forward references to the entire pointer.

This ensures that objects are always deserialized with their contents
(aside from fields that may themselves hold forward refs), and as a result
we can simplify the CanBeDeferred conditions, which encode the constraint
that some objects either need their contents immediately, or cannot be
deferred because their fields are changed temporarily (e.g. backing
store refs).

This also means that objects with length fields (e.g. arrays) will
always have those length fields deserialized when the object is
deserialized, which was not the case when the body could be deferred.
This helps us in the plan to make GC possible during deserialization.

Bug: v8:10815
Change-Id: Ib0e5399b9de6027765691e8cb47410a2ccc15485
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2390643
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69760}
parent f2a832ca
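As context for the mechanism described in the commit message, here is a rough, self-contained C++ sketch (not code from this patch; the Sink type, the int object ids, and the 0x20 bytecode value are invented for illustration) of deferring via forward references: instead of emitting the deferred object's body later, the referencing slot records a pending forward-reference id that the object resolves once it is eventually serialized in full.

#include <cstdio>
#include <map>
#include <vector>

// Stand-in for the serializer's byte sink.
struct Sink {
  std::vector<int> bytes;
  void Put(int b) { bytes.push_back(b); }
};

class Serializer {
 public:
  // Iterator into the pending-object map, mirroring the PendingObjectReference
  // alias used by the real serializer.
  using PendingObjectReference = std::map<int, std::vector<int>>::iterator;

  // Mark an object (identified here by a plain int) as pending; registering
  // it again just returns the existing entry.
  PendingObjectReference RegisterObjectIsPending(int obj) {
    return forward_refs_per_pending_object_.emplace(obj, std::vector<int>())
        .first;
  }

  // Emit a placeholder bytecode for the current slot and remember which
  // forward-ref id the pending object must resolve later.
  void PutPendingForwardReferenceTo(PendingObjectReference reference) {
    sink_.Put(kRegisterPendingForwardRef);
    reference->second.push_back(next_forward_ref_id_++);
  }

  const std::vector<int>& RefsFor(int obj) {
    return forward_refs_per_pending_object_[obj];
  }

 private:
  static constexpr int kRegisterPendingForwardRef = 0x20;  // invented value
  Sink sink_;
  std::map<int, std::vector<int>> forward_refs_per_pending_object_;
  int next_forward_ref_id_ = 0;
};

int main() {
  Serializer s;
  // "Defer" object 42: register it as pending and emit a forward ref for the
  // slot that would have pointed at it; its body is serialized later in full.
  auto ref = s.RegisterObjectIsPending(42);
  s.PutPendingForwardReferenceTo(ref);
  std::printf("pending forward refs for object 42: %zu\n",
              s.RefsFor(42).size());
  return 0;
}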
@@ -117,36 +117,8 @@ void Deserializer::DeserializeDeferredObjects() {
   DisallowHeapAllocation no_gc;
   for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
-    switch (code) {
-      case kAlignmentPrefix:
-      case kAlignmentPrefix + 1:
-      case kAlignmentPrefix + 2: {
-        int alignment = code - (SerializerDeserializer::kAlignmentPrefix - 1);
-        allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
-        break;
-      }
-      default: {
-        SnapshotSpace space = NewObject::Decode(code);
-        HeapObject object = GetBackReferencedObject(space);
-        int size = source_.GetInt() << kTaggedSizeLog2;
-        Address obj_address = object.address();
-        // Object's map is already initialized, now read the rest.
-        MaybeObjectSlot start(obj_address + kTaggedSize);
-        MaybeObjectSlot end(obj_address + size);
-        bool filled = ReadData(start, end, space, obj_address);
-        CHECK(filled);
-        DCHECK(CanBeDeferred(object));
-        PostProcessNewObject(object, space);
-      }
-    }
-  }
-
-  // When the deserialization of maps are deferred, they will be created
-  // as filler maps, and we postpone the post processing until the maps
-  // are also deserialized.
-  for (const auto& pair : fillers_to_post_process_) {
-    DCHECK(!pair.first.IsFiller());
-    PostProcessNewObject(pair.first, pair.second);
+    SnapshotSpace space = NewObject::Decode(code);
+    ReadObject(space);
   }
 }
@@ -192,11 +164,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
   DisallowHeapAllocation no_gc;
   if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
-    if (obj.IsFiller()) {
-      DCHECK_EQ(fillers_to_post_process_.find(obj),
-                fillers_to_post_process_.end());
-      fillers_to_post_process_.insert({obj, space});
-    } else if (obj.IsString()) {
+    if (obj.IsString()) {
       // Uninitialize hash field as we need to recompute the hash.
       String string = String::cast(obj);
       string.set_hash_field(String::kEmptyHashField);
@@ -371,10 +339,8 @@ HeapObject Deserializer::ReadObject() {
   MaybeObject object;
   // We are reading to a location outside of JS heap, so pass kNew to avoid
   // triggering write barriers.
-  bool filled =
-      ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
-               SnapshotSpace::kNew, kNullAddress);
-  CHECK(filled);
+  ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
+           SnapshotSpace::kNew, kNullAddress);
   return object.GetHeapObjectAssumeStrong();
 }
@@ -414,10 +380,8 @@ HeapObject Deserializer::ReadObject(SnapshotSpace space) {
   MaybeObjectSlot limit(address + size);
   current.store(MaybeObject::FromObject(map));
-  if (ReadData(current + 1, limit, space, address)) {
-    // Only post process if object content has not been deferred.
-    obj = PostProcessNewObject(obj, space);
-  }
+  ReadData(current + 1, limit, space, address);
+  obj = PostProcessNewObject(obj, space);
 
 #ifdef DEBUG
   if (obj.IsCode()) {
@@ -446,8 +410,7 @@ HeapObject Deserializer::ReadMetaMap() {
   current.store(MaybeObject(current.address() + kHeapObjectTag));
   // Set the instance-type manually, to allow backrefs to read it.
   Map::unchecked_cast(obj).set_instance_type(MAP_TYPE);
-  // The meta map's contents cannot be deferred.
-  CHECK(ReadData(current + 1, limit, space, address));
+  ReadData(current + 1, limit, space, address);
 
   return obj;
 }
@@ -459,8 +422,7 @@ void Deserializer::ReadCodeObjectBody(SnapshotSpace space,
   // Now we read the rest of code header's fields.
   MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
   MaybeObjectSlot limit(code_object_address + Code::kDataStart);
-  bool filled = ReadData(current, limit, space, code_object_address);
-  CHECK(filled);
+  ReadData(current, limit, space, code_object_address);
 
   // Now iterate RelocInfos the same way it was done by the serialzier and
   // deserialize respective data into RelocInfos.
@@ -573,7 +535,7 @@ constexpr byte VerifyBytecodeCount(byte bytecode) {
 }  // namespace
 
 template <typename TSlot>
-bool Deserializer::ReadData(TSlot current, TSlot limit,
+void Deserializer::ReadData(TSlot current, TSlot limit,
                             SnapshotSpace source_space,
                             Address current_object_address) {
   // Write barrier support costs around 1% in startup time. In fact there
@@ -678,18 +640,8 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
         break;
       }
 
-      case kDeferred: {
-        // Deferred can only occur right after the heap object's map field.
-        DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
-        HeapObject obj = HeapObject::FromAddress(current_object_address);
-        // If the deferred object is a map, its instance type may be used
-        // during deserialization. Initialize it with a temporary value.
-        if (obj.IsMap()) Map::cast(obj).set_instance_type(FILLER_TYPE);
-        current = limit;
-        return false;
-      }
-
       case kRegisterPendingForwardRef: {
-        DCHECK_NE(current_object_address, kNullAddress);
         HeapObject obj = HeapObject::FromAddress(current_object_address);
         unresolved_forward_refs_.emplace_back(
             obj, current.address() - current_object_address);
@@ -881,7 +833,6 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
     }
   }
   CHECK_EQ(limit, current);
-  return true;
 }
 
 Address Deserializer::ReadExternalReferenceCase() {
......
@@ -135,9 +135,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   // space id is used for the write barrier. The object_address is the address
   // of the object we are writing into, or nullptr if we are not writing into an
   // object, i.e. if we are writing a series of tagged values that are not on
-  // the heap. Return false if the object content has been deferred.
+  // the heap.
   template <typename TSlot>
-  bool ReadData(TSlot start, TSlot end, SnapshotSpace space,
+  void ReadData(TSlot start, TSlot end, SnapshotSpace space,
                 Address object_address);
 
   // A helper function for ReadData, templatized on the bytecode for efficiency.
@@ -205,11 +205,6 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   // TODO(6593): generalize rehashing, and remove this flag.
   bool can_rehash_;
   std::vector<HeapObject> to_rehash_;
-  // Store the objects whose maps are deferred and thus initialized as filler
-  // maps during deserialization, so that they can be processed later when the
-  // maps become available.
-  std::unordered_map<HeapObject, SnapshotSpace, Object::Hasher>
-      fillers_to_post_process_;
 
 #ifdef DEBUG
   uint32_t num_api_references_;
......
@@ -86,12 +86,9 @@ bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
     // be saved without problems.
     return false;
   }
-  // Just defer everything except for Map objects until all required roots are
-  // serialized. Some objects may have special alignment requirements, that may
-  // not be fulfilled during deserialization until few first root objects are
-  // serialized. But we must serialize Map objects since deserializer checks
-  // that these root objects are indeed Maps.
-  return !object.IsMap();
+  // Defer objects with special alignment requirements until the filler roots
+  // are serialized.
+  return HeapObject::RequiredAlignment(object.map()) != kWordAligned;
 }
 
 bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
......
@@ -30,14 +30,9 @@ void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
 }
 
 bool SerializerDeserializer::CanBeDeferred(HeapObject o) {
-  // ArrayBuffer instances are serialized by first re-assigning a index
-  // to the backing store field, then serializing the object, and then
-  // storing the actual backing store address again (and the same for the
-  // ArrayBufferExtension). If serialization of the object itself is deferred,
-  // the real backing store address is written into the snapshot, which cannot
-  // be processed when deserializing.
-  return !o.IsString() && !o.IsScript() && !o.IsJSTypedArray() &&
-         !o.IsJSArrayBuffer();
+  // Maps cannot be deferred as objects are expected to have a valid map
+  // immediately.
+  return !o.IsMap();
 }
 
 void SerializerDeserializer::RestoreExternalReferenceRedirectors(
......
@@ -72,8 +72,8 @@ class SerializerDeserializer : public RootVisitor {
   // clang-format off
 #define UNUSED_SERIALIZER_BYTE_CODES(V) \
   V(0x06) V(0x07) V(0x0e) V(0x0f) \
-  /* Free range 0x2b..0x2f */ \
-  V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
+  /* Free range 0x2a..0x2f */ \
+  V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
   /* Free range 0x30..0x3f */ \
   V(0x30) V(0x31) V(0x32) V(0x33) V(0x34) V(0x35) V(0x36) V(0x37) \
   V(0x38) V(0x39) V(0x3a) V(0x3b) V(0x3c) V(0x3d) V(0x3e) V(0x3f) \
@@ -145,16 +145,14 @@ class SerializerDeserializer : public RootVisitor {
     kNop,
     // Move to next reserved chunk.
     kNextChunk,
-    // Deferring object content.
-    kDeferred,
-    // 3 alignment prefixes 0x17..0x19
-    kAlignmentPrefix = 0x17,
+    // 3 alignment prefixes 0x16..0x18
+    kAlignmentPrefix = 0x16,
     // A tag emitted at strategic points in the snapshot to delineate sections.
     // If the deserializer does not find these at the expected moments then it
     // is an indication that the snapshot and the VM do not fit together.
     // Examine the build process for architecture, version or configuration
     // mismatches.
-    kSynchronize = 0x1a,
+    kSynchronize = 0x19,
     // Repeats of variable length.
     kVariableRepeat,
     // Used for embedder-allocated backing stores for TypedArrays.
......
@@ -71,6 +71,9 @@ void Serializer::OutputStatistics(const char* name) {
 }
 
 void Serializer::SerializeDeferredObjects() {
+  if (FLAG_trace_serializer) {
+    PrintF("Serializing deferred objects\n");
+  }
   while (!deferred_objects_.empty()) {
     HeapObject obj = deferred_objects_.back();
     deferred_objects_.pop_back();
@@ -165,13 +168,13 @@ bool Serializer::SerializeBackReference(HeapObject obj) {
 }
 
 bool Serializer::SerializePendingObject(HeapObject obj) {
-  auto it = forward_refs_per_pending_object_.find(obj);
-  if (it == forward_refs_per_pending_object_.end()) {
+  PendingObjectReference pending_obj =
+      forward_refs_per_pending_object_.find(obj);
+  if (pending_obj == forward_refs_per_pending_object_.end()) {
     return false;
   }
 
-  int forward_ref_id = PutPendingForwardReference();
-  it->second.push_back(forward_ref_id);
+  PutPendingForwardReferenceTo(pending_obj);
 
   return true;
 }
@@ -271,10 +274,13 @@ void Serializer::PutRepeat(int repeat_count) {
   }
 }
 
-int Serializer::PutPendingForwardReference() {
+void Serializer::PutPendingForwardReferenceTo(
+    PendingObjectReference reference) {
   sink_.Put(kRegisterPendingForwardRef, "RegisterPendingForwardRef");
   unresolved_forward_refs_++;
-  return next_forward_ref_id_++;
+  // Register the current slot with the pending object.
+  int forward_ref_id = next_forward_ref_id_++;
+  reference->second.push_back(forward_ref_id);
 }
 
 void Serializer::ResolvePendingForwardReference(int forward_reference_id) {
@@ -295,9 +301,11 @@ Serializer::PendingObjectReference Serializer::RegisterObjectIsPending(
   auto forward_refs_entry_insertion =
       forward_refs_per_pending_object_.emplace(obj, std::vector<int>());
 
-  // Make sure the above emplace actually added the object, rather than
-  // overwriting an existing entry.
-  DCHECK(forward_refs_entry_insertion.second);
+  // If the above emplace didn't actually add the object, then the object must
+  // already have been registered pending by deferring. It might not be in the
+  // deferred objects queue though, since it may be the very object we just
+  // popped off that queue, so just check that it can be deferred.
+  DCHECK_IMPLIES(!forward_refs_entry_insertion.second, CanBeDeferred(obj));
 
   // return the iterator into the map as the reference.
   return forward_refs_entry_insertion.first;
@@ -581,6 +589,26 @@ class UnlinkWeakNextScope {
 };
 
 void Serializer::ObjectSerializer::Serialize() {
+  RecursionScope recursion(serializer_);
+
+  // Defer objects as "pending" if they cannot be serialized now, or if we
+  // exceed a certain recursion depth. Some objects cannot be deferred
+  if ((recursion.ExceedsMaximum() && CanBeDeferred(object_)) ||
+      serializer_->MustBeDeferred(object_)) {
+    DCHECK(CanBeDeferred(object_));
+    if (FLAG_trace_serializer) {
+      PrintF(" Deferring heap object: ");
+      object_.ShortPrint();
+      PrintF("\n");
+    }
+    // Deferred objects are considered "pending".
+    PendingObjectReference pending_obj =
+        serializer_->RegisterObjectIsPending(object_);
+    serializer_->PutPendingForwardReferenceTo(pending_obj);
+    serializer_->QueueDeferredObject(object_);
+    return;
+  }
+
   if (FLAG_trace_serializer) {
     PrintF(" Encoding heap object: ");
     object_.ShortPrint();
@@ -669,43 +697,27 @@ void Serializer::ObjectSerializer::SerializeObject() {
   CHECK_EQ(0, bytes_processed_so_far_);
   bytes_processed_so_far_ = kTaggedSize;
 
-  RecursionScope recursion(serializer_);
-  // Objects that are immediately post processed during deserialization
-  // cannot be deferred, since post processing requires the object content.
-  if ((recursion.ExceedsMaximum() && CanBeDeferred(object_)) ||
-      serializer_->MustBeDeferred(object_)) {
-    serializer_->QueueDeferredObject(object_);
-    sink_->Put(kDeferred, "Deferring object content");
-    return;
-  }
-
   SerializeContent(map, size);
 }
 
 void Serializer::ObjectSerializer::SerializeDeferred() {
-  if (FLAG_trace_serializer) {
-    PrintF(" Encoding deferred heap object: ");
-    object_.ShortPrint();
-    PrintF("\n");
-  }
-
-  int size = object_.Size();
-  Map map = object_.map();
   SerializerReference back_reference =
       serializer_->reference_map()->LookupReference(
          reinterpret_cast<void*>(object_.ptr()));
-  DCHECK(back_reference.is_back_reference());
 
-  // Serialize the rest of the object.
-  CHECK_EQ(0, bytes_processed_so_far_);
-  bytes_processed_so_far_ = kTaggedSize;
-
-  serializer_->PutAlignmentPrefix(object_);
-  sink_->Put(NewObject::Encode(back_reference.space()), "deferred object");
-  serializer_->PutBackReference(object_, back_reference);
-  sink_->PutInt(size >> kTaggedSizeLog2, "deferred object size");
+  if (back_reference.is_valid()) {
+    if (FLAG_trace_serializer) {
+      PrintF(" Deferred heap object ");
+      object_.ShortPrint();
+      PrintF(" was already serialized\n");
+    }
+    return;
+  }
 
-  SerializeContent(map, size);
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding deferred heap object\n");
+  }
+
+  Serialize();
 }
 
 void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
......
@@ -176,6 +176,9 @@ class Serializer : public SerializerDeserializer {
   Isolate* isolate() const { return isolate_; }
 
  protected:
+  using PendingObjectReference =
+      std::map<HeapObject, std::vector<int>>::iterator;
+
   class ObjectSerializer;
   class RecursionScope {
    public:
@@ -212,7 +215,7 @@ class Serializer : public SerializerDeserializer {
   // Emit a marker noting that this slot is a forward reference to the an
   // object which has not yet been serialized.
-  int PutPendingForwardReference();
+  void PutPendingForwardReferenceTo(PendingObjectReference reference);
   // Resolve the given previously registered forward reference to the current
   // object.
   void ResolvePendingForwardReference(int obj);
@@ -251,14 +254,11 @@ class Serializer : public SerializerDeserializer {
   Code CopyCode(Code code);
 
   void QueueDeferredObject(HeapObject obj) {
-    DCHECK(reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()))
-               .is_back_reference());
+    DCHECK(!reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()))
+                .is_valid());
     deferred_objects_.push_back(obj);
   }
 
-  using PendingObjectReference =
-      std::map<HeapObject, std::vector<int>>::iterator;
-
   // Register that the the given object shouldn't be immediately serialized, but
   // will be serialized later and any references to it should be pending forward
   // references.
......