Commit cb1a96e5 authored by Sathya Gunasekaran, committed by Commit Bot

Revert "[serializer] Change deferring to use forward refs"

This reverts commit 81577a79.

Reason for revert: https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20-%20shared/10544

Original change's description:
> [serializer] Change deferring to use forward refs
> 
> Now that we have forward references, we can replace the body deferring
> mechanism with forward references to the entire pointer.
> 
> This ensures that objects are always deserialized with their contents
> (aside from themselves maybe holding forward refs), and as a result we
> can simplify the CanBeDeferred conditions which encode the constraint
> that some objects either need to immediately have contents, or cannot be
> deferred because their fields are changed temporarily (e.g. backing
> store refs).
> 
> This also means that objects with length fields (e.g. arrays) will
> always have those length fields deserialized when the object is
> deserialized, which was not the case when the body could be deferred.
> This helps us in the plan to make GC possible during deserialization.
> 
> Bug: v8:10815
> Change-Id: Ib0e5399b9de6027765691e8cb47410a2ccc15485
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2390643
> Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> Commit-Queue: Leszek Swirski <leszeks@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69760}

TBR=jgruber@chromium.org,leszeks@chromium.org

Change-Id: I7a93a59217a2b38e2157c0f7ffc7ac648590a8d6
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:10815
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2398535
Reviewed-by: Sathya Gunasekaran <gsathya@chromium.org>
Commit-Queue: Sathya Gunasekaran <gsathya@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69763}
parent 79e4420c
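The original change's description above contrasts two deferral strategies: deferring only an object's body versus leaving the whole pointer slot as a forward reference that is patched later. The following standalone C++ sketch (not V8 code; the names, stream layout, and placeholder value are invented for illustration) shows the forward-reference idea in miniature.

#include <cstdint>
#include <iostream>
#include <vector>

struct Snapshot {
  std::vector<int64_t> stream;           // serialized slots, -1 == unresolved
  std::vector<size_t> unresolved_slots;  // positions awaiting a forward ref
};

// Forward-reference style: the slot itself is written as a placeholder and
// patched once the referenced object has actually been serialized.
int RegisterPendingForwardRef(Snapshot& s) {
  s.stream.push_back(-1);  // placeholder slot
  s.unresolved_slots.push_back(s.stream.size() - 1);
  return static_cast<int>(s.unresolved_slots.size() - 1);  // forward-ref id
}

void ResolvePendingForwardRef(Snapshot& s, int id, int64_t value) {
  s.stream[s.unresolved_slots[id]] = value;  // patch the placeholder
}

int main() {
  Snapshot s;
  int id = RegisterPendingForwardRef(s);  // target not serialized yet
  s.stream.push_back(42);                 // unrelated slots keep streaming
  ResolvePendingForwardRef(s, id, 7);     // target serialized later; patch
  for (int64_t v : s.stream) std::cout << v << ' ';
  std::cout << '\n';  // prints: 7 42
  return 0;
}

The revert below goes back to the other strategy: the slot is filled immediately with a reserved object, and only the object's body (everything after the map word) is deferred via the kDeferred bytecode.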
@@ -117,8 +117,36 @@ void Deserializer::DeserializeDeferredObjects() {
   DisallowHeapAllocation no_gc;
   for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
-    SnapshotSpace space = NewObject::Decode(code);
-    ReadObject(space);
+    switch (code) {
+      case kAlignmentPrefix:
+      case kAlignmentPrefix + 1:
+      case kAlignmentPrefix + 2: {
+        int alignment = code - (SerializerDeserializer::kAlignmentPrefix - 1);
+        allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
+        break;
+      }
+      default: {
+        SnapshotSpace space = NewObject::Decode(code);
+        HeapObject object = GetBackReferencedObject(space);
+        int size = source_.GetInt() << kTaggedSizeLog2;
+        Address obj_address = object.address();
+        // Object's map is already initialized, now read the rest.
+        MaybeObjectSlot start(obj_address + kTaggedSize);
+        MaybeObjectSlot end(obj_address + size);
+        bool filled = ReadData(start, end, space, obj_address);
+        CHECK(filled);
+        DCHECK(CanBeDeferred(object));
+        PostProcessNewObject(object, space);
+      }
+    }
   }
+  // When the deserialization of maps are deferred, they will be created
+  // as filler maps, and we postpone the post processing until the maps
+  // are also deserialized.
+  for (const auto& pair : fillers_to_post_process_) {
+    DCHECK(!pair.first.IsFiller());
+    PostProcessNewObject(pair.first, pair.second);
+  }
 }
@@ -164,7 +192,11 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
   DisallowHeapAllocation no_gc;
   if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
-    if (obj.IsString()) {
+    if (obj.IsFiller()) {
+      DCHECK_EQ(fillers_to_post_process_.find(obj),
+                fillers_to_post_process_.end());
+      fillers_to_post_process_.insert({obj, space});
+    } else if (obj.IsString()) {
       // Uninitialize hash field as we need to recompute the hash.
       String string = String::cast(obj);
       string.set_hash_field(String::kEmptyHashField);
@@ -339,8 +371,10 @@ HeapObject Deserializer::ReadObject() {
   MaybeObject object;
   // We are reading to a location outside of JS heap, so pass kNew to avoid
   // triggering write barriers.
-  ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
-           SnapshotSpace::kNew, kNullAddress);
+  bool filled =
+      ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
+               SnapshotSpace::kNew, kNullAddress);
+  CHECK(filled);
   return object.GetHeapObjectAssumeStrong();
 }
@@ -380,8 +414,10 @@ HeapObject Deserializer::ReadObject(SnapshotSpace space) {
   MaybeObjectSlot limit(address + size);
   current.store(MaybeObject::FromObject(map));
-  ReadData(current + 1, limit, space, address);
-  obj = PostProcessNewObject(obj, space);
+  if (ReadData(current + 1, limit, space, address)) {
+    // Only post process if object content has not been deferred.
+    obj = PostProcessNewObject(obj, space);
+  }
 #ifdef DEBUG
   if (obj.IsCode()) {
@@ -410,7 +446,8 @@ HeapObject Deserializer::ReadMetaMap() {
   current.store(MaybeObject(current.address() + kHeapObjectTag));
   // Set the instance-type manually, to allow backrefs to read it.
   Map::unchecked_cast(obj).set_instance_type(MAP_TYPE);
-  ReadData(current + 1, limit, space, address);
+  // The meta map's contents cannot be deferred.
+  CHECK(ReadData(current + 1, limit, space, address));
   return obj;
 }
@@ -422,7 +459,8 @@ void Deserializer::ReadCodeObjectBody(SnapshotSpace space,
   // Now we read the rest of code header's fields.
   MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
   MaybeObjectSlot limit(code_object_address + Code::kDataStart);
-  ReadData(current, limit, space, code_object_address);
+  bool filled = ReadData(current, limit, space, code_object_address);
+  CHECK(filled);
   // Now iterate RelocInfos the same way it was done by the serialzier and
   // deserialize respective data into RelocInfos.
@@ -535,7 +573,7 @@ constexpr byte VerifyBytecodeCount(byte bytecode) {
 }  // namespace
 template <typename TSlot>
-void Deserializer::ReadData(TSlot current, TSlot limit,
+bool Deserializer::ReadData(TSlot current, TSlot limit,
                             SnapshotSpace source_space,
                             Address current_object_address) {
   // Write barrier support costs around 1% in startup time. In fact there
@@ -640,8 +678,18 @@ void Deserializer::ReadData(TSlot current, TSlot limit,
         break;
       }
+      case kDeferred: {
+        // Deferred can only occur right after the heap object's map field.
+        DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
+        HeapObject obj = HeapObject::FromAddress(current_object_address);
+        // If the deferred object is a map, its instance type may be used
+        // during deserialization. Initialize it with a temporary value.
+        if (obj.IsMap()) Map::cast(obj).set_instance_type(FILLER_TYPE);
+        current = limit;
+        return false;
+      }
       case kRegisterPendingForwardRef: {
+        DCHECK_NE(current_object_address, kNullAddress);
         HeapObject obj = HeapObject::FromAddress(current_object_address);
         unresolved_forward_refs_.emplace_back(
             obj, current.address() - current_object_address);
@@ -833,6 +881,7 @@ void Deserializer::ReadData(TSlot current, TSlot limit,
     }
   }
   CHECK_EQ(limit, current);
+  return true;
 }
 Address Deserializer::ReadExternalReferenceCase() {
......
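The deserializer hunks above change ReadData to return a bool and make DeserializeDeferredObjects re-read each deferred object's body through a back reference. A minimal standalone sketch (not V8 code; the marker value and stream layout are invented) of the "reader returns false on deferral" pattern:

#include <cstddef>
#include <iostream>
#include <vector>

constexpr int kDeferredMarker = -1;  // stands in for the kDeferred bytecode

// Reads an object's slots from `source` into `out`; returns false if the body
// was deferred (marker right after the first slot), so the caller can skip
// post-processing, mirroring the bool-returning ReadData in the diff above.
bool ReadBody(const std::vector<int>& source, size_t& pos,
              std::vector<int>& out, size_t size) {
  out.assign(size, 0);
  out[0] = source[pos++];                // the "map" slot is always read
  if (source[pos] == kDeferredMarker) {  // body deferred: stop early
    ++pos;
    return false;
  }
  for (size_t i = 1; i < size; ++i) out[i] = source[pos++];
  return true;
}

int main() {
  std::vector<int> stream = {10, kDeferredMarker, /* deferred body: */ 20, 30};
  size_t pos = 0;
  std::vector<int> obj;
  bool filled = ReadBody(stream, pos, obj, 3);
  std::cout << "filled=" << filled << '\n';  // filled=0: post-processing skipped
  // A later deferred pass would read the remaining slots {20, 30} into obj.
  return 0;
}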
@@ -135,9 +135,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   // space id is used for the write barrier. The object_address is the address
   // of the object we are writing into, or nullptr if we are not writing into an
   // object, i.e. if we are writing a series of tagged values that are not on
-  // the heap.
+  // the heap. Return false if the object content has been deferred.
   template <typename TSlot>
-  void ReadData(TSlot start, TSlot end, SnapshotSpace space,
+  bool ReadData(TSlot start, TSlot end, SnapshotSpace space,
                 Address object_address);
   // A helper function for ReadData, templatized on the bytecode for efficiency.
@@ -205,6 +205,11 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   // TODO(6593): generalize rehashing, and remove this flag.
   bool can_rehash_;
   std::vector<HeapObject> to_rehash_;
+  // Store the objects whose maps are deferred and thus initialized as filler
+  // maps during deserialization, so that they can be processed later when the
+  // maps become available.
+  std::unordered_map<HeapObject, SnapshotSpace, Object::Hasher>
+      fillers_to_post_process_;
 #ifdef DEBUG
   uint32_t num_api_references_;
......
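An illustrative sketch (not V8 code; names and types are invented) of the fillers_to_post_process_ idea declared above: an object whose map is still deferred is temporarily treated as a filler, and its post-processing is postponed until the deferred maps have been deserialized.

#include <iostream>
#include <unordered_map>

enum class Kind { kFiller, kString };

struct Obj {
  int id;
  Kind kind;
};

void PostProcess(const Obj& obj) {
  std::cout << "post-processing object " << obj.id << '\n';
}

int main() {
  std::unordered_map<int, Obj*> fillers_to_post_process;  // keyed by id here

  Obj a{1, Kind::kFiller};  // its map is deferred, so it currently looks like a filler
  fillers_to_post_process.emplace(a.id, &a);

  // ... the deferred map is deserialized later, fixing up the real kind ...
  a.kind = Kind::kString;

  // Mirrors the loop in DeserializeDeferredObjects: by now nothing should
  // still be a filler (cf. DCHECK(!pair.first.IsFiller())).
  for (const auto& entry : fillers_to_post_process) {
    if (entry.second->kind != Kind::kFiller) PostProcess(*entry.second);
  }
  return 0;
}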
@@ -86,9 +86,12 @@ bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
     // be saved without problems.
     return false;
   }
-  // Defer objects with special alignment requirements until the filler roots
-  // are serialized.
-  return HeapObject::RequiredAlignment(object.map()) != kWordAligned;
+  // Just defer everything except for Map objects until all required roots are
+  // serialized. Some objects may have special alignment requirements, that may
+  // not be fulfilled during deserialization until few first root objects are
+  // serialized. But we must serialize Map objects since deserializer checks
+  // that these root objects are indeed Maps.
+  return !object.IsMap();
 }
 bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
......
@@ -30,9 +30,14 @@ void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
 }
 bool SerializerDeserializer::CanBeDeferred(HeapObject o) {
-  // Maps cannot be deferred as objects are expected to have a valid map
-  // immediately.
-  return !o.IsMap();
+  // ArrayBuffer instances are serialized by first re-assigning a index
+  // to the backing store field, then serializing the object, and then
+  // storing the actual backing store address again (and the same for the
+  // ArrayBufferExtension). If serialization of the object itself is deferred,
+  // the real backing store address is written into the snapshot, which cannot
+  // be processed when deserializing.
+  return !o.IsString() && !o.IsScript() && !o.IsJSTypedArray() &&
+         !o.IsJSArrayBuffer();
 }
 void SerializerDeserializer::RestoreExternalReferenceRedirectors(
......
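The restored CanBeDeferred comment explains why objects whose fields are temporarily rewritten during serialization (ArrayBuffer backing stores, for example) must have their content written immediately. A minimal standalone sketch of that constraint with invented types (this is not V8's JSArrayBuffer handling): the pointer field only holds a snapshot-safe index inside a narrow window, so deferring the write would capture the raw pointer instead.

#include <cstdint>
#include <iostream>
#include <vector>

struct Buffer {
  std::uintptr_t backing_store;  // real pointer while alive, index while serializing
};

// The field is swapped to a snapshot-friendly index only for the duration of
// writing this one object; if writing were deferred past the restore below,
// the raw pointer would leak into the snapshot.
void WriteObjectNow(const Buffer& b, std::vector<std::uintptr_t>& snapshot) {
  snapshot.push_back(b.backing_store);
}

int main() {
  int storage = 123;
  Buffer buf{reinterpret_cast<std::uintptr_t>(&storage)};
  std::vector<std::uintptr_t> snapshot;

  std::uintptr_t saved = buf.backing_store;
  buf.backing_store = 0;           // temporarily store a backing-store index
  WriteObjectNow(buf, snapshot);   // must happen inside the swap window
  buf.backing_store = saved;       // restore the live pointer

  std::cout << "serialized index: " << snapshot[0] << '\n';  // 0, not a pointer
  return 0;
}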
@@ -72,8 +72,8 @@ class SerializerDeserializer : public RootVisitor {
   // clang-format off
 #define UNUSED_SERIALIZER_BYTE_CODES(V) \
   V(0x06) V(0x07) V(0x0e) V(0x0f) \
-  /* Free range 0x2a..0x2f */ \
-  V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
+  /* Free range 0x2b..0x2f */ \
+  V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
   /* Free range 0x30..0x3f */ \
   V(0x30) V(0x31) V(0x32) V(0x33) V(0x34) V(0x35) V(0x36) V(0x37) \
   V(0x38) V(0x39) V(0x3a) V(0x3b) V(0x3c) V(0x3d) V(0x3e) V(0x3f) \
@@ -145,14 +145,16 @@ class SerializerDeserializer : public RootVisitor {
     kNop,
     // Move to next reserved chunk.
     kNextChunk,
-    // 3 alignment prefixes 0x16..0x18
-    kAlignmentPrefix = 0x16,
+    // Deferring object content.
+    kDeferred,
+    // 3 alignment prefixes 0x17..0x19
+    kAlignmentPrefix = 0x17,
     // A tag emitted at strategic points in the snapshot to delineate sections.
     // If the deserializer does not find these at the expected moments then it
     // is an indication that the snapshot and the VM do not fit together.
     // Examine the build process for architecture, version or configuration
     // mismatches.
-    kSynchronize = 0x19,
+    kSynchronize = 0x1a,
     // Repeats of variable length.
     kVariableRepeat,
     // Used for embedder-allocated backing stores for TypedArrays.
......
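A small illustration of why the hunk above renumbers the bytecodes: re-inserting kDeferred ahead of kAlignmentPrefix pushes the alignment prefixes from 0x16..0x18 to 0x17..0x19 and kSynchronize from 0x19 to 0x1a, which also shrinks the free range from 0x2a..0x2f to 0x2b..0x2f. Only 0x17 and 0x1a are taken from the diff; the value assigned to kDeferred here is an assumption for the sake of the example.

#include <cstdio>

enum Bytecode : unsigned {
  kDeferred = 0x16,         // assumed value, implied by the explicit 0x17 below
  kAlignmentPrefix = 0x17,  // three prefixes occupy 0x17..0x19
  kSynchronize = 0x1a,
};

static_assert(kAlignmentPrefix == kDeferred + 1,
              "kDeferred shifts the alignment prefixes by one");
static_assert(kSynchronize == kAlignmentPrefix + 3,
              "three alignment prefixes precede kSynchronize");

int main() {
  std::printf("kDeferred=0x%x kAlignmentPrefix=0x%x kSynchronize=0x%x\n",
              kDeferred, kAlignmentPrefix, kSynchronize);
  return 0;
}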
@@ -71,9 +71,6 @@ void Serializer::OutputStatistics(const char* name) {
 }
 void Serializer::SerializeDeferredObjects() {
-  if (FLAG_trace_serializer) {
-    PrintF("Serializing deferred objects\n");
-  }
   while (!deferred_objects_.empty()) {
     HeapObject obj = deferred_objects_.back();
     deferred_objects_.pop_back();
@@ -168,13 +165,13 @@ bool Serializer::SerializeBackReference(HeapObject obj) {
 }
 bool Serializer::SerializePendingObject(HeapObject obj) {
-  PendingObjectReference pending_obj =
-      forward_refs_per_pending_object_.find(obj);
-  if (pending_obj == forward_refs_per_pending_object_.end()) {
+  auto it = forward_refs_per_pending_object_.find(obj);
+  if (it == forward_refs_per_pending_object_.end()) {
     return false;
   }
-  PutPendingForwardReferenceTo(pending_obj);
+  int forward_ref_id = PutPendingForwardReference();
+  it->second.push_back(forward_ref_id);
   return true;
 }
@@ -274,13 +271,10 @@ void Serializer::PutRepeat(int repeat_count) {
   }
 }
-void Serializer::PutPendingForwardReferenceTo(
-    PendingObjectReference reference) {
+int Serializer::PutPendingForwardReference() {
   sink_.Put(kRegisterPendingForwardRef, "RegisterPendingForwardRef");
   unresolved_forward_refs_++;
-  // Register the current slot with the pending object.
-  int forward_ref_id = next_forward_ref_id_++;
-  reference->second.push_back(forward_ref_id);
+  return next_forward_ref_id_++;
 }
 void Serializer::ResolvePendingForwardReference(int forward_reference_id) {
@@ -301,11 +295,9 @@ Serializer::PendingObjectReference Serializer::RegisterObjectIsPending(
   auto forward_refs_entry_insertion =
       forward_refs_per_pending_object_.emplace(obj, std::vector<int>());
-  // If the above emplace didn't actually add the object, then the object must
-  // already have been registered pending by deferring. It might not be in the
-  // deferred objects queue though, since it may be the very object we just
-  // popped off that queue, so just check that it can be deferred.
-  DCHECK_IMPLIES(!forward_refs_entry_insertion.second, CanBeDeferred(obj));
+  // Make sure the above emplace actually added the object, rather than
+  // overwriting an existing entry.
+  DCHECK(forward_refs_entry_insertion.second);
   // return the iterator into the map as the reference.
   return forward_refs_entry_insertion.first;
@@ -589,26 +581,6 @@ class UnlinkWeakNextScope {
 };
 void Serializer::ObjectSerializer::Serialize() {
-  RecursionScope recursion(serializer_);
-  // Defer objects as "pending" if they cannot be serialized now, or if we
-  // exceed a certain recursion depth. Some objects cannot be deferred
-  if ((recursion.ExceedsMaximum() && CanBeDeferred(object_)) ||
-      serializer_->MustBeDeferred(object_)) {
-    DCHECK(CanBeDeferred(object_));
-    if (FLAG_trace_serializer) {
-      PrintF(" Deferring heap object: ");
-      object_.ShortPrint();
-      PrintF("\n");
-    }
-    // Deferred objects are considered "pending".
-    PendingObjectReference pending_obj =
-        serializer_->RegisterObjectIsPending(object_);
-    serializer_->PutPendingForwardReferenceTo(pending_obj);
-    serializer_->QueueDeferredObject(object_);
-    return;
-  }
   if (FLAG_trace_serializer) {
     PrintF(" Encoding heap object: ");
     object_.ShortPrint();
@@ -697,27 +669,43 @@ void Serializer::ObjectSerializer::SerializeObject() {
   CHECK_EQ(0, bytes_processed_so_far_);
   bytes_processed_so_far_ = kTaggedSize;
+  RecursionScope recursion(serializer_);
+  // Objects that are immediately post processed during deserialization
+  // cannot be deferred, since post processing requires the object content.
+  if ((recursion.ExceedsMaximum() && CanBeDeferred(object_)) ||
+      serializer_->MustBeDeferred(object_)) {
+    serializer_->QueueDeferredObject(object_);
+    sink_->Put(kDeferred, "Deferring object content");
+    return;
+  }
   SerializeContent(map, size);
 }
 void Serializer::ObjectSerializer::SerializeDeferred() {
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding deferred heap object: ");
+    object_.ShortPrint();
+    PrintF("\n");
+  }
+  int size = object_.Size();
+  Map map = object_.map();
   SerializerReference back_reference =
       serializer_->reference_map()->LookupReference(
           reinterpret_cast<void*>(object_.ptr()));
+  DCHECK(back_reference.is_back_reference());
-  if (back_reference.is_valid()) {
-    if (FLAG_trace_serializer) {
-      PrintF(" Deferred heap object ");
-      object_.ShortPrint();
-      PrintF(" was already serialized\n");
-    }
-    return;
-  }
-  if (FLAG_trace_serializer) {
-    PrintF(" Encoding deferred heap object\n");
-  }
-  Serialize();
+  // Serialize the rest of the object.
+  CHECK_EQ(0, bytes_processed_so_far_);
+  bytes_processed_so_far_ = kTaggedSize;
+  serializer_->PutAlignmentPrefix(object_);
+  sink_->Put(NewObject::Encode(back_reference.space()), "deferred object");
+  serializer_->PutBackReference(object_, back_reference);
+  sink_->PutInt(size >> kTaggedSizeLog2, "deferred object size");
+  SerializeContent(map, size);
 }
void Serializer::ObjectSerializer::SerializeContent(Map map, int size) { void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
......
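The restored SerializeDeferred above re-emits a deferred object's content as its own record: an alignment prefix if needed, a NewObject code for the space, a back reference to the already-allocated object, the size in tagged words, and then the body. The toy sink below (not V8's SnapshotByteSink; all encodings and values are invented) only illustrates that ordering.

#include <iostream>
#include <vector>

struct Sink {
  std::vector<unsigned> data;
  void Put(unsigned value, const char* what) {
    data.push_back(value);
    std::cout << what << " -> " << value << '\n';
  }
};

int main() {
  Sink sink;
  const unsigned kTaggedSizeLog2 = 3;  // 8-byte tagged slots, for illustration
  unsigned space = 1, back_ref_index = 42, size_in_bytes = 64;

  sink.Put(0x40 + space, "deferred object (NewObject code for its space)");
  sink.Put(back_ref_index, "back reference to the reserved object");
  sink.Put(size_in_bytes >> kTaggedSizeLog2, "deferred object size in tagged words");
  sink.Put(0, "object body slots follow");
  return 0;
}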
@@ -176,9 +176,6 @@ class Serializer : public SerializerDeserializer {
   Isolate* isolate() const { return isolate_; }
  protected:
-  using PendingObjectReference =
-      std::map<HeapObject, std::vector<int>>::iterator;
   class ObjectSerializer;
   class RecursionScope {
    public:
@@ -215,7 +212,7 @@ class Serializer : public SerializerDeserializer {
   // Emit a marker noting that this slot is a forward reference to the an
   // object which has not yet been serialized.
-  void PutPendingForwardReferenceTo(PendingObjectReference reference);
+  int PutPendingForwardReference();
   // Resolve the given previously registered forward reference to the current
   // object.
   void ResolvePendingForwardReference(int obj);
@@ -254,11 +251,14 @@ class Serializer : public SerializerDeserializer {
   Code CopyCode(Code code);
   void QueueDeferredObject(HeapObject obj) {
-    DCHECK(!reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()))
-                .is_valid());
+    DCHECK(reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()))
+               .is_back_reference());
     deferred_objects_.push_back(obj);
   }
+  using PendingObjectReference =
+      std::map<HeapObject, std::vector<int>>::iterator;
   // Register that the the given object shouldn't be immediately serialized, but
   // will be serialized later and any references to it should be pending forward
   // references.
......