Commit 59e4b751 authored by Jakob Gruber, committed by Commit Bot

[snapshot] Refactor Serializer

This CL refactors allocation & reservation logic into a new
DefaultSerializerAllocator class.  In upcoming work, this will be
further extended by a custom allocator for builtin serialization.

Additionally, this cleans up a bunch of cosmetics (encapsulation and
other nits).

Bug: v8:6624
Change-Id: Ibcf12a525c8fcb26d9c16b7a12fd598c37a0e10a
Reviewed-on: https://chromium-review.googlesource.com/650357
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48077}
parent 5b127a97
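Before the per-file diff, here is a minimal standalone sketch of the pattern this CL introduces: a serializer parameterized on an allocator policy, with a default template argument so existing subclasses can simply write Serializer<>. All names below are illustrative stand-ins, not the real V8 classes.

#include <cstdint>
#include <iostream>

// Illustrative stand-in for V8's SerializerReference.
struct SerializerReference {
  int space;
  uint32_t index;
};

// Plays the role of DefaultSerializerAllocator: it owns all
// allocation/reservation bookkeeping on behalf of the serializer.
class DefaultAllocatorModel {
 public:
  SerializerReference Allocate(int space, uint32_t size) {
    total_size_ += size;
    return {space, next_index_++};
  }

 private:
  uint32_t next_index_ = 0;
  uint64_t total_size_ = 0;
};

// Plays the role of Serializer: templated on its allocator with a
// default, so existing subclasses derive from SerializerModel<> while a
// future builtin serializer can supply a custom policy.
template <class AllocatorT = DefaultAllocatorModel>
class SerializerModel {
 public:
  SerializerReference AllocateObject(int space, uint32_t size) {
    return allocator_.Allocate(space, size);  // delegate to the policy
  }

 private:
  AllocatorT allocator_;
};

int main() {
  SerializerModel<> serializer;  // picks DefaultAllocatorModel
  SerializerReference ref = serializer.AllocateObject(/*space=*/1, /*size=*/64);
  std::cout << "space=" << ref.space << " index=" << ref.index << "\n";
  return 0;
}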
BUILD.gn:
@@ -1953,6 +1953,8 @@ v8_source_set("v8_base") {
"src/snapshot/builtin-serializer.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/default-serializer-allocator.cc",
"src/snapshot/default-serializer-allocator.h",
"src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
"src/snapshot/natives-common.cc",
......
src/heap/heap.cc:
@@ -4063,7 +4063,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
- for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
......
src/snapshot/builtin-serializer.cc:
@@ -20,7 +20,7 @@ BuiltinSerializer::~BuiltinSerializer() {
void BuiltinSerializer::SerializeBuiltins() {
for (int i = 0; i < Builtins::builtin_count; i++) {
- builtin_offsets_[i] = sink()->Position();
+ builtin_offsets_[i] = sink_.Position();
SerializeBuiltin(isolate()->builtins()->builtin(i));
}
Pad(); // Pad with kNop since GetInt() might read too far.
@@ -55,7 +55,7 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
DCHECK(!o->IsSmi());
// Roots can simply be serialized as root references.
- int root_index = root_index_map_.Lookup(o);
+ int root_index = root_index_map()->Lookup(o);
if (root_index != RootIndexMap::kInvalidRootIndex) {
DCHECK(startup_serializer_->root_has_been_serialized(root_index));
PutRoot(root_index, o, how_to_code, where_to_point, skip);
......
src/snapshot/builtin-serializer.h:
@@ -15,7 +15,7 @@ class StartupSerializer;
// Responsible for serializing all builtin objects during startup snapshot
// creation. Builtins are serialized into a dedicated area of the snapshot.
// See snapshot.h for documentation of the snapshot layout.
- class BuiltinSerializer : public Serializer {
+ class BuiltinSerializer : public Serializer<> {
public:
BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~BuiltinSerializer() override;
......
src/snapshot/code-serializer.cc:
@@ -55,7 +55,7 @@ ScriptData* CodeSerializer::Serialize(Handle<HeapObject> obj) {
SerializeDeferredObjects();
Pad();
- SerializedCodeData data(sink()->data(), this);
+ SerializedCodeData data(sink_.data(), this);
return data.GetScriptData();
}
@@ -64,7 +64,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map_.Lookup(obj);
+ int root_index = root_index_map()->Lookup(obj);
if (root_index != RootIndexMap::kInvalidRootIndex) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
@@ -317,9 +317,7 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
DisallowHeapAllocation no_gc;
const std::vector<uint32_t>* stub_keys = cs->stub_keys();
- std::vector<Reservation> reservations;
- cs->EncodeReservations(&reservations);
+ std::vector<Reservation> reservations = cs->EncodeReservations();
// Calculate sizes.
uint32_t reservation_size =
......
src/snapshot/code-serializer.h:
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
- class CodeSerializer : public Serializer {
+ class CodeSerializer : public Serializer<> {
public:
static ScriptData* Serialize(Isolate* isolate,
Handle<SharedFunctionInfo> info,
......
src/snapshot/default-serializer-allocator.cc (new file):

// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/default-serializer-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/snapshot/serializer.h"
#include "src/snapshot/snapshot-source-sink.h"
namespace v8 {
namespace internal {
DefaultSerializerAllocator::DefaultSerializerAllocator(
Serializer<DefaultSerializerAllocator>* serializer)
: serializer_(serializer) {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
pending_chunk_[i] = 0;
}
}
SerializerReference DefaultSerializerAllocator::Allocate(AllocationSpace space,
uint32_t size) {
DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
DCHECK(size > 0 && size <= MaxChunkSizeInSpace(space));
// Maps are allocated through AllocateMap.
DCHECK_NE(MAP_SPACE, space);
uint32_t new_chunk_size = pending_chunk_[space] + size;
if (new_chunk_size > MaxChunkSizeInSpace(space)) {
// The new chunk size would not fit onto a single page. Complete the
// current chunk and start a new one.
serializer_->PutNextChunk(space);
completed_chunks_[space].push_back(pending_chunk_[space]);
pending_chunk_[space] = 0;
new_chunk_size = size;
}
uint32_t offset = pending_chunk_[space];
pending_chunk_[space] = new_chunk_size;
return SerializerReference::BackReference(
space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
}
SerializerReference DefaultSerializerAllocator::AllocateMap() {
// Maps are allocated one-by-one when deserializing.
return SerializerReference::MapReference(num_maps_++);
}
SerializerReference DefaultSerializerAllocator::AllocateLargeObject(
uint32_t size) {
// Large objects are allocated one-by-one when deserializing. We do not
// have to keep track of multiple chunks.
large_objects_total_size_ += size;
return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
}
SerializerReference DefaultSerializerAllocator::AllocateOffHeapBackingStore() {
DCHECK_NE(0, seen_backing_stores_index_);
return SerializerReference::OffHeapBackingStoreReference(
seen_backing_stores_index_++);
}
#ifdef DEBUG
bool DefaultSerializerAllocator::BackReferenceIsAlreadyAllocated(
SerializerReference reference) const {
DCHECK(reference.is_back_reference());
AllocationSpace space = reference.space();
if (space == LO_SPACE) {
return reference.large_object_index() < seen_large_objects_index_;
} else if (space == MAP_SPACE) {
return reference.map_index() < num_maps_;
} else {
size_t chunk_index = reference.chunk_index();
if (chunk_index == completed_chunks_[space].size()) {
return reference.chunk_offset() < pending_chunk_[space];
} else {
return chunk_index < completed_chunks_[space].size() &&
reference.chunk_offset() < completed_chunks_[space][chunk_index];
}
}
}
bool DefaultSerializerAllocator::HasNotExceededFirstPageOfEachSpace() const {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
if (!completed_chunks_[i].empty()) return false;
}
return true;
}
#endif
std::vector<SerializedData::Reservation>
DefaultSerializerAllocator::EncodeReservations() const {
std::vector<SerializedData::Reservation> out;
STATIC_ASSERT(NEW_SPACE == 0);
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
out.emplace_back(completed_chunks_[i][j]);
}
if (pending_chunk_[i] > 0 || completed_chunks_[i].size() == 0) {
out.emplace_back(pending_chunk_[i]);
}
out.back().mark_as_last();
}
STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
out.emplace_back(num_maps_ * Map::kSize);
out.back().mark_as_last();
STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
out.emplace_back(large_objects_total_size_);
out.back().mark_as_last();
return out;
}
void DefaultSerializerAllocator::OutputStatistics() {
DCHECK(FLAG_serialization_statistics);
PrintF(" Spaces (bytes):\n");
STATIC_ASSERT(NEW_SPACE == 0);
for (int space = 0; space < kNumberOfSpaces; space++) {
PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
}
PrintF("\n");
STATIC_ASSERT(NEW_SPACE == 0);
for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
size_t s = pending_chunk_[space];
for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
PrintF("%16" PRIuS, s);
}
STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
PrintF("%16d", num_maps_ * Map::kSize);
STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
PrintF("%16d\n", large_objects_total_size_);
}
// static
uint32_t DefaultSerializerAllocator::MaxChunkSizeInSpace(int space) {
DCHECK(0 <= space && space < kNumberOfPreallocatedSpaces);
return static_cast<uint32_t>(
MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
}
} // namespace internal
} // namespace v8
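For intuition, the chunk accounting in Allocate() and EncodeReservations() above can be modeled in isolation. The sketch below is not V8 code — the class name and the tiny page size are invented — but it follows the same rollover rule: when an allocation would push the pending chunk past the page limit, the chunk is completed and a fresh one is started, and the reservations are the completed chunk sizes followed by the pending one.

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Toy model of one space's chunk bookkeeping. max_chunk_size_ stands in
// for MemoryAllocator::PageAreaSize(space).
class ChunkAllocatorModel {
 public:
  explicit ChunkAllocatorModel(uint32_t max_chunk_size)
      : max_chunk_size_(max_chunk_size) {}

  // Returns {chunk_index, chunk_offset}, the analogue of a back-reference.
  std::pair<uint32_t, uint32_t> Allocate(uint32_t size) {
    if (pending_chunk_ + size > max_chunk_size_) {
      // The allocation would not fit onto the current page. Complete the
      // current chunk and start a new one.
      completed_chunks_.push_back(pending_chunk_);
      pending_chunk_ = 0;
    }
    uint32_t offset = pending_chunk_;
    pending_chunk_ += size;
    return {static_cast<uint32_t>(completed_chunks_.size()), offset};
  }

  // Analogue of EncodeReservations for a single space: all completed
  // chunk sizes followed by the still-pending chunk.
  std::vector<uint32_t> EncodeReservations() const {
    std::vector<uint32_t> out = completed_chunks_;
    out.push_back(pending_chunk_);
    return out;
  }

 private:
  const uint32_t max_chunk_size_;
  uint32_t pending_chunk_ = 0;
  std::vector<uint32_t> completed_chunks_;
};

int main() {
  ChunkAllocatorModel allocator(/*max_chunk_size=*/100);
  const uint32_t sizes[] = {40, 40, 40, 30};  // third allocation rolls over
  for (uint32_t size : sizes) {
    std::pair<uint32_t, uint32_t> ref = allocator.Allocate(size);
    std::cout << "size " << size << " -> chunk " << ref.first
              << ", offset " << ref.second << "\n";
  }
  for (uint32_t r : allocator.EncodeReservations()) {
    std::cout << "reservation: " << r << " bytes\n";  // prints 80, then 70
  }
  return 0;
}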
src/snapshot/default-serializer-allocator.h (new file):

// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
#define V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
#include "src/snapshot/serializer-common.h"
namespace v8 {
namespace internal {
template <class AllocatorT>
class Serializer;
class DefaultSerializerAllocator final {
public:
DefaultSerializerAllocator(
Serializer<DefaultSerializerAllocator>* serializer);
SerializerReference Allocate(AllocationSpace space, uint32_t size);
SerializerReference AllocateMap();
SerializerReference AllocateLargeObject(uint32_t size);
SerializerReference AllocateOffHeapBackingStore();
#ifdef DEBUG
bool BackReferenceIsAlreadyAllocated(
SerializerReference back_reference) const;
bool HasNotExceededFirstPageOfEachSpace() const;
#endif
std::vector<SerializedData::Reservation> EncodeReservations() const;
void OutputStatistics();
private:
static constexpr int kNumberOfPreallocatedSpaces =
SerializerDeserializer::kNumberOfPreallocatedSpaces;
static constexpr int kNumberOfSpaces =
SerializerDeserializer::kNumberOfSpaces;
static uint32_t MaxChunkSizeInSpace(int space);
// Objects from the same space are put into chunks for bulk-allocation
// when deserializing. We have to make sure that each chunk fits into a
// page. So we track the chunk size in pending_chunk_ of a space, but
// when it exceeds a page, we complete the current chunk and start a new one.
uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
std::vector<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
// Number of maps that we need to allocate.
uint32_t num_maps_ = 0;
// We map serialized large objects to indexes for back-referencing.
uint32_t large_objects_total_size_ = 0;
uint32_t seen_large_objects_index_ = 0;
// Used to keep track of the off-heap backing stores used by TypedArrays/
// ArrayBuffers. Note that the index begins at 1 and not 0, because when a
// TypedArray has an on-heap backing store, the backing_store pointer in the
// corresponding ArrayBuffer will be null, which makes it indistinguishable
// from index 0.
uint32_t seen_backing_stores_index_ = 1;
// The current serializer.
Serializer<DefaultSerializerAllocator>* const serializer_;
DISALLOW_COPY_AND_ASSIGN(DefaultSerializerAllocator);
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
src/snapshot/partial-serializer.cc:
@@ -36,11 +36,12 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
// and it's next context pointer may point to the code-stub context. Clear
// it before serializing, it will get re-added to the context list
// explicitly when it's loaded.
- context->set(Context::NEXT_CONTEXT_LINK, isolate_->heap()->undefined_value());
+ context->set(Context::NEXT_CONTEXT_LINK,
+              isolate()->heap()->undefined_value());
DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
// Reset math random cache to get fresh random numbers.
context->set_math_random_index(Smi::kZero);
- context->set_math_random_cache(isolate_->heap()->undefined_value());
+ context->set_math_random_cache(isolate()->heap()->undefined_value());
DCHECK_NULL(rehashable_global_dictionary_);
rehashable_global_dictionary_ = context->global_object()->global_dictionary();
@@ -66,7 +67,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map_.Lookup(obj);
+ int root_index = root_index_map()->Lookup(obj);
if (root_index != RootIndexMap::kInvalidRootIndex) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
@@ -87,7 +88,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Pointers from the partial snapshot to the objects in the startup snapshot
// should go through the root array or through the partial snapshot cache.
// If this is not the case you may have to add something to the root array.
- DCHECK(!startup_serializer_->reference_map()->Lookup(obj).is_valid());
+ DCHECK(!startup_serializer_->ReferenceMapContains(obj));
// All the internalized strings that the partial snapshot needs should be
// either in the root table or in the partial snapshot cache.
DCHECK(!obj->IsInternalizedString());
@@ -97,7 +98,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
// Clear literal boilerplates and feedback.
- if (obj->IsFeedbackVector()) FeedbackVector::cast(obj)->ClearSlots(isolate_);
+ if (obj->IsFeedbackVector()) FeedbackVector::cast(obj)->ClearSlots(isolate());
if (obj->IsJSObject()) {
JSObject* jsobj = JSObject::cast(obj);
@@ -138,7 +139,7 @@ void PartialSerializer::SerializeEmbedderFields() {
HandleScope scope(isolate());
Handle<JSObject> obj(embedder_field_holders_.back(), isolate());
embedder_field_holders_.pop_back();
- SerializerReference reference = reference_map_.Lookup(*obj);
+ SerializerReference reference = reference_map()->Lookup(*obj);
DCHECK(reference.is_back_reference());
int embedder_fields_count = obj->GetEmbedderFieldCount();
for (int i = 0; i < embedder_fields_count; i++) {
......
src/snapshot/partial-serializer.h:
@@ -13,7 +13,7 @@ namespace internal {
class StartupSerializer;
- class PartialSerializer : public Serializer {
+ class PartialSerializer : public Serializer<> {
public:
PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer,
v8::SerializeEmbedderFieldsCallback callback);
......
This diff is collapsed. (src/snapshot/serializer.cc)
src/snapshot/serializer.h:
@@ -10,6 +10,7 @@
#include "src/isolate.h"
#include "src/log.h"
#include "src/objects.h"
#include "src/snapshot/default-serializer-allocator.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -119,24 +120,23 @@ class CodeAddressMap : public CodeEventLogger {
Isolate* isolate_;
};
- // There can be only one serializer per V8 process.
+ template <class AllocatorT = DefaultSerializerAllocator>
class Serializer : public SerializerDeserializer {
public:
explicit Serializer(Isolate* isolate);
~Serializer() override;
- void EncodeReservations(std::vector<SerializedData::Reservation>* out) const;
+ std::vector<SerializedData::Reservation> EncodeReservations() const {
+   return allocator_.EncodeReservations();
+ }
- void SerializeDeferredObjects();
+ const std::vector<byte>* Payload() const { return sink_.data(); }
- Isolate* isolate() const { return isolate_; }
- SerializerReferenceMap* reference_map() { return &reference_map_; }
- RootIndexMap* root_index_map() { return &root_index_map_; }
+ bool ReferenceMapContains(HeapObject* o) {
+   return reference_map()->Lookup(o).is_valid();
+ }
- #ifdef OBJECT_PRINT
- void CountInstanceType(Map* map, int size);
- #endif  // OBJECT_PRINT
+ Isolate* isolate() const { return isolate_; }
protected:
class ObjectSerializer;
@@ -155,6 +155,7 @@ class Serializer : public SerializerDeserializer {
Serializer* serializer_;
};
+ void SerializeDeferredObjects();
virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) = 0;
@@ -164,16 +165,13 @@ class Serializer : public SerializerDeserializer {
void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
int skip);
void PutSmi(Smi* smi);
void PutBackReference(HeapObject* object, SerializerReference reference);
void PutAttachedReference(SerializerReference reference,
HowToCode how_to_code, WhereToPoint where_to_point);
// Emit alignment prefix if necessary, return required padding space in bytes.
int PutAlignmentPrefix(HeapObject* object);
+ void PutNextChunk(int space);
// Returns true if the object was successfully serialized as hot object.
bool SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
@@ -202,17 +200,10 @@ class Serializer : public SerializerDeserializer {
}
}
- // This will return the space for an object.
- SerializerReference AllocateOffHeapBackingStore();
- SerializerReference AllocateLargeObject(int size);
- SerializerReference AllocateMap();
- SerializerReference Allocate(AllocationSpace space, int size);
ExternalReferenceEncoder::Value EncodeExternalReference(Address addr) {
return external_reference_encoder_.Encode(addr);
}
- bool HasNotExceededFirstPageOfEachSpace();
// GetInt reads 4 bytes at once, requiring padding at the end.
void Pad();
@@ -222,14 +213,6 @@ class Serializer : public SerializerDeserializer {
Code* CopyCode(Code* code);
- inline uint32_t max_chunk_size(int space) const {
-   DCHECK_LE(0, space);
-   DCHECK_LT(space, kNumberOfSpaces);
-   return max_chunk_size_[space];
- }
- const SnapshotByteSink* sink() const { return &sink_; }
void QueueDeferredObject(HeapObject* obj) {
DCHECK(reference_map_.Lookup(obj).is_back_reference());
deferred_objects_.push_back(obj);
@@ -237,56 +220,32 @@ class Serializer : public SerializerDeserializer {
void OutputStatistics(const char* name);
+ #ifdef OBJECT_PRINT
+ void CountInstanceType(Map* map, int size);
+ #endif  // OBJECT_PRINT
#ifdef DEBUG
void PushStack(HeapObject* o) { stack_.push_back(o); }
void PopStack() { stack_.pop_back(); }
void PrintStack();
- bool BackReferenceIsAlreadyAllocated(SerializerReference back_reference);
#endif // DEBUG
- Isolate* isolate_;
+ SerializerReferenceMap* reference_map() { return &reference_map_; }
+ RootIndexMap* root_index_map() { return &root_index_map_; }
+ AllocatorT* allocator() { return &allocator_; }
- SnapshotByteSink sink_;
- ExternalReferenceEncoder external_reference_encoder_;
+ SnapshotByteSink sink_;  // Used directly by subclasses.
+ private:
+ Isolate* isolate_;
SerializerReferenceMap reference_map_;
+ ExternalReferenceEncoder external_reference_encoder_;
RootIndexMap root_index_map_;
- int recursion_depth_;
- friend class Deserializer;
- friend class ObjectSerializer;
- friend class RecursionScope;
- friend class SnapshotData;
- private:
- CodeAddressMap* code_address_map_;
- // Objects from the same space are put into chunks for bulk-allocation
- // when deserializing. We have to make sure that each chunk fits into a
- // page. So we track the chunk size in pending_chunk_ of a space, but
- // when it exceeds a page, we complete the current chunk and start a new one.
- uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
- std::vector<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
- uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
- // Number of maps that we need to allocate.
- uint32_t num_maps_;
- // We map serialized large objects to indexes for back-referencing.
- uint32_t large_objects_total_size_;
- uint32_t seen_large_objects_index_;
- // Used to keep track of the off-heap backing stores used by TypedArrays/
- // ArrayBuffers. Note that the index begins at 1 and not 0, because when a
- // TypedArray has an on-heap backing store, the backing_store pointer in the
- // corresponding ArrayBuffer will be null, which makes it indistinguishable
- // from index 0.
- uint32_t seen_backing_stores_index_;
+ CodeAddressMap* code_address_map_ = nullptr;
std::vector<byte> code_buffer_;
- // To handle stack overflow.
- std::vector<HeapObject*> deferred_objects_;
+ std::vector<HeapObject*> deferred_objects_;  // To handle stack overflow.
+ int recursion_depth_ = 0;
+ AllocatorT allocator_;
#ifdef OBJECT_PRINT
static const int kInstanceTypes = 256;
@@ -298,10 +257,13 @@ class Serializer : public SerializerDeserializer {
std::vector<HeapObject*> stack_;
#endif // DEBUG
+ friend class DefaultSerializerAllocator;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
- class Serializer::ObjectSerializer : public ObjectVisitor {
+ template <class AllocatorT>
+ class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer* serializer, HeapObject* obj,
SnapshotByteSink* sink, HowToCode how_to_code,
......
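One C++ detail worth noting in the hunk above: once Serializer is a class template, the out-of-line definition of its nested ObjectSerializer class must be introduced with the enclosing template's parameter list. A minimal compilable illustration, with hypothetical names:

template <class T>
class Outer {
 public:
  class Inner;  // nested class is only declared here
};

// Out-of-line definition of the nested class: the enclosing template's
// parameter list comes first, mirroring
// "template <class AllocatorT> class Serializer<AllocatorT>::ObjectSerializer".
template <class T>
class Outer<T>::Inner {
 public:
  explicit Inner(T value) : value_(value) {}
  T value() const { return value_; }

 private:
  T value_;
};

int main() {
  Outer<int>::Inner inner(42);
  return inner.value() == 42 ? 0 : 1;
}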
src/snapshot/snapshot-common.cc:
@@ -269,11 +269,11 @@ Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
return Vector<const byte>(context_data, context_length);
}
- SnapshotData::SnapshotData(const Serializer* serializer) {
+ template <class AllocatorT>
+ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
DisallowHeapAllocation no_gc;
- std::vector<Reservation> reservations;
- serializer->EncodeReservations(&reservations);
- const std::vector<byte>* payload = serializer->sink()->data();
+ std::vector<Reservation> reservations = serializer->EncodeReservations();
+ const std::vector<byte>* payload = serializer->Payload();
// Calculate sizes.
uint32_t reservation_size =
@@ -299,6 +299,10 @@ SnapshotData::SnapshotData(const Serializer* serializer) {
static_cast<size_t>(payload->size()));
}
+ // Explicit instantiation.
+ template SnapshotData::SnapshotData(
+     const Serializer<DefaultSerializerAllocator>* serializer);
bool SnapshotData::IsSane() {
return GetHeaderValue(kVersionHashOffset) == Version::Hash();
}
......
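The "Explicit instantiation" lines above are needed because the templated SnapshotData constructor is defined in a .cc file rather than the header: without an explicit instantiation for Serializer<DefaultSerializerAllocator>, no object file would emit that constructor and callers in other translation units would fail to link. A stripped-down illustration of the pattern, with hypothetical file and type names:

// printer.h -- declares a template whose member is defined out of line.
template <class T>
struct Printer {
  void Print(T value);  // definition lives in printer.cc
};

// printer.cc -- defines the member and explicitly instantiates it for
// the one type users need, so the symbol lands in this object file.
#include <iostream>
template <class T>
void Printer<T>::Print(T value) {
  std::cout << value << "\n";
}
// Explicit instantiation (the analogue of the SnapshotData lines above).
template struct Printer<int>;

// main.cc -- links only because printer.cc instantiated Printer<int>.
int main() {
  Printer<int> printer;
  printer.Print(42);
  return 0;
}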
src/snapshot/snapshot.h:
@@ -23,7 +23,8 @@ class StartupSerializer;
class SnapshotData : public SerializedData {
public:
// Used when producing.
- explicit SnapshotData(const Serializer* serializer);
+ template <class AllocatorT>
+ explicit SnapshotData(const Serializer<AllocatorT>* serializer);
// Used when consuming.
explicit SnapshotData(const Vector<const byte> snapshot)
......
src/snapshot/startup-serializer.cc:
@@ -44,7 +44,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- int root_index = root_index_map_.Lookup(obj);
+ int root_index = root_index_map()->Lookup(obj);
// We can only encode roots as such if it has already been serialized.
// That applies to root indices below the wave front.
if (root_index != RootIndexMap::kInvalidRootIndex) {
@@ -58,7 +58,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
- if (isolate_->external_reference_redirector() && obj->IsAccessorInfo()) {
+ if (isolate()->external_reference_redirector() && obj->IsAccessorInfo()) {
// Wipe external reference redirects in the accessor info.
AccessorInfo* info = AccessorInfo::cast(obj);
Address original_address = Foreign::cast(info->getter())->foreign_address();
@@ -66,7 +66,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
accessor_infos_.push_back(info);
} else if (obj->IsScript() && Script::cast(obj)->IsUserJavaScript()) {
Script::cast(obj)->set_context_data(
- isolate_->heap()->uninitialized_symbol());
+ isolate()->heap()->uninitialized_symbol());
} else if (obj->IsSharedFunctionInfo()) {
// Clear inferred name for native functions.
SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
@@ -125,7 +125,7 @@ void StartupSerializer::SerializeStrongReferences() {
serializing_immortal_immovables_roots_ = true;
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
// Check that immortal immovable roots are allocated on the first page.
- CHECK(HasNotExceededFirstPageOfEachSpace());
+ DCHECK(allocator()->HasNotExceededFirstPageOfEachSpace());
serializing_immortal_immovables_roots_ = false;
// Visit the rest of the strong roots.
// Clear the stack limits to make the snapshot reproducible.
@@ -185,11 +185,11 @@ void StartupSerializer::CheckRehashability(HeapObject* table) {
// We can only correctly rehash if the four hash tables below are the only
// ones that we deserialize.
if (table->IsUnseededNumberDictionary()) return;
- if (table == isolate_->heap()->empty_ordered_hash_table()) return;
- if (table == isolate_->heap()->empty_slow_element_dictionary()) return;
- if (table == isolate_->heap()->empty_property_dictionary()) return;
- if (table == isolate_->heap()->weak_object_to_code_table()) return;
- if (table == isolate_->heap()->string_table()) return;
+ if (table == isolate()->heap()->empty_ordered_hash_table()) return;
+ if (table == isolate()->heap()->empty_slow_element_dictionary()) return;
+ if (table == isolate()->heap()->empty_property_dictionary()) return;
+ if (table == isolate()->heap()->weak_object_to_code_table()) return;
+ if (table == isolate()->heap()->string_table()) return;
can_be_rehashed_ = false;
}
......
src/snapshot/startup-serializer.h:
@@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
- class StartupSerializer : public Serializer {
+ class StartupSerializer : public Serializer<> {
public:
StartupSerializer(
Isolate* isolate,
......
src/v8.gyp:
@@ -1339,6 +1339,8 @@
'snapshot/builtin-serializer.h',
'snapshot/code-serializer.cc',
'snapshot/code-serializer.h',
+ 'snapshot/default-serializer-allocator.cc',
+ 'snapshot/default-serializer-allocator.h',
'snapshot/deserializer.cc',
'snapshot/deserializer.h',
'snapshot/natives-common.cc',
......