Commit 4450f7ca authored by jgruber, committed by Commit Bot

[snapshot] Refactor deserializer allocations

A continuation of the work in 59e4b751, this extracts the memory
reservation and allocation logic out of the Deserializer class.

Follow-up work is planned to create a specialized allocator for
builtin deserialization.

Bug: v8:6624
Change-Id: I7081cdc557ab8fb2571aadb816399e136ea2cdbb
Reviewed-on: https://chromium-review.googlesource.com/716036
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48634}
parent 8411f8f9
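For orientation before the diffs: after this change, all reservation and allocation state lives in a separate allocator object, and Deserializer becomes a class template that delegates to an allocator chosen by a template parameter. The following is a minimal standalone sketch of just that shape; the "Sketch" names and stub bodies are hypothetical illustrations, not the actual V8 code.

// Minimal sketch (illustrative only; the *Sketch names are hypothetical
// stand-ins for Deserializer and DefaultDeserializerAllocator):

class DefaultDeserializerAllocatorSketch;

template <class AllocatorT = DefaultDeserializerAllocatorSketch>
class DeserializerSketch {
 public:
  DeserializerSketch() : allocator_(this) {}

  // Former Deserializer members such as ReserveSpace() now live on the
  // allocator; call sites become allocator()->ReserveSpace(), as in the
  // diffs below.
  AllocatorT* allocator() { return &allocator_; }

 private:
  AllocatorT allocator_;  // owns all reservation/allocation state
};

class DefaultDeserializerAllocatorSketch {
 public:
  explicit DefaultDeserializerAllocatorSketch(
      DeserializerSketch<DefaultDeserializerAllocatorSketch>* deserializer)
      : deserializer_(deserializer) {}

  bool ReserveSpace() { return true; }  // stub; the real one reserves heap space

 private:
  DeserializerSketch<DefaultDeserializerAllocatorSketch>* const deserializer_;
};

int main() {
  DeserializerSketch<> d;  // "<>" picks the default allocator
  return d.allocator()->ReserveSpace() ? 0 : 1;
}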
BUILD.gn:
@@ -1968,6 +1968,8 @@ v8_source_set("v8_base") {
     "src/snapshot/builtin-serializer.h",
     "src/snapshot/code-serializer.cc",
     "src/snapshot/code-serializer.h",
+    "src/snapshot/default-deserializer-allocator.cc",
+    "src/snapshot/default-deserializer-allocator.h",
     "src/snapshot/default-serializer-allocator.cc",
     "src/snapshot/default-serializer-allocator.h",
     "src/snapshot/deserializer.cc",
...
src/snapshot/builtin-deserializer.cc:
@@ -38,7 +38,7 @@ BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
     : Deserializer(data, false) {
   // We may have to relax this at some point to pack reloc infos and handler
   // tables into the builtin blob (instead of the partial snapshot cache).
-  DCHECK(ReservesOnlyCodeSpace());
+  DCHECK(allocator()->ReservesOnlyCodeSpace());
   builtin_offsets_ = data->BuiltinOffsets();
   DCHECK_EQ(Builtins::builtin_count, builtin_offsets_.length());
@@ -136,7 +136,7 @@ uint32_t BuiltinDeserializer::ExtractBuiltinSize(int builtin_id) {
 }

 Heap::Reservation BuiltinDeserializer::CreateReservationsForEagerBuiltins() {
-  DCHECK(ReservesOnlyCodeSpace());
+  DCHECK(allocator()->ReservesOnlyCodeSpace());
   Heap::Reservation result;
@@ -236,7 +236,8 @@ Address BuiltinDeserializer::Allocate(int space_index, int size) {
   DCHECK_EQ(ExtractBuiltinSize(current_builtin_id_), size);
   Object* obj = isolate()->builtins()->builtin(current_builtin_id_);
   DCHECK(Internals::HasHeapObjectTag(obj));
-  return HeapObject::cast(obj)->address();
+  HeapObject* heap_obj = HeapObject::cast(obj);
+  return heap_obj->address();
 }

 }  // namespace internal
...
src/snapshot/builtin-deserializer.h:
@@ -14,7 +14,7 @@ namespace internal {
 class BuiltinSnapshotData;

 // Deserializes the builtins blob.
-class BuiltinDeserializer final : public Deserializer {
+class BuiltinDeserializer final : public Deserializer<> {
  public:
  BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);
...
src/snapshot/default-deserializer-allocator.cc (new file):

// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/default-deserializer-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/deserializer.h"
#include "src/snapshot/startup-deserializer.h"
namespace v8 {
namespace internal {
DefaultDeserializerAllocator::DefaultDeserializerAllocator(
Deserializer<DefaultDeserializerAllocator>* deserializer)
: deserializer_(deserializer) {}
// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address DefaultDeserializerAllocator::AllocateRaw(AllocationSpace space,
int size) {
if (space == LO_SPACE) {
AlwaysAllocateScope scope(isolate());
LargeObjectSpace* lo_space = isolate()->heap()->lo_space();
// TODO(jgruber): May be cleaner to pass in executability as an argument.
Executability exec =
static_cast<Executability>(deserializer_->source()->Get());
AllocationResult result = lo_space->AllocateRaw(size, exec);
HeapObject* obj = result.ToObjectChecked();
deserialized_large_objects_.push_back(obj);
return obj->address();
} else if (space == MAP_SPACE) {
DCHECK_EQ(Map::kSize, size);
return allocated_maps_[next_map_index_++];
} else {
DCHECK(space < kNumberOfPreallocatedSpaces);
Address address = high_water_[space];
DCHECK_NOT_NULL(address);
high_water_[space] += size;
#ifdef DEBUG
// Assert that the current reserved chunk is still big enough.
const Heap::Reservation& reservation = reservations_[space];
int chunk_index = current_chunk_[space];
DCHECK_LE(high_water_[space], reservation[chunk_index].end);
#endif
if (space == CODE_SPACE) SkipList::Update(address, size);
return address;
}
}
Address DefaultDeserializerAllocator::Allocate(AllocationSpace space,
int size) {
Address address;
HeapObject* obj;
if (next_alignment_ != kWordAligned) {
const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
address = AllocateRaw(space, reserved);
obj = HeapObject::FromAddress(address);
// If one of the following assertions fails, then we are deserializing an
// aligned object when the filler maps have not been deserialized yet.
// We require filler maps as padding to align the object.
Heap* heap = isolate()->heap();
DCHECK(heap->free_space_map()->IsMap());
DCHECK(heap->one_pointer_filler_map()->IsMap());
DCHECK(heap->two_pointer_filler_map()->IsMap());
obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
address = obj->address();
next_alignment_ = kWordAligned;
return address;
} else {
return AllocateRaw(space, size);
}
}
void DefaultDeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
DCHECK(space < kNumberOfPreallocatedSpaces);
uint32_t chunk_index = current_chunk_[space];
const Heap::Reservation& reservation = reservations_[space];
// Make sure the current chunk is indeed exhausted.
CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
// Move to next reserved chunk.
chunk_index = ++current_chunk_[space];
CHECK_LT(chunk_index, reservation.size());
high_water_[space] = reservation[chunk_index].start;
}
HeapObject* DefaultDeserializerAllocator::GetMap(uint32_t index) {
DCHECK_LT(index, next_map_index_);
return HeapObject::FromAddress(allocated_maps_[index]);
}
HeapObject* DefaultDeserializerAllocator::GetLargeObject(uint32_t index) {
DCHECK_LT(index, deserialized_large_objects_.size());
return deserialized_large_objects_[index];
}
HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
uint32_t chunk_index,
uint32_t chunk_offset) {
DCHECK_LT(space, kNumberOfPreallocatedSpaces);
DCHECK_LE(chunk_index, current_chunk_[space]);
Address address = reservations_[space][chunk_index].start + chunk_offset;
if (next_alignment_ != kWordAligned) {
int padding = Heap::GetFillToAlign(address, next_alignment_);
next_alignment_ = kWordAligned;
DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
address += padding;
}
return HeapObject::FromAddress(address);
}
void DefaultDeserializerAllocator::DecodeReservation(
Vector<const SerializedData::Reservation> res) {
DCHECK_EQ(0, reservations_[NEW_SPACE].size());
STATIC_ASSERT(NEW_SPACE == 0);
int current_space = NEW_SPACE;
for (auto& r : res) {
reservations_[current_space].push_back({r.chunk_size(), NULL, NULL});
if (r.is_last()) current_space++;
}
DCHECK_EQ(kNumberOfSpaces, current_space);
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}
bool DefaultDeserializerAllocator::ReserveSpace() {
#ifdef DEBUG
for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
DCHECK(reservations_[i].size() > 0);
}
#endif // DEBUG
DCHECK(allocated_maps_.empty());
if (!isolate()->heap()->ReserveSpace(reservations_, &allocated_maps_)) {
return false;
}
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
high_water_[i] = reservations_[i][0].start;
}
return true;
}
// static
bool DefaultDeserializerAllocator::ReserveSpace(
StartupDeserializer* startup_deserializer,
BuiltinDeserializer* builtin_deserializer) {
const int first_space = NEW_SPACE;
const int last_space = SerializerDeserializer::kNumberOfSpaces;
Isolate* isolate = startup_deserializer->isolate();
// Create a set of merged reservations to reserve space in one go.
// The BuiltinDeserializer's reservations are ignored, since our actual
// requirements vary based on whether lazy deserialization is enabled.
// Instead, we manually determine the required code-space.
Heap::Reservation merged_reservations[kNumberOfSpaces];
for (int i = first_space; i < last_space; i++) {
merged_reservations[i] =
startup_deserializer->allocator()->reservations_[i];
}
Heap::Reservation builtin_reservations =
builtin_deserializer->CreateReservationsForEagerBuiltins();
DCHECK(!builtin_reservations.empty());
for (const auto& c : builtin_reservations) {
merged_reservations[CODE_SPACE].push_back(c);
}
if (!isolate->heap()->ReserveSpace(
merged_reservations,
&startup_deserializer->allocator()->allocated_maps_)) {
return false;
}
DisallowHeapAllocation no_allocation;
// Distribute the successful allocations between both deserializers.
// There's nothing to be done here except for code space.
{
const int num_builtin_reservations =
static_cast<int>(builtin_reservations.size());
for (int i = num_builtin_reservations - 1; i >= 0; i--) {
const auto& c = merged_reservations[CODE_SPACE].back();
DCHECK_EQ(c.size, builtin_reservations[i].size);
DCHECK_EQ(c.size, c.end - c.start);
builtin_reservations[i].start = c.start;
builtin_reservations[i].end = c.end;
merged_reservations[CODE_SPACE].pop_back();
}
builtin_deserializer->InitializeBuiltinsTable(builtin_reservations);
}
// Write back startup reservations.
for (int i = first_space; i < last_space; i++) {
startup_deserializer->allocator()->reservations_[i].swap(
merged_reservations[i]);
}
for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
startup_deserializer->allocator()->high_water_[i] =
startup_deserializer->allocator()->reservations_[i][0].start;
builtin_deserializer->allocator()->high_water_[i] = nullptr;
}
return true;
}
bool DefaultDeserializerAllocator::ReservesOnlyCodeSpace() const {
for (int space = NEW_SPACE; space < kNumberOfSpaces; space++) {
if (space == CODE_SPACE) continue;
const auto& r = reservations_[space];
for (const Heap::Chunk& c : r)
if (c.size != 0) return false;
}
return true;
}
bool DefaultDeserializerAllocator::ReservationsAreFullyUsed() const {
for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
const uint32_t chunk_index = current_chunk_[space];
if (reservations_[space].size() != chunk_index + 1) {
return false;
}
if (reservations_[space][chunk_index].end != high_water_[space]) {
return false;
}
}
return (allocated_maps_.size() == next_map_index_);
}
void DefaultDeserializerAllocator::
RegisterDeserializedObjectsForBlackAllocation() {
isolate()->heap()->RegisterDeserializedObjectsForBlackAllocation(
reservations_, deserialized_large_objects_, allocated_maps_);
}
Isolate* DefaultDeserializerAllocator::isolate() const {
return deserializer_->isolate();
}
} // namespace internal
} // namespace v8
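The comment at the top of AllocateRaw above describes the core scheme: everything except maps and large objects is a bump-pointer allocation into chunks reserved before deserialization starts. Below is a standalone toy version of just that mechanism, under assumed simplifications (plain integers for addresses, assert in place of the DCHECK/CHECK macros); it is illustrative, not the actual V8 code.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct ChunkSketch {
  uintptr_t start;  // first free address in the reserved chunk
  uintptr_t end;    // one past the last usable address
};

class BumpAllocatorSketch {
 public:
  explicit BumpAllocatorSketch(std::vector<ChunkSketch> chunks)
      : chunks_(std::move(chunks)), high_water_(chunks_[0].start) {}

  // Like AllocateRaw for preallocated spaces: hand out the current mark,
  // then bump it; the up-front reservation guarantees the chunk fits.
  uintptr_t AllocateRaw(int size) {
    uintptr_t address = high_water_;
    high_water_ += size;
    assert(high_water_ <= chunks_[current_chunk_].end);
    return address;
  }

  // Like MoveToNextChunk: the serializer emits an explicit opcode when a
  // chunk is exhausted, so the allocator never has to guess.
  void MoveToNextChunk() {
    assert(high_water_ == chunks_[current_chunk_].end);
    ++current_chunk_;
    assert(current_chunk_ < chunks_.size());
    high_water_ = chunks_[current_chunk_].start;
  }

 private:
  std::vector<ChunkSketch> chunks_;
  std::size_t current_chunk_ = 0;
  uintptr_t high_water_;
};

int main() {
  BumpAllocatorSketch a({{0x1000, 0x1100}, {0x2000, 0x2080}});
  a.AllocateRaw(0x80);
  a.AllocateRaw(0x80);  // first chunk now exactly exhausted
  a.MoveToNextChunk();
  a.AllocateRaw(0x40);
  return 0;
}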
src/snapshot/default-deserializer-allocator.h (new file):

// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
#define V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/snapshot/serializer-common.h"
namespace v8 {
namespace internal {
template <class AllocatorT>
class Deserializer;
class BuiltinDeserializer;
class StartupDeserializer;
class DefaultDeserializerAllocator final {
public:
DefaultDeserializerAllocator(
Deserializer<DefaultDeserializerAllocator>* deserializer);
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
Address Allocate(AllocationSpace space, int size);
void MoveToNextChunk(AllocationSpace space);
void SetAlignment(AllocationAlignment alignment) {
DCHECK_EQ(kWordAligned, next_alignment_);
DCHECK_LE(kWordAligned, alignment);
DCHECK_LE(alignment, kDoubleUnaligned);
next_alignment_ = static_cast<AllocationAlignment>(alignment);
}
HeapObject* GetMap(uint32_t index);
HeapObject* GetLargeObject(uint32_t index);
HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
uint32_t chunk_offset);
// ------- Reservation Methods -------
// Methods related to memory reservations (prior to deserialization).
void DecodeReservation(Vector<const SerializedData::Reservation> res);
bool ReserveSpace();
// Atomically reserves space for the two given deserializers. Guarantees
// reservation for both without garbage collection in-between.
static bool ReserveSpace(StartupDeserializer* startup_deserializer,
BuiltinDeserializer* builtin_deserializer);
bool ReservesOnlyCodeSpace() const;
bool ReservationsAreFullyUsed() const;
// ------- Misc Utility Methods -------
void RegisterDeserializedObjectsForBlackAllocation();
// For SortMapDescriptors();
const std::vector<Address>& GetAllocatedMaps() const {
return allocated_maps_;
}
private:
Isolate* isolate() const;
// Raw allocation without considering alignment.
Address AllocateRaw(AllocationSpace space, int size);
private:
static constexpr int kNumberOfPreallocatedSpaces =
SerializerDeserializer::kNumberOfPreallocatedSpaces;
static constexpr int kNumberOfSpaces =
SerializerDeserializer::kNumberOfSpaces;
// The address of the next object that will be allocated in each space.
// Each space has a number of chunks reserved by the GC, with each chunk
// fitting into a page. Deserialized objects are allocated into the
// current chunk of the target space by bumping up high water mark.
Heap::Reservation reservations_[kNumberOfSpaces];
uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
Address high_water_[kNumberOfPreallocatedSpaces];
// The alignment of the next allocation.
AllocationAlignment next_alignment_ = kWordAligned;
// All required maps are pre-allocated during reservation. {next_map_index_}
// stores the index of the next map to return from allocation.
uint32_t next_map_index_ = 0;
std::vector<Address> allocated_maps_;
// Allocated large objects are kept in this map and may be fetched later as
// back-references.
std::vector<HeapObject*> deserialized_large_objects_;
// The current deserializer.
Deserializer<DefaultDeserializerAllocator>* const deserializer_;
DISALLOW_COPY_AND_ASSIGN(DefaultDeserializerAllocator);
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
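One payoff of pre-reserved chunks, visible in GetObject() above, is that a back reference into a preallocated space can be stored as a (chunk_index, chunk_offset) pair and resolved with plain address arithmetic; only large objects are tracked individually by index, since each one is a separate allocation. A hypothetical sketch of that resolution step (illustrative names, not the snapshot wire format):

#include <cstdint>
#include <vector>

struct ChunkSketch {
  uintptr_t start;  // base address of the reserved chunk
  uintptr_t end;    // one past the last usable address
};

// Resolves a back reference the way DefaultDeserializerAllocator::GetObject
// does for preallocated spaces: chunk base address plus recorded offset.
uintptr_t ResolveBackReference(const std::vector<ChunkSketch>& reservation,
                               uint32_t chunk_index, uint32_t chunk_offset) {
  return reservation[chunk_index].start + chunk_offset;
}

int main() {
  std::vector<ChunkSketch> code_space = {{0x1000, 0x2000}, {0x4000, 0x5000}};
  // An object that was deserialized at offset 0x10 into the second chunk:
  return ResolveBackReference(code_space, 1, 0x10) == 0x4010 ? 0 : 1;
}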
...
src/snapshot/deserializer.h:
@@ -7,14 +7,16 @@
 #include <vector>

-#include "src/heap/heap.h"
-#include "src/objects.h"
+#include "src/snapshot/default-deserializer-allocator.h"
 #include "src/snapshot/serializer-common.h"
 #include "src/snapshot/snapshot-source-sink.h"

 namespace v8 {
 namespace internal {

+class HeapObject;
+class Object;
+
 // Used for platforms with embedded constant pools to trigger deserialization
 // of objects found in code.
 #if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
@@ -25,21 +27,12 @@ namespace internal {
 #define V8_CODE_EMBEDS_OBJECT_POINTER 0
 #endif

-class BuiltinDeserializer;
-class Heap;
-class StartupDeserializer;
-
 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+template <class AllocatorT = DefaultDeserializerAllocator>
 class Deserializer : public SerializerDeserializer {
  public:
  ~Deserializer() override;

-  // Add an object to back an attached reference. The order to add objects must
-  // mirror the order they are added in the serializer.
-  void AddAttachedObject(Handle<HeapObject> attached_object) {
-    attached_objects_.push_back(attached_object);
-  }
-
  void SetRehashability(bool v) { can_rehash_ = v; }

  protected:
@@ -49,29 +42,18 @@ class Deserializer : public SerializerDeserializer {
       : isolate_(nullptr),
         source_(data->Payload()),
         magic_number_(data->GetMagicNumber()),
-        next_map_index_(0),
         external_reference_table_(nullptr),
-        deserialized_large_objects_(0),
+        allocator_(this),
         deserializing_user_code_(deserializing_user_code),
-        next_alignment_(kWordAligned),
         can_rehash_(false) {
-    DecodeReservation(data->Reservations());
-    // We start the indicies here at 1, so that we can distinguish between an
+    allocator()->DecodeReservation(data->Reservations());
+    // We start the indices here at 1, so that we can distinguish between an
     // actual index and a nullptr in a deserialized object requiring fix-up.
     off_heap_backing_stores_.push_back(nullptr);
  }

-  bool ReserveSpace();
-
-  // Atomically reserves space for the two given deserializers. Guarantees
-  // reservation for both without garbage collection in-between.
-  static bool ReserveSpace(StartupDeserializer* startup_deserializer,
-                           BuiltinDeserializer* builtin_deserializer);
-  bool ReservesOnlyCodeSpace() const;
-
  void Initialize(Isolate* isolate);
  void DeserializeDeferredObjects();
-  void RegisterDeserializedObjectsForBlackAllocation();

  virtual Address Allocate(int space_index, int size);
@@ -82,6 +64,12 @@ class Deserializer : public SerializerDeserializer {
  // snapshot by chunk index and offset.
  HeapObject* GetBackReferencedObject(int space);

+  // Add an object to back an attached reference. The order to add objects must
+  // mirror the order they are added in the serializer.
+  void AddAttachedObject(Handle<HeapObject> attached_object) {
+    attached_objects_.push_back(attached_object);
+  }
+
  // Sort descriptors of deserialized maps using new string hashes.
  void SortMapDescriptors();
@@ -102,6 +90,8 @@ class Deserializer : public SerializerDeserializer {
  const std::vector<TransitionArray*>& transition_arrays() const {
    return transition_arrays_;
  }

+  AllocatorT* allocator() { return &allocator_; }
+
  bool deserializing_user_code() const { return deserializing_user_code_; }
  bool can_rehash() const { return can_rehash_; }
@@ -112,20 +102,10 @@ class Deserializer : public SerializerDeserializer {
  void Synchronize(VisitorSynchronization::SyncTag tag) override;

-  void DecodeReservation(Vector<const SerializedData::Reservation> res);
-
  void UnalignedCopy(Object** dest, Object** src) {
    memcpy(dest, src, sizeof(*src));
  }

-  void SetAlignment(byte data) {
-    DCHECK_EQ(kWordAligned, next_alignment_);
-    int alignment = data - (kAlignmentPrefix - 1);
-    DCHECK_LE(kWordAligned, alignment);
-    DCHECK_LE(alignment, kDoubleUnaligned);
-    next_alignment_ = static_cast<AllocationAlignment>(alignment);
-  }
-
  // Fills in some heap data in an area from start to end (non-inclusive). The
  // space id is used for the write barrier. The object_address is the address
  // of the object we are writing into, or nullptr if we are not writing into an
@@ -159,19 +139,8 @@ class Deserializer : public SerializerDeserializer {
  SnapshotByteSource source_;
  uint32_t magic_number_;

-  // The address of the next object that will be allocated in each space.
-  // Each space has a number of chunks reserved by the GC, with each chunk
-  // fitting into a page. Deserialized objects are allocated into the
-  // current chunk of the target space by bumping up high water mark.
-  Heap::Reservation reservations_[kNumberOfSpaces];
-  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
-  Address high_water_[kNumberOfPreallocatedSpaces];
-
-  int next_map_index_;
-  std::vector<Address> allocated_maps_;
-
  ExternalReferenceTable* external_reference_table_;

-  std::vector<HeapObject*> deserialized_large_objects_;
  std::vector<Code*> new_code_objects_;
  std::vector<AccessorInfo*> accessor_infos_;
  std::vector<Handle<String>> new_internalized_strings_;
@@ -179,14 +148,13 @@ class Deserializer : public SerializerDeserializer {
  std::vector<TransitionArray*> transition_arrays_;
  std::vector<byte*> off_heap_backing_stores_;

+  AllocatorT allocator_;
  const bool deserializing_user_code_;

  // TODO(jgruber): This workaround will no longer be necessary once builtin
  // reference patching has been removed (through advance allocation).
  bool deserializing_builtins_ = false;

-  AllocationAlignment next_alignment_;
-
  // TODO(6593): generalize rehashing, and remove this flag.
  bool can_rehash_;
@@ -194,6 +162,9 @@ class Deserializer : public SerializerDeserializer {
  uint32_t num_api_references_;
 #endif  // DEBUG

+  // For source(), isolate(), and allocator().
+  friend class DefaultDeserializerAllocator;
+
  DISALLOW_COPY_AND_ASSIGN(Deserializer);
 };
...
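A side note on the Deserializer<> spelling that appears in the subclass diffs below: empty angle brackets instantiate a template with its declared default argument, so Deserializer<> and Deserializer<DefaultDeserializerAllocator> name the same type. A self-contained illustration of the language rule (hypothetical Defaulted type, not V8 code):

#include <type_traits>

template <class T = int>
struct Defaulted {};

// "<>" selects the default template argument, exactly as Deserializer<> does.
static_assert(std::is_same<Defaulted<>, Defaulted<int>>::value,
              "empty angle brackets pick the declared default");

int main() { return 0; }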
src/snapshot/object-deserializer.cc:
@@ -67,7 +67,7 @@ ObjectDeserializer::DeserializeWasmCompiledModule(
 MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
   Initialize(isolate);
-  if (!ReserveSpace()) return MaybeHandle<HeapObject>();
+  if (!allocator()->ReserveSpace()) return MaybeHandle<HeapObject>();

   DCHECK(deserializing_user_code());
   HandleScope scope(isolate);
@@ -79,7 +79,7 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
     DeserializeDeferredObjects();
     FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
     result = Handle<HeapObject>(HeapObject::cast(root));
-    RegisterDeserializedObjectsForBlackAllocation();
+    allocator()->RegisterDeserializedObjectsForBlackAllocation();
   }
   CommitPostProcessedObjects();
   return scope.CloseAndEscape(result);
...
src/snapshot/object-deserializer.h:
@@ -15,7 +15,7 @@ class SharedFunctionInfo;
 class WasmCompiledModule;

 // Deserializes the object graph rooted at a given object.
-class ObjectDeserializer final : public Deserializer {
+class ObjectDeserializer final : public Deserializer<> {
  public:
  static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
      Isolate* isolate, const SerializedCodeData* data, Handle<String> source);
...
src/snapshot/partial-deserializer.cc:
@@ -30,7 +30,9 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
     Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
     v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
   Initialize(isolate);
-  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("PartialDeserializer");
+  if (!allocator()->ReserveSpace()) {
+    V8::FatalProcessOutOfMemory("PartialDeserializer");
+  }

   AddAttachedObject(global_proxy);
@@ -44,7 +46,7 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
   DeserializeDeferredObjects();
   DeserializeEmbedderFields(embedder_fields_deserializer);

-  RegisterDeserializedObjectsForBlackAllocation();
+  allocator()->RegisterDeserializedObjectsForBlackAllocation();

   // There's no code deserialized here. If this assert fires then that's
   // changed and logging should be added to notify the profiler et al of the
...
src/snapshot/partial-deserializer.h:
@@ -15,7 +15,7 @@ class Context;

 // Deserializes the context-dependent object graph rooted at a given object.
 // The PartialDeserializer is not expected to deserialize any code objects.
-class PartialDeserializer final : public Deserializer {
+class PartialDeserializer final : public Deserializer<> {
  public:
  static MaybeHandle<Context> DeserializeContext(
      Isolate* isolate, const SnapshotData* data, bool can_rehash,
...
src/snapshot/startup-deserializer.cc:
@@ -18,7 +18,8 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
   BuiltinDeserializer builtin_deserializer(isolate, builtin_data_);

-  if (!Deserializer::ReserveSpace(this, &builtin_deserializer)) {
+  if (!DefaultDeserializerAllocator::ReserveSpace(this,
+                                                  &builtin_deserializer)) {
     V8::FatalProcessOutOfMemory("StartupDeserializer");
   }
...
src/snapshot/startup-deserializer.h:
@@ -12,7 +12,7 @@ namespace v8 {
 namespace internal {

 // Initializes an isolate with context-independent data from a given snapshot.
-class StartupDeserializer final : public Deserializer {
+class StartupDeserializer final : public Deserializer<> {
  public:
  StartupDeserializer(const SnapshotData* startup_data,
                      const BuiltinSnapshotData* builtin_data)
...
src/v8.gyp:
@@ -1345,6 +1345,8 @@
       'snapshot/builtin-serializer.h',
       'snapshot/code-serializer.cc',
       'snapshot/code-serializer.h',
+      'snapshot/default-deserializer-allocator.cc',
+      'snapshot/default-deserializer-allocator.h',
       'snapshot/default-serializer-allocator.cc',
       'snapshot/default-serializer-allocator.h',
       'snapshot/deserializer.cc',
...