Commit b7b91142 authored by jgruber, committed by Commit Bot

[snapshot] Dynamically allocate builtin code objects in advance

Our current deserializers (startup & partial) use a system of static memory
reservations: the required memory is determined at serialization time,
requested up-front before deserialization begins, and doled out as needed
during deserialization.

Lazy builtin deserialization needs a bit more flexibility. On the one hand, the
amount of required memory varies since --lazy-deserialization can be switched
on and off at runtime.

On the other hand, builtin deserialization has been made order-independent, so
we can encounter references to builtins before those builtins have themselves
been deserialized.

Both problems are solved by dynamically allocating the required memory
up-front and initializing the builtins table with the (as yet uninitialized)
builtin Code objects.
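
In outline, the idea can be modeled with a self-contained toy (illustrative
only, not V8 code; all names here are invented): slots for all builtins are
allocated and published first, so any reference taken during deserialization
already points at an object's final address, and the contents are filled in
afterwards.

#include <array>
#include <cstdio>

struct CodeObject {
  int payload = 0;  // Stands in for the builtin's instructions.
};

int main() {
  constexpr int kBuiltinCount = 3;
  // Phase 1: reserve memory and publish the (still uninitialized) addresses.
  std::array<CodeObject, kBuiltinCount> storage;      // The "reservation".
  std::array<CodeObject*, kBuiltinCount> builtins{};  // The builtins table.
  for (int i = 0; i < kBuiltinCount; i++) builtins[i] = &storage[i];

  // A cross-reference taken *before* deserialization is already final.
  CodeObject* early_ref = builtins[2];

  // Phase 2: "deserialize" into the pre-allocated slots.
  for (int i = 0; i < kBuiltinCount; i++) builtins[i]->payload = 100 + i;

  std::printf("%d\n", early_ref->payload);  // Prints 102; no fix-up needed.
  return 0;
}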

Bug: v8:6624
Change-Id: Iee90992e91adb4ab45dae1acc81f64a108d12584
Reviewed-on: https://chromium-review.googlesource.com/647748
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47789}
parent d354feb6
--- a/src/base/build-config.h
+++ b/src/base/build-config.h
@@ -201,8 +201,8 @@
 #define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0
 #endif
 
-// Number of bits to represent the page size for paged spaces. The value of 20
-// gives 1Mb bytes per page.
+// Number of bits to represent the page size for paged spaces. The value of 19
+// gives 512Kb bytes per page.
 const int kPageSizeBits = 19;
 
 #endif  // V8_BASE_BUILD_CONFIG_H_
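
The corrected comment is easy to sanity-check (a standalone snippet, not part
of the change): 1 << 19 bytes is exactly 512KB.

#include <cstddef>

// 1 << 19 == 524288 == 512 * 1024, i.e. 512KB per page.
static_assert((std::size_t{1} << 19) == 512 * 1024,
              "kPageSizeBits == 19 implies 512KB pages");

int main() { return 0; }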
--- a/src/builtins/builtins.cc
+++ b/src/builtins/builtins.cc
@@ -147,6 +147,14 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
   UNREACHABLE();
 }
 
+void Builtins::set_builtin(int index, HeapObject* builtin) {
+  DCHECK(Builtins::IsBuiltinId(index));
+  DCHECK(Internals::HasHeapObjectTag(builtin));
+  // The given builtin may be completely uninitialized thus we cannot check its
+  // type here.
+  builtins_[index] = builtin;
+}
+
 Handle<Code> Builtins::builtin_handle(Name name) {
   return Handle<Code>(reinterpret_cast<Code**>(builtin_address(name)));
 }
--- a/src/builtins/builtins.h
+++ b/src/builtins/builtins.h
@@ -71,6 +71,9 @@ class Builtins {
   Handle<Code> NewCloneShallowArray(AllocationSiteMode allocation_mode);
   Handle<Code> JSConstructStubGeneric();
 
+  // Used by BuiltinDeserializer.
+  void set_builtin(int index, HeapObject* builtin);
+
   Code* builtin(Name name) {
     // Code::cast cannot be used here since we access builtins
     // during the marking phase of mark sweep. See IC::Clear.
--- a/src/snapshot/builtin-deserializer.cc
+++ b/src/snapshot/builtin-deserializer.cc
@@ -4,22 +4,174 @@
 #include "src/snapshot/builtin-deserializer.h"
 
+#include "src/objects-inl.h"
 #include "src/snapshot/snapshot.h"
 
 namespace v8 {
 namespace internal {
 
+// Tracks the builtin currently being deserialized (required for allocation).
+class DeserializingBuiltinScope {
+ public:
+  DeserializingBuiltinScope(BuiltinDeserializer* builtin_deserializer,
+                            int builtin_id)
+      : builtin_deserializer_(builtin_deserializer) {
+    DCHECK_EQ(BuiltinDeserializer::kNoBuiltinId,
+              builtin_deserializer->current_builtin_id_);
+    builtin_deserializer->current_builtin_id_ = builtin_id;
+  }
+
+  ~DeserializingBuiltinScope() {
+    builtin_deserializer_->current_builtin_id_ =
+        BuiltinDeserializer::kNoBuiltinId;
+  }
+
+ private:
+  BuiltinDeserializer* builtin_deserializer_;
+
+  DISALLOW_COPY_AND_ASSIGN(DeserializingBuiltinScope)
+};
+
 BuiltinDeserializer::BuiltinDeserializer(const BuiltinSnapshotData* data)
     : Deserializer(data, false) {
+  // We may have to relax this at some point to pack reloc infos and handler
+  // tables into the builtin blob (instead of the partial snapshot cache).
+  DCHECK(ReservesOnlyCodeSpace());
+
   builtin_offsets_ = data->BuiltinOffsets();
+  DCHECK_EQ(Builtins::builtin_count, builtin_offsets_.length());
   DCHECK(std::is_sorted(builtin_offsets_.begin(), builtin_offsets_.end()));
+
+  builtin_sizes_ = ExtractBuiltinSizes();
+  DCHECK_EQ(Builtins::builtin_count, builtin_sizes_.size());
+}
+
+void BuiltinDeserializer::DeserializeEagerBuiltins() {
+  DCHECK(!AllowHeapAllocation::IsAllowed());
+  DCHECK_EQ(0, source()->position());
+
+  // TODO(jgruber): Replace lazy builtins with DeserializeLazy.
+  Builtins* builtins = isolate()->builtins();
+  for (int i = 0; i < Builtins::builtin_count; i++) {
+    builtins->set_builtin(i, DeserializeBuiltin(i));
+  }
+
+#ifdef DEBUG
+  for (int i = 0; i < Builtins::builtin_count; i++) {
+    Object* o = builtins->builtin(static_cast<Builtins::Name>(i));
+    DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
+  }
+#endif
 }
 
-void BuiltinDeserializer::DeserializeAllBuiltins() {
+Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
   DCHECK(!AllowHeapAllocation::IsAllowed());
+  DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+  DeserializingBuiltinScope scope(this, builtin_id);
+
+  const int initial_position = source()->position();
+  const uint32_t offset = builtin_offsets_[builtin_id];
+  source()->set_position(offset);
+
+  Object* o = ReadDataSingle();
+  DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
+
+  // Rewind.
+  source()->set_position(initial_position);
+
+  return Code::cast(o);
+}
+
+uint32_t BuiltinDeserializer::ExtractBuiltinSize(int builtin_id) {
+  DCHECK(Builtins::IsBuiltinId(builtin_id));
+
+  const int initial_position = source()->position();
+  const uint32_t offset = builtin_offsets_[builtin_id];
+  source()->set_position(offset);
+
+  // Grab the size of the code object.
+  byte data = source()->Get();
+
+  // The first bytecode can either be kNewObject, or kNextChunk if the current
+  // chunk has been exhausted. Since we do allocations differently here, we
+  // don't care about kNextChunk and can simply skip over it.
+  // TODO(jgruber): When refactoring (de)serializer allocations, ensure we don't
+  // generate kNextChunk bytecodes anymore for the builtins snapshot. In fact,
+  // the entire reservations mechanism is unused for the builtins snapshot.
+  if (data == kNextChunk) {
+    source()->Get();  // Skip over kNextChunk's {space} parameter.
+    data = source()->Get();
+  }
+
+  DCHECK_EQ(kNewObject | kPlain | kStartOfObject | CODE_SPACE, data);
+  const uint32_t result = source()->GetInt() << kObjectAlignmentBits;
+
+  // Rewind.
+  source()->set_position(initial_position);
+
+  return result;
+}
+
+std::vector<uint32_t> BuiltinDeserializer::ExtractBuiltinSizes() {
+  std::vector<uint32_t> result;
+  result.reserve(Builtins::builtin_count);
+  for (int i = 0; i < Builtins::builtin_count; i++) {
+    result.push_back(ExtractBuiltinSize(i));
+  }
+  return result;
+}
+
+Heap::Reservation BuiltinDeserializer::CreateReservationsForEagerBuiltins() {
+  DCHECK(ReservesOnlyCodeSpace());
+
+  Heap::Reservation result;
+  for (int i = 0; i < Builtins::builtin_count; i++) {
+    // TODO(jgruber): Skip lazy builtins.
+    const uint32_t builtin_size = builtin_sizes_[i];
+    DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+    result.push_back({builtin_size, nullptr, nullptr});
+  }
+
+  return result;
+}
+
+void BuiltinDeserializer::InitializeBuiltinsTable(
+    const Heap::Reservation& reservation) {
+  DCHECK(!AllowHeapAllocation::IsAllowed());
+
+  // The DeserializeLazy builtin may replace other (lazy) builtins, and thus
+  // must not itself be lazy.
+  DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
+
+  Builtins* builtins = isolate()->builtins();
+  int reservation_index = 0;
+  for (int i = 0; i < Builtins::builtin_count; i++) {
+    // TODO(jgruber): Replace lazy builtins with DeserializeLazy.
+    Address start = reservation[reservation_index].start;
+    DCHECK_EQ(builtin_sizes_[i], reservation[reservation_index].size);
+    DCHECK_EQ(builtin_sizes_[i], reservation[reservation_index].end - start);
+    builtins->set_builtin(i, HeapObject::FromAddress(start));
+    reservation_index++;
+  }
+
+  DCHECK_EQ(reservation.size(), reservation_index);
+}
 
-  isolate()->builtins()->IterateBuiltins(this);
-  PostProcessDeferredBuiltinReferences();
+Address BuiltinDeserializer::Allocate(int space_index, int size) {
+  DCHECK_EQ(CODE_SPACE, space_index);
+  DCHECK_EQ(ExtractBuiltinSize(current_builtin_id_), size);
+  Object* obj = isolate()->builtins()->builtin(
+      static_cast<Builtins::Name>(current_builtin_id_));
+  DCHECK(Internals::HasHeapObjectTag(obj));
+  Address address = HeapObject::cast(obj)->address();
+  SkipList::Update(address, size);
+  return address;
 }
 
 }  // namespace internal
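
ExtractBuiltinSize above decodes a size that the serializer stored in
object-alignment units. A standalone illustration of the arithmetic (the
concrete values are assumptions for the example; on common 64-bit
configurations kObjectAlignmentBits is 3, i.e. pointer-size alignment):

#include <cstdint>
#include <cstdio>

int main() {
  // Assumption for illustration: 64-bit target, so objects are 8-byte
  // aligned and sizes are serialized in units of 8 bytes.
  const uint32_t kObjectAlignmentBits = 3;
  const uint32_t serialized_size_in_words = 0x40;  // Hypothetical GetInt().
  const uint32_t size_in_bytes = serialized_size_in_words
                                 << kObjectAlignmentBits;
  std::printf("%u\n", size_in_bytes);  // 0x40 << 3 == 0x200 == 512 bytes.
  return 0;
}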
--- a/src/snapshot/builtin-deserializer.h
+++ b/src/snapshot/builtin-deserializer.h
@@ -5,6 +5,7 @@
 #ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
 #define V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
 
+#include "src/heap/heap.h"
 #include "src/snapshot/deserializer.h"
 
 namespace v8 {
@@ -22,17 +23,54 @@ class BuiltinDeserializer final : public Deserializer {
   // Builtins deserialization is tightly integrated with deserialization of the
   // startup blob. In particular, we need to ensure that no GC can occur
-  // between startup- and builtins deserialization, as all existing builtin
-  // references need to be fixed up after builtins have been deserialized.
-  // Thus this quirky two-sided API: required memory needs to be reserved
-  // pre-startup deserialization, and builtins must be deserialized at exactly
-  // the right point during startup deserialization.
-  void DeserializeAllBuiltins();
+  // between startup- and builtins deserialization, as all builtins have been
+  // pre-allocated and their pointers may not be invalidated.
+  void DeserializeEagerBuiltins();
+  Code* DeserializeBuiltin(int builtin_id);
+
+  // These methods are used to pre-allocate builtin objects prior to
+  // deserialization.
+  // TODO(jgruber): Refactor reservation/allocation logic in deserializers to
+  // make this less messy.
+  Heap::Reservation CreateReservationsForEagerBuiltins();
+  void InitializeBuiltinsTable(const Heap::Reservation& reservation);
 
  private:
+  // Extracts the size of builtin Code objects (baked into the snapshot).
+  uint32_t ExtractBuiltinSize(int builtin_id);
+  std::vector<uint32_t> ExtractBuiltinSizes();
+
+  // Allocation works differently here than in other deserializers. Instead of
+  // a statically-known memory area determined at serialization-time, our
+  // memory requirements here are determined at runtime. Another major
+  // difference is that we create builtin Code objects up-front (before
+  // deserialization) in order to avoid having to patch builtin references
+  // later on. See also the kBuiltin case in deserializer.cc.
+  //
+  // Allocate simply returns the pre-allocated object prepared by
+  // InitializeBuiltinsTable.
+  Address Allocate(int space_index, int size) override;
+
+  // BuiltinDeserializer implements its own builtin iteration logic. Make sure
+  // the RootVisitor API is not used accidentally.
+  void VisitRootPointers(Root root, Object** start, Object** end) override {
+    UNREACHABLE();
+  }
+
+  // Stores the builtin currently being deserialized. We need this to determine
+  // where to 'allocate' from during deserialization.
+  static const int kNoBuiltinId = -1;
+  int current_builtin_id_ = kNoBuiltinId;
+
+  // The sizes of each builtin Code object in its deserialized state. This list
+  // is used to determine required space prior to deserialization.
+  std::vector<uint32_t> builtin_sizes_;
+
   // The offsets of each builtin within the serialized data. Equivalent to
   // BuiltinSerializer::builtin_offsets_ but on the deserialization side.
   Vector<const uint32_t> builtin_offsets_;
+
+  friend class DeserializingBuiltinScope;
 };
 
 }  // namespace internal
--- a/src/snapshot/deserializer.cc
+++ b/src/snapshot/deserializer.cc
@@ -56,76 +56,81 @@ bool Deserializer::ReserveSpace() {
 }
 
 // static
-bool Deserializer::ReserveSpace(StartupDeserializer* lhs,
-                                BuiltinDeserializer* rhs) {
+bool Deserializer::ReserveSpace(StartupDeserializer* startup_deserializer,
+                                BuiltinDeserializer* builtin_deserializer) {
   const int first_space = NEW_SPACE;
   const int last_space = SerializerDeserializer::kNumberOfSpaces;
-  Isolate* isolate = lhs->isolate();
+  Isolate* isolate = startup_deserializer->isolate();
 
-  // Merge reservations to reserve space in one go.
+  // Create a set of merged reservations to reserve space in one go.
+  // The BuiltinDeserializer's reservations are ignored, since our actual
+  // requirements vary based on whether lazy deserialization is enabled.
+  // Instead, we manually determine the required code-space.
+  DCHECK(builtin_deserializer->ReservesOnlyCodeSpace());
   Heap::Reservation merged_reservations[kNumberOfSpaces];
   for (int i = first_space; i < last_space; i++) {
-    Heap::Reservation& r = merged_reservations[i];
-    Heap::Reservation& lhs_r = lhs->reservations_[i];
-    Heap::Reservation& rhs_r = rhs->reservations_[i];
-    DCHECK(!lhs_r.empty());
-    DCHECK(!rhs_r.empty());
-    r.insert(r.end(), lhs_r.begin(), lhs_r.end());
-    r.insert(r.end(), rhs_r.begin(), rhs_r.end());
+    merged_reservations[i] = startup_deserializer->reservations_[i];
   }
 
-  std::vector<Address> merged_allocated_maps;
+  Heap::Reservation builtin_reservations =
+      builtin_deserializer->CreateReservationsForEagerBuiltins();
+  DCHECK(!builtin_reservations.empty());
+
+  for (const auto& c : builtin_reservations) {
+    merged_reservations[CODE_SPACE].push_back(c);
+  }
+
   if (!isolate->heap()->ReserveSpace(merged_reservations,
-                                     &merged_allocated_maps)) {
+                                     &startup_deserializer->allocated_maps_)) {
     return false;
   }
 
+  DisallowHeapAllocation no_allocation;
+
   // Distribute the successful allocations between both deserializers.
-  // There's nothing to be done here except for map space.
+  // There's nothing to be done here except for code space.
   {
-    Heap::Reservation& lhs_r = lhs->reservations_[MAP_SPACE];
-    Heap::Reservation& rhs_r = rhs->reservations_[MAP_SPACE];
-    DCHECK_EQ(1, lhs_r.size());
-    DCHECK_EQ(1, rhs_r.size());
-    const int lhs_num_maps = lhs_r[0].size / Map::kSize;
-    const int rhs_num_maps = rhs_r[0].size / Map::kSize;
-    DCHECK_EQ(merged_allocated_maps.size(), lhs_num_maps + rhs_num_maps);
-    {
-      std::vector<Address>& dst = lhs->allocated_maps_;
-      DCHECK(dst.empty());
-      auto it = merged_allocated_maps.begin();
-      dst.insert(dst.end(), it, it + lhs_num_maps);
-    }
-    {
-      std::vector<Address>& dst = rhs->allocated_maps_;
-      DCHECK(dst.empty());
-      auto it = merged_allocated_maps.begin() + lhs_num_maps;
-      dst.insert(dst.end(), it, it + rhs_num_maps);
+    const int num_builtin_reservations =
+        static_cast<int>(builtin_reservations.size());
+    for (int i = num_builtin_reservations - 1; i >= 0; i--) {
+      const auto& c = merged_reservations[CODE_SPACE].back();
+      DCHECK_EQ(c.size, builtin_reservations[i].size);
+      DCHECK_EQ(c.size, c.end - c.start);
+      builtin_reservations[i].start = c.start;
+      builtin_reservations[i].end = c.end;
+      merged_reservations[CODE_SPACE].pop_back();
     }
+
+    builtin_deserializer->InitializeBuiltinsTable(builtin_reservations);
   }
 
+  // Write back startup reservations.
   for (int i = first_space; i < last_space; i++) {
-    Heap::Reservation& r = merged_reservations[i];
-    Heap::Reservation& lhs_r = lhs->reservations_[i];
-    Heap::Reservation& rhs_r = rhs->reservations_[i];
-    const int lhs_num_reservations = static_cast<int>(lhs_r.size());
-    lhs_r.clear();
-    lhs_r.insert(lhs_r.end(), r.begin(), r.begin() + lhs_num_reservations);
-    rhs_r.clear();
-    rhs_r.insert(rhs_r.end(), r.begin() + lhs_num_reservations, r.end());
+    startup_deserializer->reservations_[i].swap(merged_reservations[i]);
   }
 
   for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
-    lhs->high_water_[i] = lhs->reservations_[i][0].start;
-    rhs->high_water_[i] = rhs->reservations_[i][0].start;
+    startup_deserializer->high_water_[i] =
+        startup_deserializer->reservations_[i][0].start;
+    builtin_deserializer->high_water_[i] = nullptr;
   }
 
   return true;
 }
 
+bool Deserializer::ReservesOnlyCodeSpace() const {
+  for (int space = NEW_SPACE; space < kNumberOfSpaces; space++) {
+    if (space == CODE_SPACE) continue;
+    const auto& r = reservations_[space];
+    for (const Heap::Chunk& c : r)
+      if (c.size != 0) return false;
+  }
+  return true;
+}
+
 void Deserializer::Initialize(Isolate* isolate) {
   DCHECK_NULL(isolate_);
   DCHECK_NOT_NULL(isolate);
@@ -232,35 +237,6 @@ uint32_t StringTableInsertionKey::ComputeHashField(String* string) {
   return string->hash_field();
 }
 
-void Deserializer::PostProcessDeferredBuiltinReferences() {
-  for (const DeferredBuiltinReference& ref : builtin_references_) {
-    DCHECK((ref.bytecode & kWhereMask) == kBuiltin);
-    const byte how = ref.bytecode & kHowToCodeMask;
-    const byte within = ref.bytecode & kWhereToPointMask;
-
-    Object* new_object = isolate()->builtins()->builtin(ref.builtin_name);
-    DCHECK(new_object->IsCode());
-
-    if (within == kInnerPointer) {
-      DCHECK(how == kFromCode);
-      Code* new_code_object = Code::cast(new_object);
-      new_object =
-          reinterpret_cast<Object*>(new_code_object->instruction_start());
-    }
-
-    if (how == kFromCode) {
-      Code* code = Code::cast(HeapObject::FromAddress(ref.current_object));
-      Assembler::deserialization_set_special_target_at(
-          isolate(), reinterpret_cast<Address>(ref.target_addr), code,
-          reinterpret_cast<Address>(new_object));
-    } else {
-      // TODO(jgruber): We could save one ptr-size per kPlain entry by using
-      // a separate struct for kPlain and kFromCode deferred references.
-      UnalignedCopy(ref.target_addr, &new_object);
-    }
-  }
-}
-
 HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
   if (deserializing_user_code()) {
     if (obj->IsString()) {
@@ -482,6 +458,18 @@ Address Deserializer::Allocate(int space_index, int size) {
   }
 }
 
+Object* Deserializer::ReadDataSingle() {
+  Object* o;
+  Object** start = &o;
+  Object** end = start + 1;
+  int source_space = NEW_SPACE;
+  Address current_object = nullptr;
+
+  CHECK(ReadData(start, end, source_space, current_object));
+
+  return o;
+}
+
 bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
                             Address current_object_address) {
   Isolate* const isolate = isolate_;
@@ -780,15 +768,6 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
   return true;
 }
 
-namespace {
-Object* MagicPointer() {
-  // Returns a pointer to this static variable to mark builtin references that
-  // have not yet been post-processed.
-  static uint64_t magic = 0xfefefefefefefefe;
-  return reinterpret_cast<Object*>(&magic);
-}
-}  // namespace
-
 template <int where, int how, int within, int space_number_if_any>
 Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
                                     Address current_object_address, byte data,
@@ -840,21 +819,18 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
       DCHECK(Builtins::IsBuiltinId(builtin_id));
       Builtins::Name name = static_cast<Builtins::Name>(builtin_id);
       new_object = isolate->builtins()->builtin(name);
-      // Record the builtin reference for post-processing after builtin
-      // deserialization, and replace new_object with a magic byte marker.
-      builtin_references_.emplace_back(data, name, current,
-                                       current_object_address);
-      if (new_object == nullptr) new_object = MagicPointer();
       emit_write_barrier = false;
     }
     if (within == kInnerPointer) {
       DCHECK(how == kFromCode);
-      if (new_object == MagicPointer()) {
-        DCHECK(where == kBuiltin);
+      if (where == kBuiltin) {
+        // At this point, new_object may still be uninitialized, thus the
+        // unchecked Code cast.
+        new_object = reinterpret_cast<Object*>(
+            reinterpret_cast<Code*>(new_object)->instruction_start());
      } else if (new_object->IsCode()) {
-        Code* new_code_object = Code::cast(new_object);
-        new_object =
-            reinterpret_cast<Object*>(new_code_object->instruction_start());
+        new_object = reinterpret_cast<Object*>(
+            Code::cast(new_object)->instruction_start());
      } else {
        Cell* cell = Cell::cast(new_object);
        new_object = reinterpret_cast<Object*>(cell->ValueAddress());
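
A note on the kBuiltin inner-pointer case above: the unchecked Code cast is
sound because instruction_start() is computed purely from the object's address
plus a fixed header offset, and reads none of the object's possibly
still-unwritten fields. Roughly (a sketch of the accessor's shape with a
hypothetical header size, not the exact V8 definition):

#include <cstdint>
#include <cstdio>

using byte = unsigned char;

// Sketch only: an address computation of this shape touches no object fields,
// so it is valid even while the Code object's contents are still unwritten.
byte* InstructionStartSketch(void* tagged_heap_object) {
  const intptr_t kHeapObjectTag = 1;  // V8 heap pointers carry a low tag bit.
  const intptr_t kHeaderSize = 0x80;  // Hypothetical Code header size.
  return reinterpret_cast<byte*>(tagged_heap_object) - kHeapObjectTag +
         kHeaderSize;
}

int main() {
  // Any address works: no dereference happens.
  alignas(8) static byte fake_object[256];
  void* tagged = fake_object + 1;  // "Tag" the pointer.
  std::printf("%p\n", static_cast<void*>(InstructionStartSketch(tagged)));
  return 0;
}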
--- a/src/snapshot/deserializer.h
+++ b/src/snapshot/deserializer.h
@@ -65,13 +65,19 @@ class Deserializer : public SerializerDeserializer {
   // Atomically reserves space for the two given deserializers. Guarantees
   // reservation for both without garbage collection in-between.
-  // TODO(jgruber): Replace this with advance builtin handle allocation.
-  static bool ReserveSpace(StartupDeserializer* lhs, BuiltinDeserializer* rhs);
+  static bool ReserveSpace(StartupDeserializer* startup_deserializer,
+                           BuiltinDeserializer* builtin_deserializer);
+
+  bool ReservesOnlyCodeSpace() const;
 
   void Initialize(Isolate* isolate);
   void DeserializeDeferredObjects();
   void RegisterDeserializedObjectsForBlackAllocation();
 
+  virtual Address Allocate(int space_index, int size);
+
+  // Deserializes into a single pointer and returns the resulting object.
+  Object* ReadDataSingle();
+
   // This returns the address of an object that has been described in the
   // snapshot by chunk index and offset.
   HeapObject* GetBackReferencedObject(int space);
@@ -79,10 +85,6 @@ class Deserializer : public SerializerDeserializer {
   // Sort descriptors of deserialized maps using new string hashes.
   void SortMapDescriptors();
 
-  // Fix up all recorded pointers to builtins once builtins have been
-  // deserialized.
-  void PostProcessDeferredBuiltinReferences();
-
   Isolate* isolate() const { return isolate_; }
   SnapshotByteSource* source() { return &source_; }
   const std::vector<Code*>& new_code_objects() const {
@@ -122,23 +124,6 @@ class Deserializer : public SerializerDeserializer {
     next_alignment_ = static_cast<AllocationAlignment>(alignment);
   }
 
-  // Builtin references are collected during initial deserialization and later
-  // iterated and filled in with the correct addresses after builtins have
-  // themselves been deserialized.
-  struct DeferredBuiltinReference {
-    byte bytecode;
-    Builtins::Name builtin_name;
-    Object** target_addr;
-    Address current_object;
-
-    DeferredBuiltinReference(byte bytecode, Builtins::Name builtin_name,
-                             Object** target_addr, Address current_object)
-        : bytecode(bytecode),
-          builtin_name(builtin_name),
-          target_addr(target_addr),
-          current_object(current_object) {}
-  };
-
   // Fills in some heap data in an area from start to end (non-inclusive). The
   // space id is used for the write barrier. The object_address is the address
   // of the object we are writing into, or NULL if we are not writing into an
@@ -155,7 +140,6 @@ class Deserializer : public SerializerDeserializer {
                 bool write_barrier_needed);
   void ReadObject(int space_number, Object** write_back);
-  Address Allocate(int space_index, int size);
 
   // Special handling for serialized code like hooking up internalized strings.
   HeapObject* PostProcessNewObject(HeapObject* obj, int space);
@@ -192,7 +176,6 @@ class Deserializer : public SerializerDeserializer {
   std::vector<Handle<Script>> new_scripts_;
   std::vector<TransitionArray*> transition_arrays_;
   std::vector<byte*> off_heap_backing_stores_;
-  std::vector<DeferredBuiltinReference> builtin_references_;
 
   const bool deserializing_user_code_;
--- a/src/snapshot/snapshot-source-sink.h
+++ b/src/snapshot/snapshot-source-sink.h
@@ -64,6 +64,7 @@ class SnapshotByteSource final {
   int GetBlob(const byte** data);
 
   int position() { return position_; }
+  void set_position(int position) { position_ = position; }
 
  private:
   const byte* data_;
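
The new set_position setter is what enables the save/seek/rewind pattern used
by BuiltinDeserializer::DeserializeBuiltin and ExtractBuiltinSize above. A
minimal stand-in model of that pattern (the class below is a simplified
invention, not the real SnapshotByteSource):

#include <cassert>
#include <utility>
#include <vector>

// Minimal stand-in for SnapshotByteSource's position interface; only
// Get/position/set_position are modeled.
class ByteSource {
 public:
  explicit ByteSource(std::vector<unsigned char> data)
      : data_(std::move(data)) {}

  unsigned char Get() { return data_[position_++]; }
  int position() const { return position_; }
  void set_position(int position) { position_ = position; }

 private:
  std::vector<unsigned char> data_;
  int position_ = 0;
};

int main() {
  ByteSource source({10, 20, 30, 40});
  const int saved = source.position();
  source.set_position(2);      // Seek to an out-of-order offset...
  assert(source.Get() == 30);  // ...read a single item there...
  source.set_position(saved);  // ...then rewind, leaving the stream intact.
  assert(source.Get() == 10);
  return 0;
}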
--- a/src/snapshot/startup-deserializer.cc
+++ b/src/snapshot/startup-deserializer.cc
@@ -43,10 +43,8 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
     FlushICacheForNewIsolate();
     RestoreExternalReferenceRedirectors(accessor_infos());
 
-    // Eagerly deserialize all builtins from the builtin snapshot.
-    // TODO(6624): Deserialize lazily.
-    builtin_deserializer.DeserializeAllBuiltins();
-    PostProcessDeferredBuiltinReferences();
+    // Deserialize eager builtins from the builtin snapshot.
+    builtin_deserializer.DeserializeEagerBuiltins();
   }
 
   isolate->heap()->set_native_contexts_list(isolate->heap()->undefined_value());