Commit 1b2a341e authored by jgruber, committed by Commit Bot

[snapshot] Move bytecode handlers to builtins snapshot

This is the first step towards lazy-deserializing bytecode handlers.

Bytecode handler code objects are now serialized into the builtins
snapshot area (which, like many other related concepts, has become
somewhat of a misnomer now that it contains both builtins and
handlers).

Handlers are still eagerly-deserialized upon Isolate creation. This will
change in follow-up CLs.

Bug: v8:6624
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: I7b257f76f5e9e90d5f7b183980bae7bc621171fc
Reviewed-on: https://chromium-review.googlesource.com/738030
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48977}
parent ba06ceac
......@@ -1970,6 +1970,8 @@ v8_source_set("v8_base") {
"src/snapshot/builtin-serializer-allocator.h",
"src/snapshot/builtin-serializer.cc",
"src/snapshot/builtin-serializer.h",
"src/snapshot/builtin-snapshot-utils.cc",
"src/snapshot/builtin-snapshot-utils.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/default-deserializer-allocator.cc",
......
......@@ -749,7 +749,7 @@ StartupData SnapshotCreator::CreateBlob(
// cache and thus needs to happen before SerializeWeakReferencesAndDeferred
// is called below.
i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
builtin_serializer.SerializeBuiltins();
builtin_serializer.SerializeBuiltinsAndHandlers();
startup_serializer.SerializeWeakReferencesAndDeferred();
can_be_rehashed = can_be_rehashed && startup_serializer.can_be_rehashed();
......
......@@ -96,6 +96,14 @@ Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
return Code::GetCodeFromTargetAddress(code_entry);
}
// Installs {handler} as the dispatch-table entry for the given
// {bytecode, operand_scale} pair. Used by the builtin deserializer to patch
// the table as handlers are deserialized.
void Interpreter::SetBytecodeHandler(Bytecode bytecode,
                                     OperandScale operand_scale,
                                     Code* handler) {
  // Only bytecode handler code objects may be installed here. DCHECK_EQ
  // (rather than DCHECK on an equality expression) prints both values on
  // failure.
  DCHECK_EQ(Code::BYTECODE_HANDLER, handler->kind());
  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
  // The table stores entry addresses, not Code pointers.
  dispatch_table_[index] = handler->entry();
}
// static
size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
OperandScale operand_scale) {
......@@ -241,7 +249,7 @@ CompilationJob* Interpreter::NewCompilationJob(ParseInfo* parse_info,
return new InterpreterCompilationJob(parse_info, literal, isolate);
}
bool Interpreter::IsDispatchTableInitialized() {
bool Interpreter::IsDispatchTableInitialized() const {
return dispatch_table_[0] != nullptr;
}
......
......@@ -19,13 +19,14 @@ namespace v8 {
namespace internal {
class Isolate;
class BuiltinDeserializerAllocator;
class Callable;
class CompilationInfo;
class CompilationJob;
class FunctionLiteral;
class ParseInfo;
class SetupIsolateDelegate;
class RootVisitor;
class SetupIsolateDelegate;
namespace interpreter {
......@@ -41,9 +42,13 @@ class Interpreter {
FunctionLiteral* literal,
Isolate* isolate);
// Return bytecode handler for |bytecode|.
// Return bytecode handler for |bytecode| and |operand_scale|.
Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
// Set the bytecode handler for |bytecode| and |operand_scale|.
void SetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale,
Code* handler);
// GC support.
void IterateDispatchTable(RootVisitor* v);
......@@ -52,6 +57,8 @@ class Interpreter {
V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
bool IsDispatchTableInitialized() const;
Address dispatch_table_address() {
return reinterpret_cast<Address>(&dispatch_table_[0]);
}
......@@ -66,6 +73,7 @@ class Interpreter {
private:
friend class SetupInterpreter;
friend class v8::internal::SetupIsolateDelegate;
friend class v8::internal::BuiltinDeserializerAllocator;
uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
......@@ -73,8 +81,6 @@ class Interpreter {
static size_t GetDispatchTableIndex(Bytecode bytecode,
OperandScale operand_scale);
bool IsDispatchTableInitialized();
static const int kNumberOfWideVariants = BytecodeOperands::kOperandScaleCount;
static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
static const int kNumberOfBytecodes = static_cast<int>(Bytecode::kLast) + 1;
......
......@@ -5,44 +5,71 @@
#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/deserializer.h"
namespace v8 {
namespace internal {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::Interpreter;
using interpreter::OperandScale;
BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer)
: deserializer_(deserializer) {}
// Returns the address to 'allocate' the current code object at. No actual
// allocation happens; memory was reserved up-front and is looked up here
// either through the builtins table or the interpreter dispatch table.
Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
                                               int size) {
  const int code_object_id = deserializer()->CurrentCodeObjectId();
  DCHECK_NE(BuiltinDeserializer::kNoCodeObjectId, code_object_id);
  DCHECK_EQ(CODE_SPACE, space);
  // The reservation recorded for this code object must match the request.
  DCHECK_EQ(deserializer()->ExtractCodeObjectSize(code_object_id), size);
#ifdef DEBUG
  RegisterCodeObjectAllocation(code_object_id);
#endif

  if (BSU::IsBuiltinIndex(code_object_id)) {
    // Builtins: the builtins table slot already holds the reserved object.
    Object* obj = isolate()->builtins()->builtin(code_object_id);
    DCHECK(Internals::HasHeapObjectTag(obj));
    return HeapObject::cast(obj)->address();
  } else if (BSU::IsHandlerIndex(code_object_id)) {
    // Handlers: the dispatch table slot holds the reserved chunk's start
    // address (set in InitializeHandlerFromReservation).
    Bytecode bytecode;
    OperandScale operand_scale;
    std::tie(bytecode, operand_scale) = BSU::BytecodeFromIndex(code_object_id);
    Address* dispatch_table = isolate()->interpreter()->dispatch_table_;
    const size_t index =
        Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
    Object* obj = HeapObject::FromAddress(dispatch_table[index]);
    DCHECK(Internals::HasHeapObjectTag(obj));
    return HeapObject::cast(obj)->address();
  }
  UNREACHABLE();
}
Heap::Reservation
BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
Heap::Reservation result;
// Reservations for builtins.
// DeserializeLazy is always the first reservation (to simplify logic in
// InitializeBuiltinsTable).
{
DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
uint32_t builtin_size =
deserializer()->ExtractBuiltinSize(Builtins::kDeserializeLazy);
deserializer()->ExtractCodeObjectSize(Builtins::kDeserializeLazy);
DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
result.push_back({builtin_size, nullptr, nullptr});
}
for (int i = 0; i < Builtins::builtin_count; i++) {
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
if (i == Builtins::kDeserializeLazy) continue;
// Skip lazy builtins. These will be replaced by the DeserializeLazy code
......@@ -51,17 +78,31 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
continue;
}
uint32_t builtin_size = deserializer()->ExtractBuiltinSize(i);
uint32_t builtin_size = deserializer()->ExtractCodeObjectSize(i);
DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
result.push_back({builtin_size, nullptr, nullptr});
}
// Reservations for bytecode handlers.
BSU::ForEachBytecode(
[=, &result](Bytecode bytecode, OperandScale operand_scale) {
// TODO(jgruber): Replace with DeserializeLazy handler.
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
uint32_t handler_size = deserializer()->ExtractCodeObjectSize(index);
DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
result.push_back({handler_size, nullptr, nullptr});
});
return result;
}
void BuiltinDeserializerAllocator::InitializeBuiltinFromReservation(
const Heap::Chunk& chunk, int builtin_id) {
DCHECK_EQ(deserializer()->ExtractBuiltinSize(builtin_id), chunk.size);
DCHECK_EQ(deserializer()->ExtractCodeObjectSize(builtin_id), chunk.size);
DCHECK_EQ(chunk.size, chunk.end - chunk.start);
SkipList::Update(chunk.start, chunk.size);
......@@ -69,14 +110,40 @@ void BuiltinDeserializerAllocator::InitializeBuiltinFromReservation(
HeapObject::FromAddress(chunk.start));
#ifdef DEBUG
RegisterBuiltinReservation(builtin_id);
RegisterCodeObjectReservation(builtin_id);
#endif
}
// Points the dispatch-table slot for {bytecode, operand_scale} at the
// reserved chunk the handler will be deserialized into.
void BuiltinDeserializerAllocator::InitializeHandlerFromReservation(
    const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
    interpreter::OperandScale operand_scale) {
  // The reservation must be exactly as large as the serialized handler.
  DCHECK_EQ(deserializer()->ExtractCodeObjectSize(
                BSU::BytecodeToIndex(bytecode, operand_scale)),
            chunk.size);
  DCHECK_EQ(chunk.size, chunk.end - chunk.start);
  SkipList::Update(chunk.start, chunk.size);
  // Direct access to Interpreter::dispatch_table_ is possible because this
  // class is declared a friend of Interpreter.
  Address* dispatch_table = isolate()->interpreter()->dispatch_table_;
  const size_t index =
      Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
  // At this point, the HeapObject is not yet a Code object, and thus we don't
  // initialize with code->entry() here. Once deserialization completes, this
  // is overwritten with the final code->entry() value.
  dispatch_table[index] = chunk.start;
#ifdef DEBUG
  RegisterCodeObjectReservation(BSU::BytecodeToIndex(bytecode, operand_scale));
#endif
}
void BuiltinDeserializerAllocator::InitializeBuiltinsTable(
void BuiltinDeserializerAllocator::InitializeFromReservations(
const Heap::Reservation& reservation) {
DCHECK(!AllowHeapAllocation::IsAllowed());
// Initialize the builtins table.
Builtins* builtins = isolate()->builtins();
int reservation_index = 0;
......@@ -91,7 +158,7 @@ void BuiltinDeserializerAllocator::InitializeBuiltinsTable(
Code* deserialize_lazy = builtins->builtin(Builtins::kDeserializeLazy);
for (int i = 0; i < Builtins::builtin_count; i++) {
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
if (i == Builtins::kDeserializeLazy) continue;
if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
......@@ -102,6 +169,18 @@ void BuiltinDeserializerAllocator::InitializeBuiltinsTable(
}
}
// Initialize the interpreter dispatch table.
BSU::ForEachBytecode(
[=, &reservation_index](Bytecode bytecode, OperandScale operand_scale) {
// TODO(jgruber): Replace with DeserializeLazy handler.
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
InitializeHandlerFromReservation(reservation[reservation_index],
bytecode, operand_scale);
reservation_index++;
});
DCHECK_EQ(reservation.size(), reservation_index);
}
......@@ -114,7 +193,8 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
DCHECK_EQ(Builtins::kDeserializeLazy,
isolate()->builtins()->builtin(builtin_id)->builtin_index());
const uint32_t builtin_size = deserializer()->ExtractBuiltinSize(builtin_id);
const uint32_t builtin_size =
deserializer()->ExtractCodeObjectSize(builtin_id);
DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
Handle<HeapObject> o =
......@@ -130,18 +210,20 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
isolate()->builtins()->set_builtin(builtin_id, *o);
#ifdef DEBUG
RegisterBuiltinReservation(builtin_id);
RegisterCodeObjectReservation(builtin_id);
#endif
}
#ifdef DEBUG
void BuiltinDeserializerAllocator::RegisterBuiltinReservation(int builtin_id) {
const auto result = unused_reservations_.emplace(builtin_id);
void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
int code_object_id) {
const auto result = unused_reservations_.emplace(code_object_id);
CHECK(result.second); // False, iff builtin_id was already present in set.
}
void BuiltinDeserializerAllocator::RegisterBuiltinAllocation(int builtin_id) {
const size_t removed_elems = unused_reservations_.erase(builtin_id);
void BuiltinDeserializerAllocator::RegisterCodeObjectAllocation(
int code_object_id) {
const size_t removed_elems = unused_reservations_.erase(code_object_id);
CHECK_EQ(removed_elems, 1);
}
......
......@@ -18,8 +18,11 @@ template <class AllocatorT>
class Deserializer;
class BuiltinDeserializer;
class BuiltinSnapshotUtils;
class BuiltinDeserializerAllocator final {
using BSU = BuiltinSnapshotUtils;
public:
BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer);
......@@ -59,8 +62,8 @@ class BuiltinDeserializerAllocator final {
// deserialization.
// TODO(jgruber): Refactor reservation/allocation logic in deserializers to
// make this less messy.
Heap::Reservation CreateReservationsForEagerBuiltins();
void InitializeBuiltinsTable(const Heap::Reservation& reservation);
Heap::Reservation CreateReservationsForEagerBuiltinsAndHandlers();
void InitializeFromReservations(const Heap::Reservation& reservation);
// Creates reservations and initializes the builtins table in preparation for
// lazily deserializing a single builtin.
......@@ -85,9 +88,14 @@ class BuiltinDeserializerAllocator final {
void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
int builtin_id);
// As above, but for interpreter bytecode handlers.
void InitializeHandlerFromReservation(
const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale);
#ifdef DEBUG
void RegisterBuiltinReservation(int builtin_id);
void RegisterBuiltinAllocation(int builtin_id);
void RegisterCodeObjectReservation(int code_object_id);
void RegisterCodeObjectAllocation(int code_object_id);
std::unordered_set<int> unused_reservations_;
#endif
......
......@@ -5,50 +5,59 @@
#include "src/snapshot/builtin-deserializer.h"
#include "src/assembler-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
// Tracks the builtin currently being deserialized (required for allocation).
class DeserializingBuiltinScope {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::Interpreter;
using interpreter::OperandScale;
// Tracks the code object currently being deserialized (required for
// allocation).
class DeserializingCodeObjectScope {
public:
DeserializingBuiltinScope(BuiltinDeserializer* builtin_deserializer,
int builtin_id)
DeserializingCodeObjectScope(BuiltinDeserializer* builtin_deserializer,
int code_object_id)
: builtin_deserializer_(builtin_deserializer) {
DCHECK_EQ(BuiltinDeserializer::kNoBuiltinId,
builtin_deserializer->current_builtin_id_);
builtin_deserializer->current_builtin_id_ = builtin_id;
DCHECK_EQ(BuiltinDeserializer::kNoCodeObjectId,
builtin_deserializer->current_code_object_id_);
builtin_deserializer->current_code_object_id_ = code_object_id;
}
~DeserializingBuiltinScope() {
builtin_deserializer_->current_builtin_id_ =
BuiltinDeserializer::kNoBuiltinId;
~DeserializingCodeObjectScope() {
builtin_deserializer_->current_code_object_id_ =
BuiltinDeserializer::kNoCodeObjectId;
}
private:
BuiltinDeserializer* builtin_deserializer_;
DISALLOW_COPY_AND_ASSIGN(DeserializingBuiltinScope)
DISALLOW_COPY_AND_ASSIGN(DeserializingCodeObjectScope)
};
BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
const BuiltinSnapshotData* data)
: Deserializer(data, false) {
builtin_offsets_ = data->BuiltinOffsets();
DCHECK_EQ(Builtins::builtin_count, builtin_offsets_.length());
DCHECK(std::is_sorted(builtin_offsets_.begin(), builtin_offsets_.end()));
code_offsets_ = data->BuiltinOffsets();
DCHECK_EQ(BSU::kNumberOfCodeObjects, code_offsets_.length());
DCHECK(std::is_sorted(code_offsets_.begin(), code_offsets_.end()));
Initialize(isolate);
}
void BuiltinDeserializer::DeserializeEagerBuiltins() {
void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK_EQ(0, source()->position());
// Deserialize builtins.
Builtins* builtins = isolate()->builtins();
for (int i = 0; i < Builtins::builtin_count; i++) {
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
// Do nothing. These builtins have been replaced by DeserializeLazy in
// InitializeBuiltinsTable.
......@@ -60,11 +69,52 @@ void BuiltinDeserializer::DeserializeEagerBuiltins() {
}
#ifdef DEBUG
for (int i = 0; i < Builtins::builtin_count; i++) {
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
Object* o = builtins->builtin(i);
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
}
#endif
// Deserialize bytecode handlers.
// The dispatch table has been initialized during memory reservation.
Interpreter* interpreter = isolate()->interpreter();
DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
// TODO(jgruber): Replace with DeserializeLazy handler.
// Bytecodes without a dedicated handler are patched up in a second pass.
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
Code* code = DeserializeHandlerRaw(bytecode, operand_scale);
interpreter->SetBytecodeHandler(bytecode, operand_scale, code);
});
// Patch up holes in the dispatch table.
DCHECK(BSU::BytecodeHasDedicatedHandler(Bytecode::kIllegal,
OperandScale::kSingle));
Code* illegal_handler = interpreter->GetBytecodeHandler(
Bytecode::kIllegal, OperandScale::kSingle);
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
if (BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
Bytecode maybe_reused_bytecode;
if (Bytecodes::ReusesExistingHandler(bytecode, &maybe_reused_bytecode)) {
interpreter->SetBytecodeHandler(
bytecode, operand_scale,
interpreter->GetBytecodeHandler(maybe_reused_bytecode,
operand_scale));
return;
}
DCHECK(!Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
interpreter->SetBytecodeHandler(bytecode, operand_scale, illegal_handler);
});
DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
}
Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
......@@ -77,10 +127,10 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Builtins::IsBuiltinId(builtin_id));
DeserializingBuiltinScope scope(this, builtin_id);
DeserializingCodeObjectScope scope(this, builtin_id);
const int initial_position = source()->position();
source()->set_position(builtin_offsets_[builtin_id]);
source()->set_position(code_offsets_[builtin_id]);
Object* o = ReadDataSingle();
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
......@@ -96,13 +146,38 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
return code;
}
uint32_t BuiltinDeserializer::ExtractBuiltinSize(int builtin_id) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
// Deserializes the bytecode handler for {bytecode, operand_scale} from the
// snapshot. Assumes reservations have already been allocated, hence no heap
// allocation is permitted here.
Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
                                                 OperandScale operand_scale) {
  DCHECK(!AllowHeapAllocation::IsAllowed());
  DCHECK(BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale));
  const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
  // Scope tracks which code object is being deserialized; the allocator uses
  // this to find the pre-reserved memory.
  DeserializingCodeObjectScope scope(this, code_object_id);
  // Seek to this handler's offset in the snapshot stream, deserialize, then
  // restore the stream position so callers see it unchanged.
  const int initial_position = source()->position();
  source()->set_position(code_offsets_[code_object_id]);
  Object* o = ReadDataSingle();
  DCHECK(o->IsCode() && Code::cast(o)->kind() == Code::BYTECODE_HANDLER);
  // Rewind.
  source()->set_position(initial_position);
  // Flush the instruction cache.
  Code* code = Code::cast(o);
  Assembler::FlushICache(isolate(), code->instruction_start(),
                         code->instruction_size());
  return code;
}
uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
DCHECK_LT(code_object_id, BSU::kNumberOfCodeObjects);
const int initial_position = source()->position();
// Grab the size of the code object.
source()->set_position(builtin_offsets_[builtin_id]);
source()->set_position(code_offsets_[code_object_id]);
byte data = source()->Get();
USE(data);
......
......@@ -5,7 +5,9 @@
#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/deserializer.h"
namespace v8 {
......@@ -16,6 +18,8 @@ class BuiltinSnapshotData;
// Deserializes the builtins blob.
class BuiltinDeserializer final
: public Deserializer<BuiltinDeserializerAllocator> {
using BSU = BuiltinSnapshotUtils;
public:
BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);
......@@ -26,7 +30,7 @@ class BuiltinDeserializer final
//
// After this, the instruction cache must be flushed by the caller (we don't
// do it ourselves since the startup serializer batch-flushes all code pages).
void DeserializeEagerBuiltins();
void DeserializeEagerBuiltinsAndHandlers();
// Deserializes the single given builtin. This is used whenever a builtin is
// lazily deserialized at runtime.
......@@ -37,9 +41,13 @@ class BuiltinDeserializer final
// already been allocated.
Code* DeserializeBuiltinRaw(int builtin_id);
// Deserializes the single given bytecode handler. Assumes that reservations
// have already been allocated.
Code* DeserializeHandlerRaw(interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale);
// Extracts the size builtin Code objects (baked into the snapshot).
uint32_t ExtractBuiltinSize(int builtin_id);
uint32_t ExtractCodeObjectSize(int builtin_id);
// BuiltinDeserializer implements its own builtin iteration logic. Make sure
// the RootVisitor API is not used accidentally.
......@@ -47,22 +55,24 @@ class BuiltinDeserializer final
UNREACHABLE();
}
int CurrentBuiltinId() const { return current_builtin_id_; }
int CurrentCodeObjectId() const { return current_code_object_id_; }
private:
// Stores the builtin currently being deserialized. We need this to determine
// where to 'allocate' from during deserialization.
static const int kNoBuiltinId = -1;
int current_builtin_id_ = kNoBuiltinId;
// Stores the code object currently being deserialized. The
// {current_code_object_id} stores the index of the currently-deserialized
// code object within the snapshot (and within {code_offsets_}). We need this
// to determine where to 'allocate' from during deserialization.
static const int kNoCodeObjectId = -1;
int current_code_object_id_ = kNoCodeObjectId;
// The offsets of each builtin within the serialized data. Equivalent to
// BuiltinSerializer::builtin_offsets_ but on the deserialization side.
Vector<const uint32_t> builtin_offsets_;
Vector<const uint32_t> code_offsets_;
// For current_builtin_id_.
friend class DeserializingBuiltinScope;
// For current_code_object_id_.
friend class DeserializingCodeObjectScope;
// For isolate(), IsLazyDeserializationEnabled(), CurrentBuiltinId() and
// For isolate(), IsLazyDeserializationEnabled(), CurrentCodeObjectId() and
// ExtractBuiltinSize().
friend class BuiltinDeserializerAllocator;
};
......
......@@ -4,12 +4,17 @@
#include "src/snapshot/builtin-serializer.h"
#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
#include "src/snapshot/startup-serializer.h"
namespace v8 {
namespace internal {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::OperandScale;
BuiltinSerializer::BuiltinSerializer(Isolate* isolate,
StartupSerializer* startup_serializer)
: Serializer(isolate), startup_serializer_(startup_serializer) {}
......@@ -18,17 +23,35 @@ BuiltinSerializer::~BuiltinSerializer() {
OutputStatistics("BuiltinSerializer");
}
void BuiltinSerializer::SerializeBuiltins() {
for (int i = 0; i < Builtins::builtin_count; i++) {
builtin_offsets_[i] = sink_.Position();
void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
// Serialize builtins.
STATIC_ASSERT(0 == BSU::kFirstBuiltinIndex);
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
SetBuiltinOffset(i, sink_.Position());
SerializeBuiltin(isolate()->builtins()->builtin(i));
}
Pad(); // Pad with kNop since GetInt() might read too far.
// Serialize bytecode handlers.
STATIC_ASSERT(BSU::kNumberOfBuiltins == BSU::kFirstHandlerIndex);
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
SetHandlerOffset(bytecode, operand_scale, sink_.Position());
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
SerializeHandler(
isolate()->interpreter()->GetBytecodeHandler(bytecode, operand_scale));
});
// Pad with kNop since GetInt() might read too far.
Pad();
// Append the offset table. During deserialization, the offset table is
// extracted by BuiltinSnapshotData.
const byte* data = reinterpret_cast<const byte*>(&builtin_offsets_[0]);
int data_length = static_cast<int>(sizeof(builtin_offsets_));
const byte* data = reinterpret_cast<const byte*>(&code_offsets_[0]);
int data_length = static_cast<int>(sizeof(code_offsets_));
sink_.PutRaw(data, data_length, "BuiltinOffsets");
}
......@@ -50,6 +73,13 @@ void BuiltinSerializer::SerializeBuiltin(Code* code) {
object_serializer.Serialize();
}
// Serializes a single bytecode handler code object into the sink.
void BuiltinSerializer::SerializeHandler(Code* code) {
  DCHECK_EQ(Code::BYTECODE_HANDLER, code->kind());
  ObjectSerializer serializer(this, code, &sink_, kPlain, kStartOfObject);
  serializer.Serialize();
}
void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!o->IsSmi());
......@@ -86,5 +116,19 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
sink_.PutInt(cache_index, "partial_snapshot_cache_index");
}
// Records the sink offset at which builtin {builtin_id} starts. The index is
// validated against the builtin range of the shared code-offsets table.
void BuiltinSerializer::SetBuiltinOffset(int builtin_id, uint32_t offset) {
  DCHECK(BSU::IsBuiltinIndex(builtin_id));
  code_offsets_[builtin_id] = offset;
}
// Records the sink offset at which the handler for {bytecode, operand_scale}
// starts.
void BuiltinSerializer::SetHandlerOffset(Bytecode bytecode,
                                         OperandScale operand_scale,
                                         uint32_t offset) {
  const int handler_index = BSU::BytecodeToIndex(bytecode, operand_scale);
  DCHECK(BSU::IsHandlerIndex(handler_index));
  code_offsets_[handler_index] = offset;
}
} // namespace internal
} // namespace v8
......@@ -5,7 +5,9 @@
#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/serializer.h"
namespace v8 {
......@@ -13,31 +15,45 @@ namespace internal {
class StartupSerializer;
// Responsible for serializing all builtin objects during startup snapshot
// creation. Builtins are serialized into a dedicated area of the snapshot.
// Responsible for serializing builtin and bytecode handler objects during
// startup snapshot creation into a dedicated area of the snapshot.
// See snapshot.h for documentation of the snapshot layout.
class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
using BSU = BuiltinSnapshotUtils;
public:
BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~BuiltinSerializer() override;
void SerializeBuiltins();
void SerializeBuiltinsAndHandlers();
private:
void VisitRootPointers(Root root, Object** start, Object** end) override;
void SerializeBuiltin(Code* code);
void SerializeHandler(Code* code);
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
void SetBuiltinOffset(int builtin_id, uint32_t offset);
void SetHandlerOffset(interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale,
uint32_t offset);
// The startup serializer is needed for access to the partial snapshot cache,
// which is used to serialize things like embedded constants.
StartupSerializer* startup_serializer_;
// Stores the starting offset, within the serialized data, of each builtin.
// This is later packed into the builtin snapshot, and used by the builtin
// deserializer to deserialize individual builtins.
uint32_t builtin_offsets_[Builtins::builtin_count];
// Stores the starting offset, within the serialized data, of each code
// object. This is later packed into the builtin snapshot, and used by the
// builtin deserializer to deserialize individual builtins and bytecode
// handlers.
//
// Indices [kFirstBuiltinIndex, kFirstBuiltinIndex + kNumberOfBuiltins[:
// Builtin offsets.
// Indices [kFirstHandlerIndex, kFirstHandlerIndex + kNumberOfHandlers[:
// Bytecode handler offsets.
uint32_t code_offsets_[BuiltinSnapshotUtils::kNumberOfCodeObjects];
DISALLOW_COPY_AND_ASSIGN(BuiltinSerializer);
};
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/builtin-snapshot-utils.h"
namespace v8 {
namespace internal {
// static
// static
// True iff {maybe_index} falls in the builtin half-open range
// [kFirstBuiltinIndex, kFirstBuiltinIndex + kNumberOfBuiltins).
bool BuiltinSnapshotUtils::IsBuiltinIndex(int maybe_index) {
  return maybe_index >= kFirstBuiltinIndex &&
         maybe_index - kFirstBuiltinIndex < kNumberOfBuiltins;
}
// static
// static
// True iff {maybe_index} falls in the handler half-open range
// [kFirstHandlerIndex, kFirstHandlerIndex + kNumberOfHandlers).
bool BuiltinSnapshotUtils::IsHandlerIndex(int maybe_index) {
  const int offset_from_first = maybe_index - kFirstHandlerIndex;
  return 0 <= offset_from_first && offset_from_first < kNumberOfHandlers;
}
// static
// static
// Maps a {bytecode, operand_scale} pair to its code-object index. Handlers
// are laid out after the builtins in groups of Bytecodes::kBytecodeCount
// entries, one group per operand scale.
int BuiltinSnapshotUtils::BytecodeToIndex(Bytecode bytecode,
                                          OperandScale operand_scale) {
  const int base =
      BuiltinSnapshotUtils::kNumberOfBuiltins + static_cast<int>(bytecode);
  switch (operand_scale) {
    case OperandScale::kSingle:
      return base;
    case OperandScale::kDouble:
      return base + Bytecodes::kBytecodeCount;
    case OperandScale::kQuadruple:
      return base + 2 * Bytecodes::kBytecodeCount;
  }
  UNREACHABLE();
}
// static
std::pair<interpreter::Bytecode, interpreter::OperandScale>
BuiltinSnapshotUtils::BytecodeFromIndex(int index) {
DCHECK(IsHandlerIndex(index));
const int x = index - BuiltinSnapshotUtils::kNumberOfBuiltins;
Bytecode bytecode = Bytecodes::FromByte(x % Bytecodes::kBytecodeCount);
switch (x / Bytecodes::kBytecodeCount) { // clang-format off
case 0: return {bytecode, OperandScale::kSingle};
case 1: return {bytecode, OperandScale::kDouble};
case 2: return {bytecode, OperandScale::kQuadruple};
default: UNREACHABLE();
} // clang-format on
}
// static
// static
// Invokes {f} for every {bytecode, operand_scale} pair. NOTE: serialization
// and deserialization both depend on this exact iteration order (all
// bytecodes at one operand scale before moving to the next scale).
void BuiltinSnapshotUtils::ForEachBytecode(
    std::function<void(Bytecode, OperandScale)> f) {
  static const OperandScale kOperandScales[] = {
#define VALUE(Name, _) OperandScale::k##Name,
      OPERAND_SCALE_LIST(VALUE)
#undef VALUE
  };

  for (OperandScale operand_scale : kOperandScales) {
    for (int bytecode_index = 0; bytecode_index < Bytecodes::kBytecodeCount;
         ++bytecode_index) {
      f(Bytecodes::FromByte(bytecode_index), operand_scale);
    }
  }
}
// static
// static
// A {bytecode, operand_scale} pair has a dedicated handler iff a handler
// exists at all and that handler is not shared with another bytecode.
bool BuiltinSnapshotUtils::BytecodeHasDedicatedHandler(
    Bytecode bytecode, OperandScale operand_scale) {
  // Some bytecodes have no handler; their dispatch-table slots hold the
  // kIllegal handler instead.
  if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return false;
  // Some handlers are reused for several bytecodes.
  Bytecode reused;
  return !Bytecodes::ReusesExistingHandler(bytecode, &reused);
}
} // namespace internal
} // namespace v8
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
#define V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
#include <functional>
#include "src/interpreter/interpreter.h"
namespace v8 {
namespace internal {
// Constants and utility methods used by builtin and bytecode handler
// (de)serialization.
class BuiltinSnapshotUtils : public AllStatic {
  // Shorthands for frequently-used interpreter types.
  using Bytecode = interpreter::Bytecode;
  using BytecodeOperands = interpreter::BytecodeOperands;
  using Bytecodes = interpreter::Bytecodes;
  using Interpreter = interpreter::Interpreter;
  using OperandScale = interpreter::OperandScale;

 public:
  // Layout of the code-object index space: builtins occupy the first
  // kNumberOfBuiltins indices; handler indices follow immediately after.
  static const int kFirstBuiltinIndex = 0;
  static const int kNumberOfBuiltins = Builtins::builtin_count;
  static const int kFirstHandlerIndex = kFirstBuiltinIndex + kNumberOfBuiltins;
  static const int kNumberOfHandlers =
      Bytecodes::kBytecodeCount * BytecodeOperands::kOperandScaleCount;

  // The number of code objects in the builtin snapshot.
  // TODO(jgruber): This could be reduced by a bit since not every
  // {bytecode, operand_scale} combination has an associated handler, and some
  // handlers are reused (see BytecodeHasDedicatedHandler).
  static const int kNumberOfCodeObjects = kNumberOfBuiltins + kNumberOfHandlers;

  // Indexes into the offsets vector contained in snapshot.
  // See e.g. BuiltinSerializer::code_offsets_.
  static bool IsBuiltinIndex(int maybe_index);
  static bool IsHandlerIndex(int maybe_index);
  static int BytecodeToIndex(Bytecode bytecode, OperandScale operand_scale);

  // Converts an index back into the {bytecode,operand_scale} tuple. This is
  // the inverse operation of BytecodeToIndex().
  static std::pair<Bytecode, OperandScale> BytecodeFromIndex(int index);

  // Iteration over all {bytecode,operand_scale} pairs. Implemented here since
  // (de)serialization depends on the iteration order.
  static void ForEachBytecode(std::function<void(Bytecode, OperandScale)> f);

  // True, iff the given {bytecode,operand_scale} has a dedicated handler,
  // where dedicated means: a handler exists, and it does not reuse another
  // handler.
  static bool BytecodeHasDedicatedHandler(Bytecode bytecode,
                                          OperandScale operand_scale);
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
......@@ -169,7 +169,8 @@ bool DefaultDeserializerAllocator::ReserveSpace(
}
Heap::Reservation builtin_reservations =
builtin_deserializer->allocator()->CreateReservationsForEagerBuiltins();
builtin_deserializer->allocator()
->CreateReservationsForEagerBuiltinsAndHandlers();
DCHECK(!builtin_reservations.empty());
for (const auto& c : builtin_reservations) {
......@@ -199,7 +200,7 @@ bool DefaultDeserializerAllocator::ReserveSpace(
merged_reservations[CODE_SPACE].pop_back();
}
builtin_deserializer->allocator()->InitializeBuiltinsTable(
builtin_deserializer->allocator()->InitializeFromReservations(
builtin_reservations);
}
......
......@@ -55,8 +55,9 @@ Deserializer<AllocatorT>::~Deserializer() {
template <class AllocatorT>
void Deserializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
Object** end) {
// Builtins are deserialized in a separate pass by the BuiltinDeserializer.
if (root == Root::kBuiltins) return;
// Builtins and bytecode handlers are deserialized in a separate pass by the
// BuiltinDeserializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
// The space must be new space. Any other space would cause ReadChunk to try
// to update the remembered using nullptr as the address.
......
......@@ -53,6 +53,8 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
BuiltinReferenceSerializationMode mode =
startup_serializer_->clear_function_code() ? kCanonicalizeCompileLazy
: kDefault;
......
......@@ -5,6 +5,7 @@
#include "src/snapshot/serializer.h"
#include "src/assembler-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/map.h"
#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/snapshot/natives.h"
......@@ -92,8 +93,9 @@ bool Serializer<AllocatorT>::MustBeDeferred(HeapObject* object) {
template <class AllocatorT>
void Serializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
Object** end) {
// Builtins are serialized in a separate pass by the BuiltinSerializer.
if (root == Root::kBuiltins) return;
// Builtins and bytecode handlers are serialized in a separate pass by the
// BuiltinSerializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
......@@ -210,6 +212,13 @@ bool Serializer<AllocatorT>::SerializeBuiltinReference(
return true;
}
// static
template <class AllocatorT>
bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) {
  // A bytecode handler is a Code object whose kind is BYTECODE_HANDLER;
  // any non-Code heap object trivially does not qualify.
  return obj->IsCode() && Code::cast(obj)->kind() == Code::BYTECODE_HANDLER;
}
template <class AllocatorT>
void Serializer<AllocatorT>::PutRoot(
int root_index, HeapObject* object,
......
......@@ -194,6 +194,9 @@ class Serializer : public SerializerDeserializer {
HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
int skip, BuiltinReferenceSerializationMode mode = kDefault);
// Returns true if the given heap object is a bytecode handler code object.
static bool ObjectIsBytecodeHandler(HeapObject* obj);
inline void FlushSkip(int skip) {
if (skip != 0) {
sink_.Put(kSkip, "SkipFromSerializeObject");
......
......@@ -350,7 +350,8 @@ Vector<const byte> BuiltinSnapshotData::Payload() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
const byte* payload = data_ + kHeaderSize + reservations_size;
int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
const int builtin_offsets_size =
BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + payload_length);
DCHECK_GT(payload_length, builtin_offsets_size);
......@@ -361,13 +362,15 @@ Vector<const uint32_t> BuiltinSnapshotData::BuiltinOffsets() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
const byte* payload = data_ + kHeaderSize + reservations_size;
int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
const int builtin_offsets_size =
BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + payload_length);
DCHECK_GT(payload_length, builtin_offsets_size);
const uint32_t* data = reinterpret_cast<const uint32_t*>(
payload + payload_length - builtin_offsets_size);
return Vector<const uint32_t>(data, Builtins::builtin_count);
return Vector<const uint32_t>(data,
BuiltinSnapshotUtils::kNumberOfCodeObjects);
}
} // namespace internal
......
......@@ -45,7 +45,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// Deserialize eager builtins from the builtin snapshot. Note that deferred
// objects must have been deserialized prior to this.
builtin_deserializer.DeserializeEagerBuiltins();
builtin_deserializer.DeserializeEagerBuiltinsAndHandlers();
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
......
......@@ -28,6 +28,7 @@ StartupSerializer::~StartupSerializer() {
void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
DCHECK(!obj->IsJSFunction());
if (clear_function_code() && obj->IsBytecodeArray()) {
......
......@@ -1347,6 +1347,8 @@
'snapshot/builtin-serializer-allocator.h',
'snapshot/builtin-serializer.cc',
'snapshot/builtin-serializer.h',
'snapshot/builtin-snapshot-utils.cc',
'snapshot/builtin-snapshot-utils.h',
'snapshot/code-serializer.cc',
'snapshot/code-serializer.h',
'snapshot/default-deserializer-allocator.cc',
......
......@@ -138,7 +138,7 @@ static StartupBlobs Serialize(v8::Isolate* isolate) {
ser.SerializeStrongReferences();
i::BuiltinSerializer builtin_serializer(internal_isolate, &ser);
builtin_serializer.SerializeBuiltins();
builtin_serializer.SerializeBuiltinsAndHandlers();
ser.SerializeWeakReferencesAndDeferred();
SnapshotData startup_snapshot(&ser);
......@@ -385,7 +385,7 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
partial_serializer.Serialize(&raw_context, false);
i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
builtin_serializer.SerializeBuiltins();
builtin_serializer.SerializeBuiltinsAndHandlers();
startup_serializer.SerializeWeakReferencesAndDeferred();
......@@ -510,7 +510,7 @@ static void PartiallySerializeCustomContext(
partial_serializer.Serialize(&raw_context, false);
i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
builtin_serializer.SerializeBuiltins();
builtin_serializer.SerializeBuiltinsAndHandlers();
startup_serializer.SerializeWeakReferencesAndDeferred();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment