Commit 9e4543a2 authored by jgruber, committed by Commit Bot

Revert lazy bytecode handler support

Speculative revert due to canary crashes. I'll begin relanding these
one-by-one next week.

This bundles two reverts:

Revert "[snapshot] Lazy-deserialize bytecode handlers"
This reverts commit b4587369.

Revert "[interpreter] Remove mechanism for bytecode handler reuse"
This reverts commit 07fc87a2.

TBR: rmcilroy@chromium.org,mlippautz@chromium.org,yangguo@chromium.org
Bug: chromium:783708
Change-Id: I6f8314b9eeafd9412a1c69843bc242e7da240eee
Reviewed-on: https://chromium-review.googlesource.com/763428
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49294}
parent 693d7adb
......@@ -960,10 +960,6 @@ DEFINE_VALUE_IMPLICATION(runtime_call_stats, runtime_stats, 1)
// snapshot-common.cc
DEFINE_BOOL(lazy_deserialization, true,
"Deserialize code lazily from the snapshot.")
DEFINE_BOOL(lazy_handler_deserialization, false,
"Deserialize bytecode handlers lazily from the snapshot.")
DEFINE_IMPLICATION(lazy_handler_deserialization, lazy_deserialization)
DEFINE_IMPLICATION(future, lazy_handler_deserialization)
DEFINE_BOOL(trace_lazy_deserialization, false, "Trace lazy deserialization.")
DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
......
......@@ -2581,9 +2581,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kApiSymbolTableRootIndex:
case kApiPrivateSymbolTableRootIndex:
case kMessageListenersRootIndex:
case kDeserializeLazyHandlerRootIndex:
case kDeserializeLazyHandlerWideRootIndex:
case kDeserializeLazyHandlerExtraWideRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
......@@ -6247,23 +6244,6 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
}
bool Heap::IsDeserializeLazyHandler(Code* code) {
return (code == deserialize_lazy_handler() ||
code == deserialize_lazy_handler_wide() ||
code == deserialize_lazy_handler_extra_wide());
}
void Heap::SetDeserializeLazyHandler(Code* code) {
set_deserialize_lazy_handler(code);
}
void Heap::SetDeserializeLazyHandlerWide(Code* code) {
set_deserialize_lazy_handler_wide(code);
}
void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
set_deserialize_lazy_handler_extra_wide(code);
}
size_t Heap::NumberOfTrackedHeapObjectTypes() {
return ObjectStats::OBJECT_STATS_COUNT;
......
......@@ -241,11 +241,6 @@ using v8::MemoryPressureLevel;
V(FixedArray, serialized_templates, SerializedTemplates) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
/* DeserializeLazy handlers for lazy bytecode deserialization */ \
V(Object, deserialize_lazy_handler, DeserializeLazyHandler) \
V(Object, deserialize_lazy_handler_wide, DeserializeLazyHandlerWide) \
V(Object, deserialize_lazy_handler_extra_wide, \
DeserializeLazyHandlerExtraWide) \
/* JS Entries */ \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode)
......@@ -1099,11 +1094,6 @@ class Heap {
void RegisterStrongRoots(Object** start, Object** end);
void UnregisterStrongRoots(Object** start);
bool IsDeserializeLazyHandler(Code* code);
void SetDeserializeLazyHandler(Code* code);
void SetDeserializeLazyHandlerWide(Code* code);
void SetDeserializeLazyHandlerExtraWide(Code* code);
// ===========================================================================
// Inline allocation. ========================================================
// ===========================================================================
......
......@@ -11,7 +11,6 @@
#include "src/factory.h"
#include "src/heap-symbols.h"
#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/layout-descriptor.h"
#include "src/lookup-cache.h"
......@@ -630,11 +629,6 @@ void Heap::CreateInitialObjects() {
set_noscript_shared_function_infos(Smi::kZero);
STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
set_deserialize_lazy_handler(Smi::kZero);
set_deserialize_lazy_handler_wide(Smi::kZero);
set_deserialize_lazy_handler_extra_wide(Smi::kZero);
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();
......
......@@ -8,7 +8,6 @@
#include <cstdint>
#include <iosfwd>
#include <string>
#include <vector>
#include "src/globals.h"
#include "src/interpreter/bytecode-operands.h"
......@@ -676,12 +675,6 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode == Bytecode::kDebugBreakWide;
}
// Returns true if the bytecode can be lazily deserialized.
static constexpr bool IsLazy(Bytecode bytecode) {
// Currently, all handlers are deserialized lazily.
return true;
}
// Returns the number of values which |bytecode| returns.
static constexpr size_t ReturnCount(Bytecode bytecode) {
return bytecode == Bytecode::kReturn ? 1 : 0;
......@@ -840,6 +833,26 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
UNREACHABLE();
}
// Returns true, iff the given bytecode reuses an existing handler. If so,
// the bytecode of the reused handler is written into {reused}.
static bool ReusesExistingHandler(Bytecode bytecode, Bytecode* reused) {
switch (bytecode) {
case Bytecode::kLdaImmutableContextSlot:
STATIC_ASSERT(static_cast<int>(Bytecode::kLdaContextSlot) <
static_cast<int>(Bytecode::kLdaImmutableContextSlot));
*reused = Bytecode::kLdaContextSlot;
return true;
case Bytecode::kLdaImmutableCurrentContextSlot:
STATIC_ASSERT(
static_cast<int>(Bytecode::kLdaCurrentContextSlot) <
static_cast<int>(Bytecode::kLdaImmutableCurrentContextSlot));
*reused = Bytecode::kLdaCurrentContextSlot;
return true;
default:
return false;
}
}
// Returns the size of |operand_type| for |operand_scale|.
static OperandSize SizeOfOperand(OperandType operand_type,
OperandScale operand_scale) {
......
......@@ -1527,18 +1527,6 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
Dispatch();
}
void InterpreterAssembler::DeserializeLazyAndDispatch() {
Node* context = GetContext();
Node* bytecode_offset = BytecodeOffset();
Node* bytecode = LoadBytecode(bytecode_offset);
Node* target_handler =
CallRuntime(Runtime::kInterpreterDeserializeLazy, context,
SmiTag(bytecode), SmiConstant(operand_scale()));
DispatchToBytecodeHandler(target_handler, bytecode_offset);
}
} // namespace interpreter
} // namespace internal
} // namespace v8
......@@ -245,9 +245,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void ToNumberOrNumeric(Object::Conversion mode);
// Lazily deserializes the current bytecode's handler and tail-calls into it.
void DeserializeLazyAndDispatch();
private:
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();
......
......@@ -311,14 +311,8 @@ IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
Node* reg_index = BytecodeOperandReg(0);
Node* context = LoadRegister(reg_index);
Node* slot_index = BytecodeOperandIdx(1);
Node* depth = BytecodeOperandUImm(2);
Node* slot_context = GetContextAtDepth(context, depth);
Node* result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
// Same as LdaContextSlot, should never be called.
UNREACHABLE();
}
// LdaCurrentContextSlot <slot_index>
......@@ -336,11 +330,8 @@ IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) {
//
// Load the object in |slot_index| of the current context into the accumulator.
IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
Node* slot_index = BytecodeOperandIdx(0);
Node* slot_context = GetContext();
Node* result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
// Same as LdaCurrentContextSlot, should never be called.
UNREACHABLE();
}
// StaContextSlot <context> <slot_index> <depth>
......@@ -3252,71 +3243,6 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
return code;
}
namespace {
// DeserializeLazy
//
// Deserialize the bytecode handler, store it in the dispatch table, and
// finally jump there (preserving existing args).
// We manually create a custom assembler instead of using the helper macros
// above since no corresponding bytecode exists.
class DeserializeLazyAssembler : public InterpreterAssembler {
public:
static const Bytecode kFakeBytecode = Bytecode::kIllegal;
explicit DeserializeLazyAssembler(compiler::CodeAssemblerState* state,
OperandScale operand_scale)
: InterpreterAssembler(state, kFakeBytecode, operand_scale) {}
static void Generate(compiler::CodeAssemblerState* state,
OperandScale operand_scale) {
DeserializeLazyAssembler assembler(state, operand_scale);
state->SetInitialDebugInformation("DeserializeLazy", __FILE__, __LINE__);
assembler.GenerateImpl();
}
private:
void GenerateImpl() { DeserializeLazyAndDispatch(); }
DISALLOW_COPY_AND_ASSIGN(DeserializeLazyAssembler);
};
} // namespace
Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
OperandScale operand_scale) {
Zone zone(isolate->allocator(), ZONE_NAME);
const size_t return_count = 0;
std::string debug_name = std::string("DeserializeLazy");
if (operand_scale > OperandScale::kSingle) {
Bytecode prefix_bytecode =
Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
debug_name = debug_name.append(Bytecodes::ToString(prefix_bytecode));
}
InterpreterDispatchDescriptor descriptor(isolate);
compiler::CodeAssemblerState state(isolate, &zone, descriptor,
Code::BYTECODE_HANDLER, debug_name.c_str(),
return_count);
DeserializeLazyAssembler::Generate(&state, operand_scale);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PROFILE(isolate,
CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
AbstractCode::cast(*code), debug_name.c_str()));
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
OFStream os(stdout);
code->Disassemble(debug_name.c_str(), os);
os << std::flush;
}
#endif // ENABLE_DISASSEMBLER
return code;
}
} // namespace interpreter
} // namespace internal
} // namespace v8
......@@ -15,9 +15,6 @@ namespace interpreter {
extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
OperandScale operand_scale);
extern Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
OperandScale operand_scale);
} // namespace interpreter
} // namespace internal
} // namespace v8
......
......@@ -19,7 +19,6 @@
#include "src/objects/shared-function-info.h"
#include "src/parsing/parse-info.h"
#include "src/setup-isolate.h"
#include "src/snapshot/snapshot.h"
#include "src/visitors.h"
namespace v8 {
......@@ -88,31 +87,6 @@ Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
}
}
Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
Bytecode bytecode, OperandScale operand_scale) {
Code* code = GetBytecodeHandler(bytecode, operand_scale);
// Already deserialized? Then just return the handler.
if (!isolate_->heap()->IsDeserializeLazyHandler(code)) return code;
DCHECK(FLAG_lazy_handler_deserialization);
if (FLAG_trace_lazy_deserialization) {
PrintF("Lazy-deserializing handler %s\n",
Bytecodes::ToString(bytecode, operand_scale).c_str());
}
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
code = Snapshot::DeserializeHandler(isolate_, bytecode, operand_scale);
DCHECK(code->IsCode());
DCHECK_EQ(code->kind(), Code::BYTECODE_HANDLER);
DCHECK(!isolate_->heap()->IsDeserializeLazyHandler(code));
SetBytecodeHandler(bytecode, operand_scale, code);
return code;
}
Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale) {
DCHECK(IsDispatchTableInitialized());
......
......@@ -42,11 +42,6 @@ class Interpreter {
FunctionLiteral* literal,
Isolate* isolate);
// If the bytecode handler for |bytecode| and |operand_scale| has not yet
// been loaded, deserialize it. Then return the handler.
Code* GetAndMaybeDeserializeBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale);
// Return bytecode handler for |bytecode| and |operand_scale|.
Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
......
......@@ -56,28 +56,33 @@ void SetupInterpreter::InstallBytecodeHandlers(Interpreter* interpreter) {
}
}
// Generate the DeserializeLazy handlers, one for each operand scale.
Heap* heap = interpreter->isolate_->heap();
DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler());
heap->SetDeserializeLazyHandler(*GenerateDeserializeLazyHandler(
interpreter->isolate_, OperandScale::kSingle));
DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_wide());
heap->SetDeserializeLazyHandlerWide(*GenerateDeserializeLazyHandler(
interpreter->isolate_, OperandScale::kDouble));
DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_extra_wide());
heap->SetDeserializeLazyHandlerExtraWide(*GenerateDeserializeLazyHandler(
interpreter->isolate_, OperandScale::kQuadruple));
// Initialization should have been successful.
DCHECK(interpreter->IsDispatchTableInitialized());
}
// static
bool SetupInterpreter::ReuseExistingHandler(Address* dispatch_table,
Bytecode bytecode,
OperandScale operand_scale) {
Bytecode reused_bytecode;
if (!Bytecodes::ReusesExistingHandler(bytecode, &reused_bytecode)) {
return false;
}
size_t index = Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
dispatch_table[index] = dispatch_table[Interpreter::GetDispatchTableIndex(
reused_bytecode, operand_scale)];
return true;
}
// static
void SetupInterpreter::InstallBytecodeHandler(Isolate* isolate,
Address* dispatch_table,
Bytecode bytecode,
OperandScale operand_scale) {
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
if (ReuseExistingHandler(dispatch_table, bytecode, operand_scale)) return;
size_t index = Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
Handle<Code> code = GenerateBytecodeHandler(isolate, bytecode, operand_scale);
......
......@@ -19,6 +19,10 @@ class SetupInterpreter {
static void InstallBytecodeHandlers(Interpreter* interpreter);
private:
// In the case of bytecodes that share handler implementations, copy the code
// into the bytecode's dispatcher table entry and return true.
static bool ReuseExistingHandler(Address* dispatch_table, Bytecode bytecode,
OperandScale operand_scale);
// Generates handler for given |bytecode| and |operand_scale|
// and installs it into the |dispatch_table|.
static void InstallBytecodeHandler(Isolate* isolate, Address* dispatch_table,
......
......@@ -1598,7 +1598,6 @@ void Logger::LogBytecodeHandlers() {
interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(index);
if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
if (isolate_->heap()->IsDeserializeLazyHandler(code)) continue;
std::string bytecode_name =
interpreter::Bytecodes::ToString(bytecode, operand_scale);
PROFILE(isolate_, CodeCreateEvent(
......
......@@ -21,17 +21,12 @@
#include "src/isolate-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::OperandScale;
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
......@@ -52,22 +47,20 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
SharedFunctionInfo* shared = interpreted_frame->function()->shared();
BytecodeArray* bytecode_array = shared->bytecode_array();
int bytecode_offset = interpreted_frame->GetBytecodeOffset();
Bytecode bytecode = Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
if (bytecode == Bytecode::kReturn) {
interpreter::Bytecode bytecode =
interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
if (bytecode == interpreter::Bytecode::kReturn) {
// If we are returning, reset the bytecode array on the interpreted stack
// frame to the non-debug variant so that the interpreter entry trampoline
// sees the return bytecode rather than the DebugBreak.
interpreted_frame->PatchBytecodeArray(bytecode_array);
}
// We do not have to deal with operand scale here. If the bytecode at the
// break is prefixed by operand scaling, we would have patched over the
// scaling prefix. We now simply dispatch to the handler for the prefix.
OperandScale operand_scale = OperandScale::kSingle;
Code* code = isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
bytecode, operand_scale);
return MakePair(isolate->debug()->return_value(), code);
return MakePair(isolate->debug()->return_value(),
isolate->interpreter()->GetBytecodeHandler(
bytecode, interpreter::OperandScale::kSingle));
}
......
......@@ -526,7 +526,7 @@ RUNTIME_FUNCTION(Runtime_DeserializeLazy) {
DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_id));
if (FLAG_trace_lazy_deserialization) {
PrintF("Lazy-deserializing builtin %s\n", Builtins::name(builtin_id));
PrintF("Lazy-deserializing %s\n", Builtins::name(builtin_id));
}
Code* code = Snapshot::DeserializeBuiltin(isolate, builtin_id);
......
......@@ -13,34 +13,12 @@
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/ostreams.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
RUNTIME_FUNCTION(Runtime_InterpreterDeserializeLazy) {
HandleScope scope(isolate);
DCHECK(FLAG_lazy_handler_deserialization);
DCHECK(FLAG_lazy_deserialization);
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(bytecode_int, 0);
CONVERT_SMI_ARG_CHECKED(operand_scale_int, 1);
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::OperandScale;
Bytecode bytecode = Bytecodes::FromByte(bytecode_int);
OperandScale operand_scale = static_cast<OperandScale>(operand_scale_int);
return isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
bytecode, operand_scale);
}
RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
......
......@@ -222,7 +222,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F) \
F(InterpreterDeserializeLazy, 2, 1) \
F(InterpreterNewClosure, 4, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
......
......@@ -12,23 +12,15 @@
namespace v8 {
namespace internal {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::Interpreter;
using interpreter::OperandScale;
BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer)
: deserializer_(deserializer) {}
BuiltinDeserializerAllocator::~BuiltinDeserializerAllocator() {
delete handler_allocations_;
}
namespace {
int HandlerAllocationIndex(int code_object_id) {
return code_object_id - BuiltinSnapshotUtils::kFirstHandlerIndex;
}
} // namespace
Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
int size) {
const int code_object_id = deserializer()->CurrentCodeObjectId();
......@@ -44,18 +36,18 @@ Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
DCHECK(Internals::HasHeapObjectTag(obj));
return HeapObject::cast(obj)->address();
} else if (BSU::IsHandlerIndex(code_object_id)) {
if (handler_allocation_ != nullptr) {
// Lazy deserialization.
DCHECK_NULL(handler_allocations_);
return handler_allocation_;
} else {
// Eager deserialization.
DCHECK_NULL(handler_allocation_);
DCHECK_NOT_NULL(handler_allocations_);
int index = HandlerAllocationIndex(code_object_id);
DCHECK_NOT_NULL(handler_allocations_->at(index));
return handler_allocations_->at(index);
}
Bytecode bytecode;
OperandScale operand_scale;
std::tie(bytecode, operand_scale) = BSU::BytecodeFromIndex(code_object_id);
Address* dispatch_table = isolate()->interpreter()->dispatch_table_;
const size_t index =
Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
Object* obj = HeapObject::FromAddress(dispatch_table[index]);
DCHECK(Internals::HasHeapObjectTag(obj));
return HeapObject::cast(obj)->address();
}
UNREACHABLE();
......@@ -67,8 +59,8 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
// Reservations for builtins.
// DeserializeLazy is always the first builtin reservation (to simplify logic
// in InitializeBuiltinsTable).
// DeserializeLazy is always the first reservation (to simplify logic in
// InitializeBuiltinsTable).
{
DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
uint32_t builtin_size =
......@@ -81,7 +73,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
if (i == Builtins::kDeserializeLazy) continue;
// Skip lazy builtins. These will be replaced by the DeserializeLazy code
// object in InitializeFromReservations and thus require no reserved space.
// object in InitializeBuiltinsTable and thus require no reserved space.
if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
continue;
}
......@@ -95,17 +87,9 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
BSU::ForEachBytecode(
[=, &result](Bytecode bytecode, OperandScale operand_scale) {
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
// Bytecodes without a handler don't require a reservation.
return;
} else if (FLAG_lazy_handler_deserialization &&
deserializer()->IsLazyDeserializationEnabled() &&
Bytecodes::IsLazy(bytecode)) {
// Skip lazy handlers. These will be replaced by the DeserializeLazy
// code object in InitializeFromReservations and thus require no
// reserved space.
return;
}
// TODO(jgruber): Replace with DeserializeLazy handler.
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
uint32_t handler_size = deserializer()->ExtractCodeObjectSize(index);
......@@ -140,10 +124,14 @@ void BuiltinDeserializerAllocator::InitializeHandlerFromReservation(
SkipList::Update(chunk.start, chunk.size);
DCHECK_NOT_NULL(handler_allocations_);
const int index =
HandlerAllocationIndex(BSU::BytecodeToIndex(bytecode, operand_scale));
handler_allocations_->at(index) = chunk.start;
Address* dispatch_table = isolate()->interpreter()->dispatch_table_;
const size_t index =
Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
// At this point, the HeapObject is not yet a Code object, and thus we don't
// initialize with code->entry() here. Once deserialization completes, this
// is overwritten with the final code->entry() value.
dispatch_table[index] = chunk.start;
#ifdef DEBUG
RegisterCodeObjectReservation(BSU::BytecodeToIndex(bytecode, operand_scale));
......@@ -181,23 +169,13 @@ void BuiltinDeserializerAllocator::InitializeFromReservations(
}
}
// Initialize interpreter bytecode handler reservations.
DCHECK_NULL(handler_allocations_);
handler_allocations_ = new std::vector<Address>(BSU::kNumberOfHandlers);
// Initialize the interpreter dispatch table.
BSU::ForEachBytecode(
[=, &reservation_index](Bytecode bytecode, OperandScale operand_scale) {
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
// Bytecodes without a handler don't have a reservation.
return;
} else if (FLAG_lazy_handler_deserialization &&
deserializer()->IsLazyDeserializationEnabled() &&
Bytecodes::IsLazy(bytecode)) {
// Likewise, bytecodes with lazy handlers don't either.
return;
}
// TODO(jgruber): Replace with DeserializeLazy handler.
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
InitializeHandlerFromReservation(reservation[reservation_index],
bytecode, operand_scale);
reservation_index++;
......@@ -236,28 +214,6 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
#endif
}
void BuiltinDeserializerAllocator::ReserveForHandler(
Bytecode bytecode, OperandScale operand_scale) {
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
const uint32_t handler_size =
deserializer()->ExtractCodeObjectSize(code_object_id);
DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
handler_allocation_ =
isolate()->factory()->NewCodeForDeserialization(handler_size)->address();
// Note: After this point and until deserialization finishes, heap allocation
// is disallowed. We currently can't safely assert this since we'd need to
// pass the DisallowHeapAllocation scope out of this function.
#ifdef DEBUG
RegisterCodeObjectReservation(code_object_id);
#endif
}
#ifdef DEBUG
void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
int code_object_id) {
......
......@@ -9,7 +9,6 @@
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
#include "src/snapshot/serializer-common.h"
namespace v8 {
......@@ -23,15 +22,11 @@ class BuiltinSnapshotUtils;
class BuiltinDeserializerAllocator final {
using BSU = BuiltinSnapshotUtils;
using Bytecode = interpreter::Bytecode;
using OperandScale = interpreter::OperandScale;
public:
BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer);
~BuiltinDeserializerAllocator();
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
......@@ -42,16 +37,8 @@ class BuiltinDeserializerAllocator final {
// deserialization) in order to avoid having to patch builtin references
// later on. See also the kBuiltin case in deserializer.cc.
//
// There are three ways that we use to reserve / allocate space. In all
// cases, required objects are requested from the GC prior to
// deserialization. 1. pre-allocated builtin code objects are written into
// the builtins table (this is to make deserialization of builtin references
// easier). Pre-allocated handler code objects are 2. stored in the
// {handler_allocations_} vector (at eager-deserialization time) and 3.
// stored in {handler_allocation_} (at lazy-deserialization time).
//
// Allocate simply returns the pre-allocated object prepared by
// InitializeFromReservations.
// InitializeBuiltinsTable.
Address Allocate(AllocationSpace space, int size);
void MoveToNextChunk(AllocationSpace space) { UNREACHABLE(); }
......@@ -82,10 +69,6 @@ class BuiltinDeserializerAllocator final {
// lazily deserializing a single builtin.
void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
// Pre-allocates a code object preparation for lazily deserializing a single
// handler.
void ReserveForHandler(Bytecode bytecode, OperandScale operand_scale);
#ifdef DEBUG
bool ReservationsAreFullyUsed() const;
#endif
......@@ -122,13 +105,6 @@ class BuiltinDeserializerAllocator final {
// construction since that makes vtable-based checks fail.
Deserializer<BuiltinDeserializerAllocator>* const deserializer_;
// Stores allocated space for bytecode handlers during eager deserialization.
std::vector<Address>* handler_allocations_ = nullptr;
// Stores the allocated space for a single handler during lazy
// deserialization.
Address handler_allocation_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator)
};
......
......@@ -12,8 +12,10 @@
namespace v8 {
namespace internal {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::Interpreter;
using interpreter::OperandScale;
// Tracks the code object currently being deserialized (required for
// allocation).
......@@ -58,7 +60,7 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
// Do nothing. These builtins have been replaced by DeserializeLazy in
// InitializeFromReservations.
// InitializeBuiltinsTable.
DCHECK_EQ(builtins->builtin(Builtins::kDeserializeLazy),
builtins->builtin(i));
} else {
......@@ -75,31 +77,40 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
// Deserialize bytecode handlers.
// The dispatch table has been initialized during memory reservation.
Interpreter* interpreter = isolate()->interpreter();
DCHECK(!isolate()->interpreter()->IsDispatchTableInitialized());
DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
// Bytecodes without a dedicated handler are patched up in a second pass.
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
// TODO(jgruber): Replace with DeserializeLazy handler.
// If lazy-deserialization is enabled and the current bytecode is lazy,
// we write the generic LazyDeserialization handler into the dispatch table
// and deserialize later upon first use.
Code* code = (FLAG_lazy_handler_deserialization &&
IsLazyDeserializationEnabled() && Bytecodes::IsLazy(bytecode))
? GetDeserializeLazyHandler(operand_scale)
: DeserializeHandlerRaw(bytecode, operand_scale);
// Bytecodes without a dedicated handler are patched up in a second pass.
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
Code* code = DeserializeHandlerRaw(bytecode, operand_scale);
interpreter->SetBytecodeHandler(bytecode, operand_scale, code);
});
// Patch up holes in the dispatch table.
DCHECK(BSU::BytecodeHasDedicatedHandler(Bytecode::kIllegal,
OperandScale::kSingle));
Code* illegal_handler = interpreter->GetBytecodeHandler(
Bytecode::kIllegal, OperandScale::kSingle);
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
if (BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
Bytecode maybe_reused_bytecode;
if (Bytecodes::ReusesExistingHandler(bytecode, &maybe_reused_bytecode)) {
interpreter->SetBytecodeHandler(
bytecode, operand_scale,
interpreter->GetBytecodeHandler(maybe_reused_bytecode,
operand_scale));
return;
}
DCHECK(!Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
interpreter->SetBytecodeHandler(bytecode, operand_scale, illegal_handler);
});
......@@ -112,13 +123,6 @@ Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
return DeserializeBuiltinRaw(builtin_id);
}
Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
OperandScale operand_scale) {
allocator()->ReserveForHandler(bytecode, operand_scale);
DisallowHeapAllocation no_gc;
return DeserializeHandlerRaw(bytecode, operand_scale);
}
Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Builtins::IsBuiltinId(builtin_id));
......@@ -145,7 +149,7 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
OperandScale operand_scale) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
DCHECK(BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale));
const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
DeserializingCodeObjectScope scope(this, code_object_id);
......@@ -186,20 +190,5 @@ uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
return result;
}
Code* BuiltinDeserializer::GetDeserializeLazyHandler(
interpreter::OperandScale operand_scale) const {
STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
switch (operand_scale) {
case OperandScale::kSingle:
return Code::cast(isolate()->heap()->deserialize_lazy_handler());
case OperandScale::kDouble:
return Code::cast(isolate()->heap()->deserialize_lazy_handler_wide());
case OperandScale::kQuadruple:
return Code::cast(
isolate()->heap()->deserialize_lazy_handler_extra_wide());
}
UNREACHABLE();
}
} // namespace internal
} // namespace v8
......@@ -19,8 +19,6 @@ class BuiltinSnapshotData;
class BuiltinDeserializer final
: public Deserializer<BuiltinDeserializerAllocator> {
using BSU = BuiltinSnapshotUtils;
using Bytecode = interpreter::Bytecode;
using OperandScale = interpreter::OperandScale;
public:
BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);
......@@ -38,10 +36,6 @@ class BuiltinDeserializer final
// lazily deserialized at runtime.
Code* DeserializeBuiltin(int builtin_id);
// Deserializes the single given handler. This is used whenever a handler is
// lazily deserialized at runtime.
Code* DeserializeHandler(Bytecode bytecode, OperandScale operand_scale);
private:
// Deserializes the single given builtin. Assumes that reservations have
// already been allocated.
......@@ -49,7 +43,8 @@ class BuiltinDeserializer final
// Deserializes the single given bytecode handler. Assumes that reservations
// have already been allocated.
Code* DeserializeHandlerRaw(Bytecode bytecode, OperandScale operand_scale);
Code* DeserializeHandlerRaw(interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale);
// Extracts the size builtin Code objects (baked into the snapshot).
uint32_t ExtractCodeObjectSize(int builtin_id);
......@@ -62,9 +57,6 @@ class BuiltinDeserializer final
int CurrentCodeObjectId() const { return current_code_object_id_; }
// Convenience function to grab the handler off the heap's strong root list.
Code* GetDeserializeLazyHandler(OperandScale operand_scale) const;
private:
// Stores the code object currently being deserialized. The
// {current_code_object_id} stores the index of the currently-deserialized
......
......@@ -39,22 +39,12 @@ void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
SetHandlerOffset(bytecode, operand_scale, sink_.Position());
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
SerializeHandler(
isolate()->interpreter()->GetBytecodeHandler(bytecode, operand_scale));
});
STATIC_ASSERT(BSU::kFirstHandlerIndex + BSU::kNumberOfHandlers ==
BSU::kNumberOfCodeObjects);
// The DeserializeLazy handlers are serialized by the StartupSerializer
// during strong root iteration.
DCHECK(isolate()->heap()->deserialize_lazy_handler()->IsCode());
DCHECK(isolate()->heap()->deserialize_lazy_handler_wide()->IsCode());
DCHECK(isolate()->heap()->deserialize_lazy_handler_extra_wide()->IsCode());
// Pad with kNop since GetInt() might read too far.
Pad();
......@@ -84,7 +74,7 @@ void BuiltinSerializer::SerializeBuiltin(Code* code) {
}
void BuiltinSerializer::SerializeHandler(Code* code) {
DCHECK(ObjectIsBytecodeHandler(code));
DCHECK_EQ(Code::BYTECODE_HANDLER, code->kind());
ObjectSerializer object_serializer(this, code, &sink_, kPlain,
kStartOfObject);
object_serializer.Serialize();
......
......@@ -63,5 +63,19 @@ void BuiltinSnapshotUtils::ForEachBytecode(
}
}
// static
bool BuiltinSnapshotUtils::BytecodeHasDedicatedHandler(
Bytecode bytecode, OperandScale operand_scale) {
// Some bytecodes don't have a handler. The dispatch table contains the
// kIllegal handler in these slots.
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return false;
// Some handlers are reused for several bytecodes.
Bytecode dummy;
if (Bytecodes::ReusesExistingHandler(bytecode, &dummy)) return false;
return true;
}
} // namespace internal
} // namespace v8
......@@ -31,8 +31,8 @@ class BuiltinSnapshotUtils : public AllStatic {
// The number of code objects in the builtin snapshot.
// TODO(jgruber): This could be reduced by a bit since not every
// {bytecode, operand_scale} combination has an associated handler
// (see Bytecodes::BytecodeHasHandler).
// {bytecode, operand_scale} combination has an associated handler, and some
// handlers are reused (see BytecodeHasDedicatedHandler).
static const int kNumberOfCodeObjects = kNumberOfBuiltins + kNumberOfHandlers;
// Indexes into the offsets vector contained in snapshot.
......@@ -48,6 +48,11 @@ class BuiltinSnapshotUtils : public AllStatic {
// Iteration over all {bytecode,operand_scale} pairs. Implemented here since
// (de)serialization depends on the iteration order.
static void ForEachBytecode(std::function<void(Bytecode, OperandScale)> f);
// True, iff the given {bytecode,operand_scale} has a dedicated handler, where
// dedicated means: a handler exists, and it does not reuse another handler.
static bool BytecodeHasDedicatedHandler(Bytecode bytecode,
OperandScale operand_scale);
};
} // namespace internal
......
......@@ -212,12 +212,11 @@ bool Serializer<AllocatorT>::SerializeBuiltinReference(
return true;
}
// static
template <class AllocatorT>
bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) const {
bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) {
if (!obj->IsCode()) return false;
Code* code = Code::cast(obj);
if (isolate()->heap()->IsDeserializeLazyHandler(code)) return false;
return (code->kind() == Code::BYTECODE_HANDLER);
return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
}
template <class AllocatorT>
......
......@@ -195,7 +195,7 @@ class Serializer : public SerializerDeserializer {
int skip, BuiltinReferenceSerializationMode mode = kDefault);
// Returns true if the given heap object is a bytecode handler code object.
bool ObjectIsBytecodeHandler(HeapObject* obj) const;
static bool ObjectIsBytecodeHandler(HeapObject* obj);
inline void FlushSkip(int skip) {
if (skip != 0) {
......
......@@ -115,36 +115,6 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
return code;
}
// static
Code* Snapshot::DeserializeHandler(Isolate* isolate,
interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale) {
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
const v8::StartupData* blob = isolate->snapshot_blob();
Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
BuiltinSnapshotData builtin_snapshot_data(builtin_data);
CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
Code* code = builtin_deserializer.DeserializeHandler(bytecode, operand_scale);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = code->Size();
PrintF("[Deserializing handler %s (%d bytes) took %0.3f ms]\n",
interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str(),
bytes, ms);
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
isolate->logger()->LogCodeObject(code);
}
return code;
}
void ProfileDeserialization(
const SnapshotData* startup_snapshot, const SnapshotData* builtin_snapshot,
const std::vector<SnapshotData*>& context_snapshots) {
......
......@@ -98,12 +98,6 @@ class Snapshot : public AllStatic {
// initialized.
static Code* DeserializeBuiltin(Isolate* isolate, int builtin_id);
// Deserializes a single given handler code object. Intended to be called at
// runtime after the isolate has been fully initialized.
static Code* DeserializeHandler(Isolate* isolate,
interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale);
// ---------------- Helper methods ----------------
static bool HasContextSnapshot(Isolate* isolate, size_t index);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment