Commit daa296b5 authored by Dan Elphick, committed by Commit Bot

[interpreter] Always put bytecode handlers in builtins table

The bytecode handlers are now always created as part of the builtins
table, regardless of whether V8_EMBEDDED_BYTECODE_HANDLERS is defined.
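
For context, the generated bytecode handler (BCH) builtins occupy a
contiguous tail of the builtins table, grouped by operand scale; a
condensed sketch of the layout constants, mirroring the builtins.h hunk
further below:

  // Handlers sit at the end of the builtins table, one group per operand
  // scale; the counts come from the generated bytecodes-builtins-list header.
  static constexpr int kFirstWideBytecodeHandler =
      kFirstBytecodeHandler + kNumberOfBytecodeHandlers;
  static constexpr int kFirstExtraWideBytecodeHandler =
      kFirstWideBytecodeHandler + kNumberOfWideBytecodeHandlers;
  STATIC_ASSERT(kFirstExtraWideBytecodeHandler + kNumberOfWideBytecodeHandlers ==
                builtin_count);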

Lazy deserialization of bytecode handlers is enabled for this flow by
moving the three lazy bytecode deserializers from the strong roots into
the builtins table (ensuring that they are not marked lazy themselves).
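
Because the handler builtins live in known index ranges, the matching
lazy deserializer can be chosen from the builtin index alone; a condensed
sketch of the selection helper, mirroring the builtins.h hunk below:

  // Returns the builtins-table index of the lazy deserializer responsible
  // for deserializing the builtin at |index|.
  static constexpr int LazyDeserializerForBuiltin(const int index) {
    return index < kFirstWideBytecodeHandler
               ? (index < kFirstBytecodeHandler
                      ? Builtins::kDeserializeLazy
                      : Builtins::kDeserializeLazyHandler)
               : (index < kFirstExtraWideBytecodeHandler
                      ? Builtins::kDeserializeLazyWideHandler
                      : Builtins::kDeserializeLazyExtraWideHandler);
  }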

To simplify lazy deserialization, the illegal bytecode handler is made
non-lazy so that GetAndMaybeDeserializeBytecodeHandler doesn't need to
know about it.
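
With the illegal handler always deserialized eagerly, the interpreter can
map any (bytecode, operand scale) pair straight to a builtin index and
fall back to kIllegalHandler for combinations without a dedicated handler;
a condensed sketch, mirroring the interpreter.cc hunk below:

  // Maps a bytecode/operand-scale pair to its builtin index. Combinations
  // without a dedicated handler resolve to the (non-lazy) illegal handler,
  // so GetAndMaybeDeserializeBytecodeHandler needs no special case.
  int BuiltinIndexFromBytecode(Bytecode bytecode, OperandScale operand_scale) {
    int index = BytecodeOperands::OperandScaleAsIndex(operand_scale) *
                    kNumberOfBytecodeHandlers +
                static_cast<int>(bytecode);
    int offset = kBytecodeToBuiltinsMapping[index];
    return offset >= 0 ? Builtins::kFirstBytecodeHandler + offset
                       : Builtins::kIllegalHandler;
  }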

Since the bytecode handlers are now always part of the builtins table,
many bytecode-specific methods are removed, including the bytecode
handler logging support and the dedicated handler paths in
BuiltinSerializer and BuiltinDeserializer.

Removes setup-interpreter.h, setup-interpreter-internal.cc and
builtin-snapshot-utils.*.

Change-Id: Ie421aa897a04f7b3bcb964c476eb7ab149388d53
Reviewed-on: https://chromium-review.googlesource.com/1220046
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56063}
parent c7c35719
......@@ -1402,8 +1402,6 @@ v8_source_set("v8_initializers") {
"src/interpreter/interpreter-generator.h",
"src/interpreter/interpreter-intrinsics-generator.cc",
"src/interpreter/interpreter-intrinsics-generator.h",
"src/interpreter/setup-interpreter-internal.cc",
"src/interpreter/setup-interpreter.h",
]
if (use_jumbo_build == true) {
......@@ -2434,8 +2432,6 @@ v8_source_set("v8_base") {
"src/snapshot/builtin-serializer-allocator.h",
"src/snapshot/builtin-serializer.cc",
"src/snapshot/builtin-serializer.h",
"src/snapshot/builtin-snapshot-utils.cc",
"src/snapshot/builtin-snapshot-utils.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/default-deserializer-allocator.cc",
......@@ -3309,10 +3305,6 @@ if (current_toolchain == v8_generator_toolchain) {
"src/interpreter/bytecodes.h",
]
if (v8_enable_embedded_builtins) {
defines = [ "V8_EMBEDDED_BUILTINS" ]
}
configs = [ ":internal_config" ]
deps = [
......
......@@ -26,11 +26,13 @@ namespace internal {
// TFH: Handlers in Turbofan, with CodeStub linkage.
// Args: name, interface descriptor
// BCH: Bytecode Handlers, with bytecode dispatch linkage.
// Args: name, Bytecode, OperandScale
// Args: name, OperandScale, Bytecode
// DLH: Deserialize Lazy Handlers, with bytecode dispatch linkage.
// Args: name, OperandScale
// ASM: Builtin in platform-dependent assembly.
// Args: name
#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, DLH, ASM) \
  /* GC write barrier */                                                     \
TFC(RecordWrite, RecordWrite, 1) \
\
......@@ -127,6 +129,10 @@ namespace internal {
TFC(CompileLazy, JSTrampoline, 1) \
TFC(CompileLazyDeoptimizedCode, JSTrampoline, 1) \
TFC(DeserializeLazy, JSTrampoline, 1) \
/* The three lazy bytecode handlers do not declare a bytecode. */ \
DLH(DeserializeLazyHandler, interpreter::OperandScale::kSingle) \
DLH(DeserializeLazyWideHandler, interpreter::OperandScale::kDouble) \
DLH(DeserializeLazyExtraWideHandler, interpreter::OperandScale::kQuadruple) \
ASM(InstantiateAsmJs) \
ASM(NotifyDeoptimized) \
\
......@@ -1450,10 +1456,10 @@ namespace internal {
CPP(StringPrototypeToUpperCase)
#endif // V8_INTL_SUPPORT
#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM) \
BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, DLH, ASM) \
BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, DLH, ASM) \
BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
BUILTIN_LIST_BYTECODE_HANDLERS(BCH)
// The exception thrown in the following builtins are caught
......@@ -1498,23 +1504,27 @@ namespace internal {
#define BUILTIN_LIST_C(V) \
BUILTIN_LIST(V, V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_A(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V)
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
V)
#define BUILTIN_LIST_TFS(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
IGNORE_BUILTIN)
#define BUILTIN_LIST_TFJ(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_TFJ(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
IGNORE_BUILTIN)
#define BUILTIN_LIST_TFC(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_TFC(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
IGNORE_BUILTIN)
} // namespace internal
} // namespace v8
......
......@@ -43,7 +43,8 @@ namespace internal {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DEFINE_TFJ_INTERFACE_DESCRIPTOR,
DEFINE_TFC_INTERFACE_DESCRIPTOR, DEFINE_TFS_INTERFACE_DESCRIPTOR,
DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN, IGNORE_BUILTIN)
DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN, IGNORE_BUILTIN,
IGNORE_BUILTIN)
#undef DEFINE_TFJ_INTERFACE_DESCRIPTOR
#undef DEFINE_TFC_INTERFACE_DESCRIPTOR
......
......@@ -52,10 +52,11 @@ struct BuiltinMetadata {
#define DECL_TFS(Name, ...) { #Name, Builtins::TFS, {} },
#define DECL_TFH(Name, ...) { #Name, Builtins::TFH, {} },
#define DECL_BCH(Name, ...) { #Name, Builtins::BCH, {} },
#define DECL_DLH(Name, ...) { #Name, Builtins::DLH, {} },
#define DECL_ASM(Name, ...) { #Name, Builtins::ASM, {} },
const BuiltinMetadata builtin_metadata[] = {
BUILTIN_LIST(DECL_CPP, DECL_API, DECL_TFJ, DECL_TFC, DECL_TFS, DECL_TFH,
DECL_BCH, DECL_ASM)
DECL_BCH, DECL_DLH, DECL_ASM)
};
#undef DECL_CPP
#undef DECL_API
......@@ -64,6 +65,7 @@ const BuiltinMetadata builtin_metadata[] = {
#undef DECL_TFS
#undef DECL_TFH
#undef DECL_BCH
#undef DECL_DLH
#undef DECL_ASM
// clang-format on
......@@ -164,11 +166,12 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
break; \
}
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN)
CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN,
IGNORE_BUILTIN)
#undef CASE_OTHER
default:
Builtins::Kind kind = Builtins::KindOf(name);
DCHECK_NE(kind, BCH);
DCHECK(kind != BCH && kind != DLH);
if (kind == TFJ || kind == CPP) {
return Callable(code, JSTrampolineDescriptor{});
}
......@@ -279,9 +282,13 @@ bool Builtins::IsLazy(int index) {
case kCompileLazy:
case kDebugBreakTrampoline:
case kDeserializeLazy:
case kDeserializeLazyHandler:
case kDeserializeLazyWideHandler:
case kDeserializeLazyExtraWideHandler:
case kFunctionPrototypeHasInstance: // https://crbug.com/v8/6786.
case kHandleApiCall:
case kIllegal:
case kIllegalHandler:
case kInstantiateAsmJs:
case kInterpreterEnterBytecodeAdvance:
case kInterpreterEnterBytecodeDispatch:
......@@ -304,11 +311,16 @@ bool Builtins::IsLazy(int index) {
return false;
default:
// TODO(6624): Extend to other kinds.
return KindOf(index) == TFJ;
return KindOf(index) == TFJ || KindOf(index) == BCH;
}
UNREACHABLE();
}
// static
bool Builtins::IsLazyDeserializer(Code* code) {
return IsLazyDeserializer(code->builtin_index());
}
// static
bool Builtins::IsIsolateIndependent(int index) {
DCHECK(IsBuiltinId(index));
......@@ -410,6 +422,7 @@ const char* Builtins::KindNameOf(int index) {
case TFS: return "TFS";
case TFH: return "TFH";
case BCH: return "BCH";
case DLH: return "DLH";
case ASM: return "ASM";
}
// clang-format on
......
......@@ -46,17 +46,15 @@ class Builtins {
enum Name : int32_t {
#define DEF_ENUM(Name, ...) k##Name,
BUILTIN_LIST(DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM,
DEF_ENUM, DEF_ENUM)
DEF_ENUM, DEF_ENUM, DEF_ENUM)
#undef DEF_ENUM
builtin_count,
#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
#define EXTRACT_NAME(Name, ...) k##Name,
// Define kFirstBytecodeHandler,
kFirstBytecodeHandler =
FirstFromVarArgs(BUILTIN_LIST_BYTECODE_HANDLERS(EXTRACT_NAME) 0)
#undef EXTRACT_NAME
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
};
static const int32_t kNoBuiltinId = -1;
......@@ -66,7 +64,7 @@ class Builtins {
}
// The different builtin kinds are documented in builtins-definitions.h.
enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM };
enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, DLH, ASM };
static BailoutId GetContinuationBailoutId(Name name);
static Name GetBuiltinFromBailoutId(BailoutId);
......@@ -121,6 +119,35 @@ class Builtins {
// special cases such as CompileLazy and DeserializeLazy.
static bool IsLazy(int index);
static constexpr int kFirstWideBytecodeHandler =
kFirstBytecodeHandler + kNumberOfBytecodeHandlers;
static constexpr int kFirstExtraWideBytecodeHandler =
kFirstWideBytecodeHandler + kNumberOfWideBytecodeHandlers;
STATIC_ASSERT(kFirstExtraWideBytecodeHandler +
kNumberOfWideBytecodeHandlers ==
builtin_count);
// Returns the index of the appropriate lazy deserializer in the builtins
// table.
static constexpr int LazyDeserializerForBuiltin(const int index) {
return index < kFirstWideBytecodeHandler
? (index < kFirstBytecodeHandler
? Builtins::kDeserializeLazy
: Builtins::kDeserializeLazyHandler)
: (index < kFirstExtraWideBytecodeHandler
? Builtins::kDeserializeLazyWideHandler
: Builtins::kDeserializeLazyExtraWideHandler);
}
static constexpr bool IsLazyDeserializer(int builtin_index) {
return builtin_index == kDeserializeLazy ||
builtin_index == kDeserializeLazyHandler ||
builtin_index == kDeserializeLazyWideHandler ||
builtin_index == kDeserializeLazyExtraWideHandler;
}
static bool IsLazyDeserializer(Code* code);
// Helper methods used for testing isolate-independent builtins.
// TODO(jgruber,v8:6666): Remove once all builtins have been migrated.
static bool IsIsolateIndependent(int index);
......@@ -189,7 +216,8 @@ class Builtins {
static void Generate_##Name(compiler::CodeAssemblerState* state);
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, DECLARE_ASM)
DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, IGNORE_BUILTIN,
DECLARE_ASM)
#undef DECLARE_ASM
#undef DECLARE_TF
......
......@@ -12,11 +12,17 @@ namespace internal {
namespace interpreter {
void WriteBytecode(std::ofstream& out, Bytecode bytecode,
OperandScale operand_scale) {
OperandScale operand_scale, int* count, int offset_table[],
int table_index) {
DCHECK_NOT_NULL(count);
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
out << " \\\n V(" << Bytecodes::ToString(bytecode, operand_scale, "")
<< "Handler, interpreter::Bytecode::k" << Bytecodes::ToString(bytecode)
<< ", interpreter::OperandScale::k" << operand_scale << ")";
<< "Handler, interpreter::OperandScale::k" << operand_scale
<< ", interpreter::Bytecode::k" << Bytecodes::ToString(bytecode) << ")";
offset_table[table_index] = *count;
(*count)++;
} else {
offset_table[table_index] = -1;
}
}
......@@ -27,20 +33,54 @@ void WriteHeader(const char* header_filename) {
<< "// The following list macro is used to populate the builtins list\n"
<< "// with the bytecode handlers\n\n"
<< "#ifndef V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n"
<< "#define V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n"
<< "#define V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n\n"
<< "namespace v8 {\n"
<< "namespace internal {\n\n"
<< "#define BUILTIN_LIST_BYTECODE_HANDLERS(V)";
#ifdef V8_EMBEDDED_BUILTINS
#define ADD_BYTECODES(Name, ...) \
WriteBytecode(out, Bytecode::k##Name, operand_scale);
constexpr int kTableSize =
BytecodeOperands::kOperandScaleCount * Bytecodes::kBytecodeCount;
int offset_table[kTableSize];
int count = 0;
int index = 0;
#define ADD_BYTECODES(Name, ...) \
WriteBytecode(out, Bytecode::k##Name, operand_scale, &count, offset_table, \
index++);
OperandScale operand_scale = OperandScale::kSingle;
BYTECODE_LIST(ADD_BYTECODES)
int single_count = count;
operand_scale = OperandScale::kDouble;
BYTECODE_LIST(ADD_BYTECODES)
int wide_count = count - single_count;
operand_scale = OperandScale::kQuadruple;
BYTECODE_LIST(ADD_BYTECODES)
#undef ADD_BYTECODES
#endif
out << "\n#endif // V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n";
int extra_wide_count = count - wide_count - single_count;
CHECK_GT(single_count, wide_count);
CHECK_EQ(single_count, Bytecodes::kBytecodeCount);
CHECK_EQ(wide_count, extra_wide_count);
out << "\n\nconst int kNumberOfBytecodeHandlers = " << single_count << ";\n"
<< "const int kNumberOfWideBytecodeHandlers = " << wide_count << ";\n\n"
<< "#ifndef V8_EMBEDDED_BYTECODE_HANDLERS\n"
<< "// Mapping from (Bytecode + OperandScaleAsIndex * |Bytecodes|) to\n"
<< "// a dense form with all the illegal Bytecode/OperandScale\n"
<< "// combinations removed. Used to index into the builtins table.\n"
<< "constexpr int kBytecodeToBuiltinsMapping[" << kTableSize << "] = {\n"
<< " ";
for (int i = 0; i < kTableSize; ++i) {
if (i == single_count || i == 2 * single_count) {
out << "\n ";
}
out << offset_table[i] << ", ";
}
out << "};\n"
<< "#endif //V8_EMBEDDED_BYTECODE_HANDLERS\n\n"
<< "} // namespace internal\n"
<< "} // namespace v8\n"
<< "#endif // V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n";
}
} // namespace interpreter
......
......@@ -27,6 +27,7 @@ BUILTIN_LIST_C(FORWARD_DECLARE)
#undef FORWARD_DECLARE
namespace {
void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
const char* name) {
PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
......@@ -182,6 +183,7 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
} // anonymous namespace
// static
......@@ -248,11 +250,12 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
}
}
#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
namespace {
Code* GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
const char* name, interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale) {
const char* name,
interpreter::OperandScale operand_scale,
interpreter::Bytecode bytecode) {
DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
Handle<Code> code = interpreter::GenerateBytecodeHandler(
......@@ -263,8 +266,20 @@ Code* GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
return *code;
}
Code* GenerateLazyBytecodeHandler(Isolate* isolate, int builtin_index,
const char* name,
interpreter::OperandScale operand_scale) {
Handle<Code> code = interpreter::GenerateDeserializeLazyHandler(
isolate, operand_scale, builtin_index,
BuiltinAssemblerOptions(isolate, builtin_index));
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
} // namespace
#endif
// static
void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
......@@ -308,14 +323,15 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
CallDescriptors::InterfaceDescriptor, #Name, 1); \
AddBuiltin(builtins, index++, code);
#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
#define BUILD_BCH(Code, Bytecode, OperandScale) \
#define BUILD_BCH(Name, OperandScale, Bytecode) \
code = GenerateBytecodeHandler(isolate, index, Builtins::name(index), \
Bytecode, OperandScale); \
OperandScale, Bytecode); \
AddBuiltin(builtins, index++, code);
#define BUILD_DLH(Name, OperandScale) \
code = GenerateLazyBytecodeHandler(isolate, index, Builtins::name(index), \
OperandScale); \
AddBuiltin(builtins, index++, code);
#else
#define BUILD_BCH(Code, ...) UNREACHABLE();
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
#define BUILD_ASM(Name) \
code = BuildWithMacroAssembler(isolate, index, Builtins::Generate_##Name, \
......@@ -323,7 +339,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
AddBuiltin(builtins, index++, code);
BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFC, BUILD_TFS, BUILD_TFH,
BUILD_BCH, BUILD_ASM);
BUILD_BCH, BUILD_DLH, BUILD_ASM);
#undef BUILD_CPP
#undef BUILD_API
......@@ -332,6 +348,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
#undef BUILD_TFS
#undef BUILD_TFH
#undef BUILD_BCH
#undef BUILD_DLH
#undef BUILD_ASM
CHECK_EQ(Builtins::builtin_count, index);
......
......@@ -2492,9 +2492,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kApiSymbolTableRootIndex:
case kApiPrivateSymbolTableRootIndex:
case kMessageListenersRootIndex:
case kDeserializeLazyHandlerRootIndex:
case kDeserializeLazyHandlerWideRootIndex:
case kDeserializeLazyHandlerExtraWideRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
......@@ -5394,24 +5391,6 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
}
bool Heap::IsDeserializeLazyHandler(Code* code) {
return (code == deserialize_lazy_handler() ||
code == deserialize_lazy_handler_wide() ||
code == deserialize_lazy_handler_extra_wide());
}
void Heap::SetDeserializeLazyHandler(Code* code) {
set_deserialize_lazy_handler(code);
}
void Heap::SetDeserializeLazyHandlerWide(Code* code) {
set_deserialize_lazy_handler_wide(code);
}
void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
set_deserialize_lazy_handler_extra_wide(code);
}
void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
set_builtins_constants_table(cache);
}
......
......@@ -905,11 +905,6 @@ class Heap {
void RegisterStrongRoots(Object** start, Object** end);
void UnregisterStrongRoots(Object** start);
bool IsDeserializeLazyHandler(Code* code);
void SetDeserializeLazyHandler(Code* code);
void SetDeserializeLazyHandlerWide(Code* code);
void SetDeserializeLazyHandlerExtraWide(Code* code);
void SetBuiltinsConstantsTable(FixedArray* cache);
// ===========================================================================
......
......@@ -876,11 +876,6 @@ void Heap::CreateInitialObjects() {
set_noscript_shared_function_infos(roots.empty_weak_array_list());
STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
set_deserialize_lazy_handler(Smi::kZero);
set_deserialize_lazy_handler_wide(Smi::kZero);
set_deserialize_lazy_handler_extra_wide(Smi::kZero);
// Evaluate the hash values which will then be cached in the strings.
isolate()->factory()->zero_string()->Hash();
isolate()->factory()->one_string()->Hash();
......
......@@ -690,12 +690,6 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode == Bytecode::kDebugBreakWide;
}
// Returns true if the bytecode can be lazily deserialized.
static constexpr bool IsLazy(Bytecode bytecode) {
// Currently, all handlers are deserialized lazily.
return true;
}
// Returns true if the bytecode returns.
static constexpr bool Returns(Bytecode bytecode) {
#define OR_BYTECODE(NAME) || bytecode == Bytecode::k##NAME
......
......@@ -3222,7 +3222,9 @@ class DeserializeLazyAssembler : public InterpreterAssembler {
} // namespace
Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
OperandScale operand_scale) {
OperandScale operand_scale,
int builtin_index,
const AssemblerOptions& options) {
Zone zone(isolate->allocator(), ZONE_NAME);
std::string debug_name = std::string("DeserializeLazy");
......@@ -3237,11 +3239,11 @@ Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
debug_name.c_str(),
FLAG_untrusted_code_mitigations
? PoisoningMitigationLevel::kPoisonCriticalOnly
: PoisoningMitigationLevel::kDontPoison);
: PoisoningMitigationLevel::kDontPoison,
0, builtin_index);
DeserializeLazyAssembler::Generate(&state, operand_scale);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, AssemblerOptions::Default(isolate));
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state, options);
PROFILE(isolate,
CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
AbstractCode::cast(*code), debug_name.c_str()));
......
......@@ -20,8 +20,9 @@ extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
int builtin_index,
const AssemblerOptions& options);
extern Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
OperandScale operand_scale);
extern Handle<Code> GenerateDeserializeLazyHandler(
Isolate* isolate, OperandScale operand_scale, int builtin_index,
const AssemblerOptions& options);
} // namespace interpreter
} // namespace internal
......
......@@ -7,6 +7,7 @@
#include <fstream>
#include <memory>
#include "builtins-generated/bytecodes-builtins-list.h"
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/compiler.h"
......@@ -59,23 +60,44 @@ Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
}
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
namespace {
int BuiltinIndexFromBytecode(Bytecode bytecode, OperandScale operand_scale) {
int index = BytecodeOperands::OperandScaleAsIndex(operand_scale) *
kNumberOfBytecodeHandlers +
static_cast<int>(bytecode);
int offset = kBytecodeToBuiltinsMapping[index];
return offset >= 0 ? Builtins::kFirstBytecodeHandler + offset
: Builtins::kIllegalHandler;
}
} // namespace
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
Bytecode bytecode, OperandScale operand_scale) {
#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
return GetBytecodeHandler(bytecode, operand_scale);
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
Address pc = dispatch_table_[index];
Code* builtin = InstructionStream::TryLookupCode(isolate_, pc);
DCHECK(builtin->IsCode());
return builtin;
#else
Code* code = GetBytecodeHandler(bytecode, operand_scale);
int builtin_index = BuiltinIndexFromBytecode(bytecode, operand_scale);
Builtins* builtins = isolate_->builtins();
Code* code = builtins->builtin(builtin_index);
// Already deserialized? Then just return the handler.
if (!isolate_->heap()->IsDeserializeLazyHandler(code)) return code;
if (!Builtins::IsLazyDeserializer(code)) return code;
DCHECK(FLAG_lazy_handler_deserialization);
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
code = Snapshot::DeserializeHandler(isolate_, bytecode, operand_scale);
code = Snapshot::DeserializeBuiltin(isolate_, builtin_index);
DCHECK(code->IsCode());
DCHECK_EQ(code->kind(), Code::BYTECODE_HANDLER);
DCHECK(!isolate_->heap()->IsDeserializeLazyHandler(code));
DCHECK(!Builtins::IsLazyDeserializer(code));
SetBytecodeHandler(bytecode, operand_scale, code);
......@@ -83,23 +105,6 @@ Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
}
Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale) {
#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
Address pc = dispatch_table_[index];
Code* builtin = InstructionStream::TryLookupCode(isolate_, pc);
DCHECK(builtin->IsCode());
return builtin;
#else
DCHECK(IsDispatchTableInitialized());
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
Address code_entry = dispatch_table_[index];
return Code::GetCodeFromTargetAddress(code_entry);
#endif
}
void Interpreter::SetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale,
Code* handler) {
......@@ -252,11 +257,11 @@ void Interpreter::ForEachBytecode(
}
void Interpreter::InitializeDispatchTable() {
#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
Builtins* builtins = isolate_->builtins();
Code* illegal = builtins->builtin(Builtins::kIllegalHandler);
int builtin_id = Builtins::kFirstBytecodeHandler;
ForEachBytecode([&](Bytecode bytecode, OperandScale operand_scale) {
ForEachBytecode([=, &builtin_id](Bytecode bytecode,
OperandScale operand_scale) {
Code* handler = illegal;
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
#ifdef DEBUG
......@@ -270,7 +275,16 @@ void Interpreter::InitializeDispatchTable() {
SetBytecodeHandler(bytecode, operand_scale, handler);
});
DCHECK(builtin_id == Builtins::builtin_count);
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
DCHECK(IsDispatchTableInitialized());
#if defined(V8_USE_SNAPSHOT) && !defined(V8_USE_SNAPSHOT_WITH_UNWINDING_INFO)
if (!isolate_->serializer_enabled() && FLAG_perf_prof_unwinding_info) {
StdoutStream{}
<< "Warning: The --perf-prof-unwinding-info flag can be passed at "
"mksnapshot time to get better results."
<< std::endl;
}
#endif
}
bool Interpreter::IsDispatchTableInitialized() const {
......
......@@ -54,9 +54,6 @@ class Interpreter {
Code* GetAndMaybeDeserializeBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale);
// Return bytecode handler for |bytecode| and |operand_scale|.
Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
// Set the bytecode handler for |bytecode| and |operand_scale|.
void SetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale,
Code* handler);
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/interpreter/setup-interpreter.h"
#include "src/assembler.h"
#include "src/handles-inl.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-generator.h"
#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
namespace interpreter {
namespace {
void PrintBuiltinSize(Bytecode bytecode, OperandScale operand_scale,
Handle<Code> code) {
PrintF(stdout, "Ignition Handler, %s, %d\n",
Bytecodes::ToString(bytecode, operand_scale).c_str(),
code->InstructionSize());
}
} // namespace
// static
void SetupInterpreter::InstallBytecodeHandlers(Interpreter* interpreter) {
DCHECK(!interpreter->IsDispatchTableInitialized());
HandleScope scope(interpreter->isolate_);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(interpreter->isolate_);
Address* dispatch_table = interpreter->dispatch_table_;
// Generate bytecode handlers for all bytecodes and scales.
const OperandScale kOperandScales[] = {
#define VALUE(Name, _) OperandScale::k##Name,
OPERAND_SCALE_LIST(VALUE)
#undef VALUE
};
for (OperandScale operand_scale : kOperandScales) {
#define GENERATE_CODE(Name, ...) \
InstallBytecodeHandler(interpreter->isolate_, dispatch_table, \
Bytecode::k##Name, operand_scale);
BYTECODE_LIST(GENERATE_CODE)
#undef GENERATE_CODE
}
// Fill unused entries with the illegal bytecode handler.
size_t illegal_index = Interpreter::GetDispatchTableIndex(
Bytecode::kIllegal, OperandScale::kSingle);
for (size_t index = 0; index < Interpreter::kDispatchTableSize; ++index) {
if (dispatch_table[index] == kNullAddress) {
dispatch_table[index] = dispatch_table[illegal_index];
}
}
// Generate the DeserializeLazy handlers, one for each operand scale.
Heap* heap = interpreter->isolate_->heap();
DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler());
heap->SetDeserializeLazyHandler(*GenerateDeserializeLazyHandler(
interpreter->isolate_, OperandScale::kSingle));
DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_wide());
heap->SetDeserializeLazyHandlerWide(*GenerateDeserializeLazyHandler(
interpreter->isolate_, OperandScale::kDouble));
DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_extra_wide());
heap->SetDeserializeLazyHandlerExtraWide(*GenerateDeserializeLazyHandler(
interpreter->isolate_, OperandScale::kQuadruple));
// Initialization should have been successful.
DCHECK(interpreter->IsDispatchTableInitialized());
}
// static
void SetupInterpreter::InstallBytecodeHandler(Isolate* isolate,
Address* dispatch_table,
Bytecode bytecode,
OperandScale operand_scale) {
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
size_t index = Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
// Here we explicitly set the bytecode handler to not be a builtin with an
// index of kNoBuiltinId.
// TODO(delphick): Use builtins version instead.
Handle<Code> code = GenerateBytecodeHandler(
isolate, bytecode, operand_scale, Builtins::kNoBuiltinId,
AssemblerOptions::Default(isolate));
dispatch_table[index] = code->entry();
if (FLAG_print_builtin_size) PrintBuiltinSize(bytecode, operand_scale, code);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
std::string name = Bytecodes::ToString(bytecode, operand_scale);
code->PrintBuiltinCode(isolate, name.c_str());
}
#endif // ENABLE_DISASSEMBLER
}
} // namespace interpreter
} // namespace internal
} // namespace v8
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INTERPRETER_SETUP_INTERPRETER_H_
#define V8_INTERPRETER_SETUP_INTERPRETER_H_
#include "src/interpreter/bytecode-operands.h"
#include "src/interpreter/bytecodes.h"
namespace v8 {
namespace internal {
namespace interpreter {
class Interpreter;
class SetupInterpreter {
public:
static void InstallBytecodeHandlers(Interpreter* interpreter);
private:
// Generates handler for given |bytecode| and |operand_scale|
// and installs it into the |dispatch_table|.
static void InstallBytecodeHandler(Isolate* isolate, Address* dispatch_table,
Bytecode bytecode,
OperandScale operand_scale);
};
} // namespace interpreter
} // namespace internal
} // namespace v8
#endif // V8_INTERPRETER_SETUP_INTERPRETER_H_
......@@ -3041,8 +3041,6 @@ bool Isolate::Init(StartupDeserializer* des) {
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
interpreter_->InitializeDispatchTable();
setup_delegate_->SetupInterpreter(interpreter_);
heap_.NotifyDeserializationComplete();
}
delete setup_delegate_;
......
......@@ -349,7 +349,6 @@ void ExternalCodeEventListener::LogExistingCode() {
HandleScope scope(isolate_);
ExistingCodeLogger logger(isolate_, this);
logger.LogCodeObjects();
logger.LogBytecodeHandlers();
logger.LogCompiledFunctions();
}
......@@ -1834,16 +1833,6 @@ void Logger::LogCodeObject(Object* object) {
void Logger::LogCodeObjects() { existing_code_logger_.LogCodeObjects(); }
void Logger::LogBytecodeHandler(interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale,
Code* code) {
existing_code_logger_.LogBytecodeHandler(bytecode, operand_scale, code);
}
void Logger::LogBytecodeHandlers() {
existing_code_logger_.LogBytecodeHandlers();
}
void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code) {
existing_code_logger_.LogExistingFunction(shared, code);
......@@ -2158,37 +2147,6 @@ void ExistingCodeLogger::LogCompiledFunctions() {
}
}
void ExistingCodeLogger::LogBytecodeHandler(
interpreter::Bytecode bytecode, interpreter::OperandScale operand_scale,
Code* code) {
std::string bytecode_name =
interpreter::Bytecodes::ToString(bytecode, operand_scale);
CALL_CODE_EVENT_HANDLER(
CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
AbstractCode::cast(code), bytecode_name.c_str()))
}
void ExistingCodeLogger::LogBytecodeHandlers() {
const interpreter::OperandScale kOperandScales[] = {
#define VALUE(Name, _) interpreter::OperandScale::k##Name,
OPERAND_SCALE_LIST(VALUE)
#undef VALUE
};
const int last_index = static_cast<int>(interpreter::Bytecode::kLast);
interpreter::Interpreter* interpreter = isolate_->interpreter();
for (auto operand_scale : kOperandScales) {
for (int index = 0; index <= last_index; ++index) {
interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(index);
if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
if (isolate_->heap()->IsDeserializeLazyHandler(code)) continue;
LogBytecodeHandler(bytecode, operand_scale, code);
}
}
}
}
void ExistingCodeLogger::LogExistingFunction(
Handle<SharedFunctionInfo> shared, Handle<AbstractCode> code,
CodeEventListener::LogEventsAndTags tag) {
......
......@@ -75,11 +75,6 @@ class Profiler;
class RuntimeCallTimer;
class Ticker;
namespace interpreter {
enum class Bytecode : uint8_t;
enum class OperandScale : uint8_t;
} // namespace interpreter
#undef LOG
#define LOG(isolate, Call) \
do { \
......@@ -100,7 +95,6 @@ class ExistingCodeLogger {
: isolate_(isolate), listener_(listener) {}
void LogCodeObjects();
void LogBytecodeHandlers();
void LogCompiledFunctions();
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
......@@ -108,8 +102,6 @@ class ExistingCodeLogger {
CodeEventListener::LogEventsAndTags tag =
CodeEventListener::LAZY_COMPILE_TAG);
void LogCodeObject(Object* object);
void LogBytecodeHandler(interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale, Code* code);
private:
Isolate* isolate_;
......@@ -282,10 +274,6 @@ class Logger : public CodeEventListener {
void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
void LogCodeObjects();
// Used for logging bytecode handlers found in the snapshot.
void LogBytecodeHandlers();
void LogBytecodeHandler(interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale, Code* code);
// Logs all Maps found in the heap.
void LogMaps();
......
......@@ -250,11 +250,6 @@ namespace internal {
V(FixedArray, serialized_objects, SerializedObjects) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
/* DeserializeLazy handlers for lazy bytecode deserialization */ \
V(Object, deserialize_lazy_handler, DeserializeLazyHandler) \
V(Object, deserialize_lazy_handler_wide, DeserializeLazyHandlerWide) \
V(Object, deserialize_lazy_handler_extra_wide, \
DeserializeLazyHandlerExtraWide) \
/* Hash seed */ \
V(ByteArray, hash_seed, HashSeed) \
/* JS Entries */ \
......
......@@ -17,20 +17,6 @@ void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
// No actual work to be done; builtins will be deserialized from the snapshot.
}
void SetupIsolateDelegate::SetupInterpreter(
interpreter::Interpreter* interpreter) {
#if !defined(V8_EMBEDDED_BYTECODE_HANDLERS) && defined(V8_USE_SNAPSHOT) && \
!defined(V8_USE_SNAPSHOT_WITH_UNWINDING_INFO)
if (FLAG_perf_prof_unwinding_info) {
StdoutStream{}
<< "Warning: The --perf-prof-unwinding-info flag can be passed at "
"mksnapshot time to get better results."
<< std::endl;
}
#endif
CHECK(interpreter->IsDispatchTableInitialized());
}
bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
CHECK(!create_heap_objects_);
// No actual work to be done; heap will be deserialized from the snapshot.
......
......@@ -7,7 +7,6 @@
#include "src/base/logging.h"
#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/interpreter/setup-interpreter.h"
#include "src/isolate.h"
namespace v8 {
......@@ -21,16 +20,6 @@ void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
}
}
void SetupIsolateDelegate::SetupInterpreter(
interpreter::Interpreter* interpreter) {
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
if (create_heap_objects_) {
interpreter::SetupInterpreter::InstallBytecodeHandlers(interpreter);
}
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
CHECK(interpreter->IsDispatchTableInitialized());
}
bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
if (create_heap_objects_) {
return SetupHeapInternal(heap);
......
......@@ -38,8 +38,6 @@ class SetupIsolateDelegate {
virtual void SetupBuiltins(Isolate* isolate);
virtual void SetupInterpreter(interpreter::Interpreter* interpreter);
virtual bool SetupHeap(Heap* heap);
protected:
......
......@@ -19,18 +19,6 @@ BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer)
: deserializer_(deserializer) {}
BuiltinDeserializerAllocator::~BuiltinDeserializerAllocator() {
delete handler_allocations_;
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
namespace {
int HandlerAllocationIndex(int code_object_id) {
return code_object_id - BuiltinSnapshotUtils::kFirstHandlerIndex;
}
} // namespace
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
int size) {
const int code_object_id = deserializer()->CurrentCodeObjectId();
......@@ -41,34 +29,14 @@ Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
RegisterCodeObjectAllocation(code_object_id);
#endif
if (BSU::IsBuiltinIndex(code_object_id)) {
Object* obj = isolate()->builtins()->builtin(code_object_id);
DCHECK(Internals::HasHeapObjectTag(obj));
return HeapObject::cast(obj)->address();
#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
}
#else
} else if (BSU::IsHandlerIndex(code_object_id)) {
if (handler_allocation_ != kNullAddress) {
// Lazy deserialization.
DCHECK_NULL(handler_allocations_);
return handler_allocation_;
} else {
// Eager deserialization.
DCHECK_EQ(kNullAddress, handler_allocation_);
DCHECK_NOT_NULL(handler_allocations_);
int index = HandlerAllocationIndex(code_object_id);
DCHECK_NE(kNullAddress, handler_allocations_->at(index));
return handler_allocations_->at(index);
}
}
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
UNREACHABLE();
DCHECK(Builtins::IsBuiltinId(code_object_id));
Object* obj = isolate()->builtins()->builtin(code_object_id);
DCHECK(Internals::HasHeapObjectTag(obj));
return HeapObject::cast(obj)->address();
}
Heap::Reservation
BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
Heap::Reservation result;
// Reservations for builtins.
......@@ -83,7 +51,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
result.push_back({builtin_size, kNullAddress, kNullAddress});
}
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
for (int i = 0; i < Builtins::builtin_count; i++) {
if (i == Builtins::kDeserializeLazy) continue;
// Skip lazy builtins. These will be replaced by the DeserializeLazy code
......@@ -97,30 +65,6 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
result.push_back({builtin_size, kNullAddress, kNullAddress});
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
// Reservations for bytecode handlers.
BSU::ForEachBytecode(
[=, &result](Bytecode bytecode, OperandScale operand_scale) {
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
// Bytecodes without a handler don't require a reservation.
return;
} else if (FLAG_lazy_handler_deserialization &&
deserializer()->IsLazyDeserializationEnabled() &&
Bytecodes::IsLazy(bytecode)) {
// Skip lazy handlers. These will be replaced by the DeserializeLazy
// code object in InitializeFromReservations and thus require no
// reserved space.
return;
}
const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
uint32_t handler_size = deserializer()->ExtractCodeObjectSize(index);
DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
result.push_back({handler_size, kNullAddress, kNullAddress});
});
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
return result;
}
......@@ -138,28 +82,6 @@ void BuiltinDeserializerAllocator::InitializeBuiltinFromReservation(
#endif
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
void BuiltinDeserializerAllocator::InitializeHandlerFromReservation(
const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale) {
DCHECK_EQ(deserializer()->ExtractCodeObjectSize(
BSU::BytecodeToIndex(bytecode, operand_scale)),
chunk.size);
DCHECK_EQ(chunk.size, chunk.end - chunk.start);
SkipList::Update(chunk.start, chunk.size);
DCHECK_NOT_NULL(handler_allocations_);
const int index =
HandlerAllocationIndex(BSU::BytecodeToIndex(bytecode, operand_scale));
handler_allocations_->at(index) = chunk.start;
#ifdef DEBUG
RegisterCodeObjectReservation(BSU::BytecodeToIndex(bytecode, operand_scale));
#endif
}
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
void BuiltinDeserializerAllocator::InitializeFromReservations(
const Heap::Reservation& reservation) {
DCHECK(!AllowHeapAllocation::IsAllowed());
......@@ -178,43 +100,18 @@ void BuiltinDeserializerAllocator::InitializeFromReservations(
reservation_index++;
}
Code* deserialize_lazy = builtins->builtin(Builtins::kDeserializeLazy);
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
for (int i = 0; i < Builtins::builtin_count; i++) {
if (i == Builtins::kDeserializeLazy) continue;
if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
builtins->set_builtin(i, deserialize_lazy);
builtins->set_builtin(
i, builtins->builtin(builtins->LazyDeserializerForBuiltin(i)));
} else {
InitializeBuiltinFromReservation(reservation[reservation_index], i);
reservation_index++;
}
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
// Initialize interpreter bytecode handler reservations.
DCHECK_NULL(handler_allocations_);
handler_allocations_ = new std::vector<Address>(BSU::kNumberOfHandlers);
BSU::ForEachBytecode(
[=, &reservation_index](Bytecode bytecode, OperandScale operand_scale) {
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
// Bytecodes without a handler don't have a reservation.
return;
} else if (FLAG_lazy_handler_deserialization &&
deserializer()->IsLazyDeserializationEnabled() &&
Bytecodes::IsLazy(bytecode)) {
// Likewise, bytecodes with lazy handlers don't either.
return;
}
InitializeHandlerFromReservation(reservation[reservation_index],
bytecode, operand_scale);
reservation_index++;
});
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
DCHECK_EQ(reservation.size(), reservation_index);
}
......@@ -223,9 +120,9 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(isolate()->builtins()->is_initialized());
DCHECK(Builtins::IsBuiltinId(builtin_id));
DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
DCHECK_EQ(Builtins::kDeserializeLazy,
isolate()->builtins()->builtin(builtin_id)->builtin_index());
DCHECK(!Builtins::IsLazyDeserializer(builtin_id));
DCHECK(Builtins::IsLazyDeserializer(
isolate()->builtins()->builtin(builtin_id)->builtin_index()));
const uint32_t builtin_size =
deserializer()->ExtractCodeObjectSize(builtin_id);
......@@ -248,30 +145,6 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
#endif
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
void BuiltinDeserializerAllocator::ReserveForHandler(
Bytecode bytecode, OperandScale operand_scale) {
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
const uint32_t handler_size =
deserializer()->ExtractCodeObjectSize(code_object_id);
DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
handler_allocation_ =
isolate()->factory()->NewCodeForDeserialization(handler_size)->address();
// Note: After this point and until deserialization finishes, heap allocation
// is disallowed. We currently can't safely assert this since we'd need to
// pass the DisallowHeapAllocation scope out of this function.
#ifdef DEBUG
RegisterCodeObjectReservation(code_object_id);
#endif
}
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
#ifdef DEBUG
void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
int code_object_id) {
......
......@@ -30,8 +30,6 @@ class BuiltinDeserializerAllocator final {
BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer);
~BuiltinDeserializerAllocator();
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
......@@ -42,13 +40,10 @@ class BuiltinDeserializerAllocator final {
// deserialization) in order to avoid having to patch builtin references
// later on. See also the kBuiltin case in deserializer.cc.
//
// There are three ways that we use to reserve / allocate space. In all
// cases, required objects are requested from the GC prior to
// deserialization. 1. pre-allocated builtin code objects are written into
// the builtins table (this is to make deserialization of builtin references
// easier). Pre-allocated handler code objects are 2. stored in the
// {handler_allocations_} vector (at eager-deserialization time) and 3.
// stored in {handler_allocation_} (at lazy-deserialization time).
// There is one way that we use to reserve / allocate space. Required objects
// are requested from the GC prior to deserialization. Pre-allocated builtin
// code objects are written into the builtins table (this is to make
// deserialization of builtin references easier).
//
// Allocate simply returns the pre-allocated object prepared by
// InitializeFromReservations.
......@@ -89,17 +84,13 @@ class BuiltinDeserializerAllocator final {
// deserialization.
// TODO(jgruber): Refactor reservation/allocation logic in deserializers to
// make this less messy.
Heap::Reservation CreateReservationsForEagerBuiltinsAndHandlers();
Heap::Reservation CreateReservationsForEagerBuiltins();
void InitializeFromReservations(const Heap::Reservation& reservation);
// Creates reservations and initializes the builtins table in preparation for
// lazily deserializing a single builtin.
void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
// Pre-allocates a code object preparation for lazily deserializing a single
// handler.
void ReserveForHandler(Bytecode bytecode, OperandScale operand_scale);
#ifdef DEBUG
bool ReservationsAreFullyUsed() const;
#endif
......@@ -113,11 +104,6 @@ class BuiltinDeserializerAllocator final {
void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
int builtin_id);
// As above, but for interpreter bytecode handlers.
void InitializeHandlerFromReservation(
const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale);
#ifdef DEBUG
void RegisterCodeObjectReservation(int code_object_id);
void RegisterCodeObjectAllocation(int code_object_id);
......@@ -130,13 +116,6 @@ class BuiltinDeserializerAllocator final {
// construction since that makes vtable-based checks fail.
Deserializer<BuiltinDeserializerAllocator>* const deserializer_;
// Stores allocated space for bytecode handlers during eager deserialization.
std::vector<Address>* handler_allocations_ = nullptr;
// Stores the allocated space for a single handler during lazy
// deserialization.
Address handler_allocation_ = kNullAddress;
bool next_reference_is_weak_ = false;
DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator)
......
......@@ -42,24 +42,24 @@ BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
const BuiltinSnapshotData* data)
: Deserializer(data, false) {
code_offsets_ = data->BuiltinOffsets();
DCHECK_EQ(BSU::kNumberOfCodeObjects, code_offsets_.length());
DCHECK_EQ(Builtins::builtin_count, code_offsets_.length());
DCHECK(std::is_sorted(code_offsets_.begin(), code_offsets_.end()));
Initialize(isolate);
}
void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
void BuiltinDeserializer::DeserializeEagerBuiltins() {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK_EQ(0, source()->position());
// Deserialize builtins.
Builtins* builtins = isolate()->builtins();
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
for (int i = 0; i < Builtins::builtin_count; i++) {
if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
// Do nothing. These builtins have been replaced by DeserializeLazy in
// InitializeFromReservations.
DCHECK_EQ(builtins->builtin(Builtins::kDeserializeLazy),
DCHECK_EQ(builtins->builtin(builtins->LazyDeserializerForBuiltin(i)),
builtins->builtin(i));
} else {
builtins->set_builtin(i, DeserializeBuiltinRaw(i));
......@@ -67,7 +67,7 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
}
#ifdef DEBUG
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
for (int i = 0; i < Builtins::builtin_count; i++) {
Object* o = builtins->builtin(i);
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
}
......@@ -77,7 +77,7 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
if (FLAG_print_builtin_code) {
// We can't print builtins during deserialization because they may refer
// to not yet deserialized builtins.
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
for (int i = 0; i < Builtins::builtin_count; i++) {
if (!IsLazyDeserializationEnabled() || !Builtins::IsLazy(i)) {
Code* code = builtins->builtin(i);
const char* name = Builtins::name(i);
......@@ -86,39 +86,6 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
}
}
#endif
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
// Deserialize bytecode handlers.
Interpreter* interpreter = isolate()->interpreter();
DCHECK(!interpreter->IsDispatchTableInitialized());
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
// Bytecodes without a dedicated handler are patched up in a second pass.
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
// If lazy-deserialization is enabled and the current bytecode is lazy,
// we write the generic LazyDeserialization handler into the dispatch table
// and deserialize later upon first use.
Code* code = (FLAG_lazy_handler_deserialization &&
IsLazyDeserializationEnabled() && Bytecodes::IsLazy(bytecode))
? GetDeserializeLazyHandler(operand_scale)
: DeserializeHandlerRaw(bytecode, operand_scale);
interpreter->SetBytecodeHandler(bytecode, operand_scale, code);
});
// Patch up holes in the dispatch table.
Code* illegal_handler = interpreter->GetBytecodeHandler(
Bytecode::kIllegal, OperandScale::kSingle);
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
interpreter->SetBytecodeHandler(bytecode, operand_scale, illegal_handler);
});
DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
}
Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
......@@ -136,15 +103,6 @@ Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
return code;
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
OperandScale operand_scale) {
allocator()->ReserveForHandler(bytecode, operand_scale);
DisallowHeapAllocation no_gc;
return DeserializeHandlerRaw(bytecode, operand_scale);
}
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Builtins::IsBuiltinId(builtin_id));
......@@ -175,44 +133,8 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
return code;
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
OperandScale operand_scale) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
DeserializingCodeObjectScope scope(this, code_object_id);
const int initial_position = source()->position();
source()->set_position(code_offsets_[code_object_id]);
Object* o = ReadDataSingle();
DCHECK(o->IsCode() && Code::cast(o)->kind() == Code::BYTECODE_HANDLER);
// Rewind.
source()->set_position(initial_position);
// Flush the instruction cache.
Code* code = Code::cast(o);
Assembler::FlushICache(code->raw_instruction_start(),
code->raw_instruction_size());
std::string name = Bytecodes::ToString(bytecode, operand_scale);
PROFILE(isolate(), CodeCreateEvent(CodeEventListener::HANDLER_TAG,
AbstractCode::cast(code), name.c_str()));
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
code->PrintBuiltinCode(isolate(), name.c_str());
}
#endif // ENABLE_DISASSEMBLER
return code;
}
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
DCHECK_LT(code_object_id, BSU::kNumberOfCodeObjects);
DCHECK_LT(code_object_id, Builtins::builtin_count);
const int initial_position = source()->position();
......@@ -230,20 +152,5 @@ uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
return result;
}
Code* BuiltinDeserializer::GetDeserializeLazyHandler(
interpreter::OperandScale operand_scale) const {
STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
switch (operand_scale) {
case OperandScale::kSingle:
return Code::cast(isolate()->heap()->deserialize_lazy_handler());
case OperandScale::kDouble:
return Code::cast(isolate()->heap()->deserialize_lazy_handler_wide());
case OperandScale::kQuadruple:
return Code::cast(
isolate()->heap()->deserialize_lazy_handler_extra_wide());
}
UNREACHABLE();
}
} // namespace internal
} // namespace v8
......@@ -7,7 +7,6 @@
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/deserializer.h"
namespace v8 {
......@@ -32,29 +31,17 @@ class BuiltinDeserializer final
//
// After this, the instruction cache must be flushed by the caller (we don't
// do it ourselves since the startup serializer batch-flushes all code pages).
void DeserializeEagerBuiltinsAndHandlers();
void DeserializeEagerBuiltins();
// Deserializes the single given builtin. This is used whenever a builtin is
// lazily deserialized at runtime.
Code* DeserializeBuiltin(int builtin_id);
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
// Deserializes the single given handler. This is used whenever a handler is
// lazily deserialized at runtime.
Code* DeserializeHandler(Bytecode bytecode, OperandScale operand_scale);
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
private:
// Deserializes the single given builtin. Assumes that reservations have
// already been allocated.
Code* DeserializeBuiltinRaw(int builtin_id);
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
// Deserializes the single given bytecode handler. Assumes that reservations
// have already been allocated.
Code* DeserializeHandlerRaw(Bytecode bytecode, OperandScale operand_scale);
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
// Extracts the size builtin Code objects (baked into the snapshot).
uint32_t ExtractCodeObjectSize(int builtin_id);
......
......@@ -26,39 +26,11 @@ BuiltinSerializer::~BuiltinSerializer() {
void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
// Serialize builtins.
STATIC_ASSERT(0 == BSU::kFirstBuiltinIndex);
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
for (int i = 0; i < Builtins::builtin_count; i++) {
SetBuiltinOffset(i, sink_.Position());
SerializeBuiltin(isolate()->builtins()->builtin(i));
}
#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
STATIC_ASSERT(BSU::kNumberOfBuiltins == BSU::kNumberOfCodeObjects);
#else
// Serialize bytecode handlers.
STATIC_ASSERT(BSU::kNumberOfBuiltins == BSU::kFirstHandlerIndex);
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
SetHandlerOffset(bytecode, operand_scale, sink_.Position());
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
SerializeHandler(
isolate()->interpreter()->GetBytecodeHandler(bytecode, operand_scale));
});
STATIC_ASSERT(BSU::kFirstHandlerIndex + BSU::kNumberOfHandlers ==
BSU::kNumberOfCodeObjects);
// The DeserializeLazy handlers are serialized by the StartupSerializer
// during strong root iteration.
DCHECK(isolate()->heap()->deserialize_lazy_handler()->IsCode());
DCHECK(isolate()->heap()->deserialize_lazy_handler_wide()->IsCode());
DCHECK(isolate()->heap()->deserialize_lazy_handler_extra_wide()->IsCode());
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
// Pad with kNop since GetInt() might read too far.
Pad();
......@@ -87,13 +59,6 @@ void BuiltinSerializer::SerializeBuiltin(Code* code) {
object_serializer.Serialize();
}
void BuiltinSerializer::SerializeHandler(Code* code) {
DCHECK(ObjectIsBytecodeHandler(code));
ObjectSerializer object_serializer(this, code, &sink_, kPlain,
kStartOfObject);
object_serializer.Serialize();
}
void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!o->IsSmi());
......@@ -119,8 +84,8 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
// * Strings: CSA_ASSERTs in debug builds, various other string constants.
// * HeapNumbers: Embedded constants.
// TODO(6624): Jump targets should never trigger content serialization, it
// should always result in a reference instead. Reloc infos and handler
// tables should not end up in the partial snapshot cache.
// should always result in a reference instead. Reloc infos and handler tables
// should not end up in the partial snapshot cache.
FlushSkip(skip);
......@@ -132,19 +97,8 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
void BuiltinSerializer::SetBuiltinOffset(int builtin_id, uint32_t offset) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
DCHECK(BSU::IsBuiltinIndex(builtin_id));
code_offsets_[builtin_id] = offset;
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
void BuiltinSerializer::SetHandlerOffset(Bytecode bytecode,
OperandScale operand_scale,
uint32_t offset) {
const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
DCHECK(BSU::IsHandlerIndex(index));
code_offsets_[index] = offset;
}
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
} // namespace internal
} // namespace v8
......@@ -5,9 +5,9 @@
#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#include "src/builtins/builtins.h"
#include "src/interpreter/interpreter.h"
#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/serializer.h"
namespace v8 {
......@@ -15,12 +15,10 @@ namespace internal {
class StartupSerializer;
// Responsible for serializing builtin and bytecode handler objects during
// startup snapshot creation into a dedicated area of the snapshot.
// Responsible for serializing builtin objects during startup snapshot creation
// into a dedicated area of the snapshot.
// See snapshot.h for documentation of the snapshot layout.
class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
using BSU = BuiltinSnapshotUtils;
public:
BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~BuiltinSerializer() override;
......@@ -32,7 +30,6 @@ class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
Object** end) override;
void SerializeBuiltin(Code* code);
void SerializeHandler(Code* code);
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
......@@ -47,14 +44,11 @@ class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
// Stores the starting offset, within the serialized data, of each code
// object. This is later packed into the builtin snapshot, and used by the
// builtin deserializer to deserialize individual builtins and bytecode
// handlers.
// builtin deserializer to deserialize individual builtins.
//
// Indices [kFirstBuiltinIndex, kFirstBuiltinIndex + kNumberOfBuiltins[:
// Builtin offsets.
// Indices [kFirstHandlerIndex, kFirstHandlerIndex + kNumberOfHandlers[:
// Bytecode handler offsets.
uint32_t code_offsets_[BuiltinSnapshotUtils::kNumberOfCodeObjects];
uint32_t code_offsets_[Builtins::builtin_count];
DISALLOW_COPY_AND_ASSIGN(BuiltinSerializer);
};
......
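With bytecode handlers folded into the builtins table, the serializer's offsets table is indexed directly by builtin id. A minimal sketch of the corresponding lookup on the deserialization side, assuming direct access to a code_offsets array laid out as above (the helper name is illustrative, not part of this patch):

// Each builtin's serialized blob starts at code_offsets[builtin_id];
// handlers no longer occupy a separate index range.
uint32_t OffsetForCodeObject(const uint32_t* code_offsets, int builtin_id) {
  DCHECK(Builtins::IsBuiltinId(builtin_id));
  return code_offsets[builtin_id];
}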
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/snapshot/builtin-snapshot-utils.h"
namespace v8 {
namespace internal {
// static
bool BuiltinSnapshotUtils::IsBuiltinIndex(int maybe_index) {
return (kFirstBuiltinIndex <= maybe_index &&
maybe_index < kFirstBuiltinIndex + kNumberOfBuiltins);
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
// static
bool BuiltinSnapshotUtils::IsHandlerIndex(int maybe_index) {
return (kFirstHandlerIndex <= maybe_index &&
maybe_index < kFirstHandlerIndex + kNumberOfHandlers);
}
// static
int BuiltinSnapshotUtils::BytecodeToIndex(Bytecode bytecode,
OperandScale operand_scale) {
int index =
BuiltinSnapshotUtils::kNumberOfBuiltins + static_cast<int>(bytecode);
switch (operand_scale) { // clang-format off
case OperandScale::kSingle: return index;
case OperandScale::kDouble: return index + Bytecodes::kBytecodeCount;
case OperandScale::kQuadruple: return index + 2 * Bytecodes::kBytecodeCount;
} // clang-format on
UNREACHABLE();
}
// static
std::pair<interpreter::Bytecode, interpreter::OperandScale>
BuiltinSnapshotUtils::BytecodeFromIndex(int index) {
DCHECK(IsHandlerIndex(index));
const int x = index - BuiltinSnapshotUtils::kNumberOfBuiltins;
Bytecode bytecode = Bytecodes::FromByte(x % Bytecodes::kBytecodeCount);
switch (x / Bytecodes::kBytecodeCount) { // clang-format off
case 0: return {bytecode, OperandScale::kSingle};
case 1: return {bytecode, OperandScale::kDouble};
case 2: return {bytecode, OperandScale::kQuadruple};
default: UNREACHABLE();
} // clang-format on
}
// static
void BuiltinSnapshotUtils::ForEachBytecode(
const std::function<void(Bytecode, OperandScale)>& f) {
static const OperandScale kOperandScales[] = {
#define VALUE(Name, _) OperandScale::k##Name,
OPERAND_SCALE_LIST(VALUE)
#undef VALUE
};
for (OperandScale operand_scale : kOperandScales) {
for (int i = 0; i < Bytecodes::kBytecodeCount; i++) {
f(Bytecodes::FromByte(i), operand_scale);
}
}
}
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
} // namespace internal
} // namespace v8
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
#define V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
#include <functional>
#include "src/interpreter/interpreter.h"
namespace v8 {
namespace internal {
// Constants and utility methods used by builtin and bytecode handler
// (de)serialization.
class BuiltinSnapshotUtils : public AllStatic {
using Bytecode = interpreter::Bytecode;
using BytecodeOperands = interpreter::BytecodeOperands;
using Bytecodes = interpreter::Bytecodes;
using Interpreter = interpreter::Interpreter;
using OperandScale = interpreter::OperandScale;
public:
static const int kFirstBuiltinIndex = 0;
static const int kNumberOfBuiltins = Builtins::builtin_count;
#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
static const int kNumberOfCodeObjects = kNumberOfBuiltins;
#else
static const int kFirstHandlerIndex = kFirstBuiltinIndex + kNumberOfBuiltins;
static const int kNumberOfHandlers =
Bytecodes::kBytecodeCount * BytecodeOperands::kOperandScaleCount;
// The number of code objects in the builtin snapshot.
// TODO(jgruber): This could be reduced by a bit since not every
// {bytecode, operand_scale} combination has an associated handler
// (see Bytecodes::BytecodeHasHandler).
static const int kNumberOfCodeObjects = kNumberOfBuiltins + kNumberOfHandlers;
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
// Indexes into the offsets vector contained in the snapshot.
// See e.g. BuiltinSerializer::code_offsets_.
static bool IsBuiltinIndex(int maybe_index);
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
static bool IsHandlerIndex(int maybe_index);
static int BytecodeToIndex(Bytecode bytecode, OperandScale operand_scale);
// Converts an index back into the {bytecode,operand_scale} tuple. This is the
// inverse operation of BytecodeToIndex().
static std::pair<Bytecode, OperandScale> BytecodeFromIndex(int index);
// Iteration over all {bytecode,operand_scale} pairs. Implemented here since
// (de)serialization depends on the iteration order.
static void ForEachBytecode(
const std::function<void(Bytecode, OperandScale)>& f);
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
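For reference, the removed scheme above packed handler offsets behind the builtin offsets, so an index had to be computed from the bytecode and its operand scale. A worked example grounded in BytecodeToIndex above (concrete values depend on the build):

// index(bytecode, scale) = kNumberOfBuiltins
//                        + static_cast<int>(bytecode)
//                        + scale_group * Bytecodes::kBytecodeCount
// where scale_group is 0 for kSingle, 1 for kDouble, 2 for kQuadruple.
// e.g. BytecodeToIndex(Bytecode::kAdd, OperandScale::kDouble)
//        == kNumberOfBuiltins + static_cast<int>(Bytecode::kAdd)
//           + 1 * Bytecodes::kBytecodeCount

Since every handler now has its own builtin id, a table of Builtins::builtin_count offsets suffices and this mapping can be deleted.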
......@@ -167,8 +167,7 @@ bool DefaultDeserializerAllocator::ReserveSpace(
}
Heap::Reservation builtin_reservations =
builtin_deserializer->allocator()
->CreateReservationsForEagerBuiltinsAndHandlers();
builtin_deserializer->allocator()->CreateReservationsForEagerBuiltins();
DCHECK(!builtin_reservations.empty());
for (const auto& c : builtin_reservations) {
......
......@@ -69,8 +69,7 @@ template <class AllocatorT>
void Deserializer<AllocatorT>::VisitRootPointers(Root root,
const char* description,
Object** start, Object** end) {
// Builtins and bytecode handlers are deserialized in a separate pass by the
// BuiltinDeserializer.
// Builtins are deserialized in a separate pass by the BuiltinDeserializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
// The space must be new space. Any other space would cause ReadChunk to try
......
......@@ -111,8 +111,7 @@ template <class AllocatorT>
void Serializer<AllocatorT>::VisitRootPointers(Root root,
const char* description,
Object** start, Object** end) {
// Builtins and bytecode handlers are serialized in a separate pass by the
// BuiltinSerializer.
// Builtins are serialized in a separate pass by the BuiltinSerializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
for (Object** current = start; current < end; current++) {
......@@ -233,9 +232,7 @@ bool Serializer<AllocatorT>::SerializeBuiltinReference(
template <class AllocatorT>
bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) const {
if (!obj->IsCode()) return false;
Code* code = Code::cast(obj);
if (isolate()->heap()->IsDeserializeLazyHandler(code)) return false;
return (code->kind() == Code::BYTECODE_HANDLER);
return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
}
template <class AllocatorT>
......
......@@ -136,13 +136,17 @@ void Snapshot::EnsureAllBuiltinsAreDeserialized(Isolate* isolate) {
DCHECK_NE(Builtins::kDeserializeLazy, i);
Code* code = builtins->builtin(i);
if (code->builtin_index() == Builtins::kDeserializeLazy) {
if (code->builtin_index() == Builtins::LazyDeserializerForBuiltin(i)) {
code = Snapshot::DeserializeBuiltin(isolate, i);
}
DCHECK_EQ(i, code->builtin_index());
DCHECK_EQ(code, builtins->builtin(i));
}
// Re-initialize the dispatch table now that any bytecodes have been
// deserialized.
isolate->interpreter()->InitializeDispatchTable();
}
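Re-initializing the dispatch table amounts to pointing every {bytecode, operand_scale} entry at the now-materialized handler. A minimal sketch of such a loop, assuming a hypothetical Interpreter::SetDispatchTableEntry helper (the real InitializeDispatchTable may differ):

void ReinitializeDispatchTable(Isolate* isolate) {
  interpreter::Interpreter* interp = isolate->interpreter();
  for (interpreter::OperandScale scale :
       {interpreter::OperandScale::kSingle, interpreter::OperandScale::kDouble,
        interpreter::OperandScale::kQuadruple}) {
    for (int i = 0; i < interpreter::Bytecodes::kBytecodeCount; i++) {
      interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(i);
      // Skip {bytecode, operand_scale} combinations that have no handler.
      if (!interpreter::Bytecodes::BytecodeHasHandler(bytecode, scale)) continue;
      Code* handler = interp->GetBytecodeHandler(bytecode, scale);
      interp->SetDispatchTableEntry(bytecode, scale, handler);  // hypothetical
    }
  }
}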
// static
......@@ -168,44 +172,6 @@ Code* Snapshot::EnsureBuiltinIsDeserialized(Isolate* isolate,
return code;
}
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
// static
Code* Snapshot::DeserializeHandler(Isolate* isolate,
interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale) {
if (FLAG_trace_lazy_deserialization) {
PrintF("Lazy-deserializing handler %s\n",
interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str());
}
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
const v8::StartupData* blob = isolate->snapshot_blob();
Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
BuiltinSnapshotData builtin_snapshot_data(builtin_data);
CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
Code* code = builtin_deserializer.DeserializeHandler(bytecode, operand_scale);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = code->Size();
PrintF("[Deserializing handler %s (%d bytes) took %0.3f ms]\n",
interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str(),
bytes, ms);
}
if (isolate->logger()->is_listening_to_code_events() ||
isolate->is_profiling()) {
isolate->logger()->LogBytecodeHandler(bytecode, operand_scale, code);
}
return code;
}
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
void ProfileDeserialization(
const SnapshotData* startup_snapshot, const SnapshotData* builtin_snapshot,
const std::vector<SnapshotData*>& context_snapshots) {
......@@ -310,9 +276,11 @@ bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code* code) {
case Builtins::TFS:
break;
// Bytecode handlers will only ever be used by the interpreter and so there
// will never be a need to use trampolines with them.
// Bytecode handlers (and their lazy deserializers) will only ever be used
// by the interpreter and so there will never be a need to use trampolines
// with them.
case Builtins::BCH:
case Builtins::DLH:
case Builtins::API:
case Builtins::ASM:
// TODO(jgruber): Extend checks to remaining kinds.
......@@ -694,8 +662,7 @@ Vector<const byte> BuiltinSnapshotData::Payload() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
const byte* payload = data_ + kHeaderSize + reservations_size;
const int builtin_offsets_size =
BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + payload_length);
DCHECK_GT(payload_length, builtin_offsets_size);
......@@ -706,15 +673,13 @@ Vector<const uint32_t> BuiltinSnapshotData::BuiltinOffsets() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
const byte* payload = data_ + kHeaderSize + reservations_size;
const int builtin_offsets_size =
BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + payload_length);
DCHECK_GT(payload_length, builtin_offsets_size);
const uint32_t* data = reinterpret_cast<const uint32_t*>(
payload + payload_length - builtin_offsets_size);
return Vector<const uint32_t>(data,
BuiltinSnapshotUtils::kNumberOfCodeObjects);
return Vector<const uint32_t>(data, Builtins::builtin_count);
}
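Both accessors above rely on the same payload layout, sketched here for orientation (sizes taken from the code above):

// [ header (kHeaderSize) | reservations | serialized code objects | offsets ]
// The builtin offsets are the trailing Builtins::builtin_count uint32 values
// of the payload; BuiltinOffsets() returns exactly that vector.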
} // namespace internal
......
......@@ -175,14 +175,6 @@ class Snapshot : public AllStatic {
static Code* EnsureBuiltinIsDeserialized(Isolate* isolate,
Handle<SharedFunctionInfo> shared);
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
// Deserializes a single given handler code object. Intended to be called at
// runtime after the isolate has been fully initialized.
static Code* DeserializeHandler(Isolate* isolate,
interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale);
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
// ---------------- Helper methods ----------------
static bool HasContextSnapshot(Isolate* isolate, size_t index);
......
......@@ -46,7 +46,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// Deserialize eager builtins from the builtin snapshot. Note that deferred
// objects must have been deserialized prior to this.
builtin_deserializer.DeserializeEagerBuiltinsAndHandlers();
builtin_deserializer.DeserializeEagerBuiltins();
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
......@@ -64,10 +64,6 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate, LogCodeObjects());
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
// Log bytecode handlers if they haven't already been logged as builtins
LOG_CODE_EVENT(isolate, LogBytecodeHandlers());
#endif // V8_EMBEDDED_BYTECODE_HANDLERS
LOG_CODE_EVENT(isolate, LogCompiledFunctions());
isolate->builtins()->MarkInitialized();
......
......@@ -5049,6 +5049,35 @@ TEST(InterpreterWithNativeStack) {
interpreter_entry_trampoline->InstructionStart());
}
TEST(InterpreterGetAndMaybeDeserializeBytecodeHandler) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Interpreter* interpreter = isolate->interpreter();
// Test that single-width bytecode handlers deserialize correctly.
Code* wide_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
Bytecode::kWide, OperandScale::kSingle);
CHECK_EQ(wide_handler->builtin_index(), Builtins::kWideHandler);
Code* add_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
Bytecode::kAdd, OperandScale::kSingle);
CHECK_EQ(add_handler->builtin_index(), Builtins::kAddHandler);
// Test that double-width bytecode handlers deserialize correctly, including
// an illegal bytecode handler since there is no Wide.Wide handler.
Code* wide_wide_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
Bytecode::kWide, OperandScale::kDouble);
CHECK_EQ(wide_wide_handler->builtin_index(), Builtins::kIllegalHandler);
Code* add_wide_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
Bytecode::kAdd, OperandScale::kDouble);
CHECK_EQ(add_wide_handler->builtin_index(), Builtins::kAddWideHandler);
}
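A short usage sketch of the same API outside the test harness, assuming a fully initialized isolate (the chosen bytecode is arbitrary):

// Fetch, and on first use deserialize, the handler for a given bytecode.
void FetchLdaZeroHandler(Isolate* isolate) {
  Code* handler = isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
      interpreter::Bytecode::kLdaZero, interpreter::OperandScale::kSingle);
  CHECK_EQ(Code::BYTECODE_HANDLER, handler->kind());
}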
} // namespace interpreter
} // namespace internal
} // namespace v8
......@@ -4,8 +4,6 @@
#include "test/cctest/setup-isolate-for-tests.h"
#include "src/interpreter/setup-interpreter.h"
namespace v8 {
namespace internal {
......@@ -15,15 +13,6 @@ void SetupIsolateDelegateForTests::SetupBuiltins(Isolate* isolate) {
}
}
void SetupIsolateDelegateForTests::SetupInterpreter(
interpreter::Interpreter* interpreter) {
#ifndef V8_EMBEDDED_BYTECODE_HANDLERS
if (create_heap_objects_) {
interpreter::SetupInterpreter::InstallBytecodeHandlers(interpreter);
}
#endif
}
bool SetupIsolateDelegateForTests::SetupHeap(Heap* heap) {
if (create_heap_objects_) {
return SetupHeapInternal(heap);
......
......@@ -18,8 +18,6 @@ class SetupIsolateDelegateForTests : public SetupIsolateDelegate {
void SetupBuiltins(Isolate* isolate) override;
void SetupInterpreter(interpreter::Interpreter* interpreter) override;
bool SetupHeap(Heap* heap) override;
};
......