Commit b10d24ff authored by rmcilroy's avatar rmcilroy Committed by Commit bot

[Interpreter] Add basic deoptimization support from TurboFan to Ignition.

Adds support for generating deoptimization translations for interpreter
stack frames, and for building interpreter frames from these translations
when a function deopts. Also adds the InterpreterNotifyDeoptimized
builtins, which resume the function's continuation at the correct point
in the interpreter after a deopt.

MIPS patch contributed by balazs.kilvady@imgtec.com

BUG=v8:4280
LOG=N
TEST=test-deoptimization.cc with --ignition and --turbo

Review URL: https://codereview.chromium.org/1528913003

Cr-Commit-Position: refs/heads/master@{#32971}
parent a4e3a3b6
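
For orientation, the TEST= line above refers to cctest's test-deoptimization.cc run with --ignition and --turbo. Below is a minimal sketch of the kind of check such a run performs; it is illustrative only (the test name and exact assertions are assumptions, not part of this patch), and the flags are expected on the command line as in the TEST= line.

// Sketch of a cctest-style check: optimize a function with TurboFan, force a
// deopt, and verify the call still returns the right value once execution
// resumes in Ignition. Run with --ignition --turbo --allow-natives-syntax.
#include "test/cctest/cctest.h"

TEST(InterpreterDeoptSketch) {
  v8::HandleScope scope(CcTest::isolate());
  LocalContext env;
  v8::Local<v8::Value> result = CompileRun(
      "function f(a, b) { return a + b; }"
      "f(1, 2); f(1, 2);"
      "%OptimizeFunctionOnNextCall(f);"
      "f(1, 2);"
      "%DeoptimizeFunction(f);"
      "f(1, 2);");
  CHECK_EQ(3, result->Int32Value(env.local()).FromJust());
}
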
@@ -946,6 +946,93 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
static void Generate_InterpreterNotifyDeoptimizedHelper(
MacroAssembler* masm, Deoptimizer::BailoutType type) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(kInterpreterAccumulatorRegister); // Save accumulator register.
// Pass the deoptimization type to the runtime system.
__ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r1);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
__ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
// Tear down internal frame.
}
// Drop state (we don't use these for interpreter deopts) and push PC at top
// of stack (to simulate initial call to bytecode handler in interpreter entry
// trampoline).
__ pop(r1);
__ Drop(1);
__ push(r1);
// Initialize register file register and dispatch table register.
__ add(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ LoadRoot(kInterpreterDispatchTableRegister,
Heap::kInterpreterTableRootIndex);
__ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Get the context from the frame.
// TODO(rmcilroy): Update interpreter frame to expect current context at the
// context slot instead of the function context.
__ ldr(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
__ ldr(r1,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kFunctionFromRegisterPointer));
__ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r1, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
__ ldr(kInterpreterBytecodeOffsetRegister,
MemOperand(
kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ mov(pc, ip);
}
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
...
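
The tail of the helper above (repeated per architecture below) re-derives the interpreter dispatch after the deopt: reload the bytecode array and bytecode offset from the frame, fetch the current bytecode, index the dispatch table, and jump to the handler. As a self-contained toy model of that sequence (plain standalone C++, not V8 code; a loop stands in for the handler-to-handler threading the real interpreter uses):

#include <array>
#include <cstdint>
#include <iostream>

// Stand-ins for bytecode handlers reached through the dispatch table.
using Handler = void (*)(int& accumulator);
void HandleLdaSmi(int& accumulator) { accumulator = 42; }
void HandleReturn(int& accumulator) {
  std::cout << "return " << accumulator << "\n";
}

int main() {
  // Stand-ins for the bytecode array, dispatch table and bytecode offset the
  // builtin restores from the deoptimized interpreter frame.
  const std::array<uint8_t, 2> bytecode_array = {0, 1};  // LdaSmi, Return
  const std::array<Handler, 2> dispatch_table = {HandleLdaSmi, HandleReturn};
  int accumulator = 0;  // the saved/restored accumulator register
  for (size_t bytecode_offset = 0; bytecode_offset < bytecode_array.size();
       ++bytecode_offset) {
    dispatch_table[bytecode_array[bytecode_offset]](accumulator);
  }
}
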
@@ -905,6 +905,94 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
static void Generate_InterpreterNotifyDeoptimizedHelper(
MacroAssembler* masm, Deoptimizer::BailoutType type) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
// Pass the deoptimization type to the runtime system.
__ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type))));
__ Push(x1);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
__ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
// Tear down internal frame.
}
// Drop state (we don't use these for interpreter deopts) and push PC at top
// of stack (to simulate initial call to bytecode handler in interpreter entry
// trampoline).
__ Pop(x1);
__ Drop(1);
__ Push(x1);
// Initialize register file register and dispatch table register.
__ Add(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ LoadRoot(kInterpreterDispatchTableRegister,
Heap::kInterpreterTableRootIndex);
__ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Get the context from the frame.
// TODO(rmcilroy): Update interpreter frame to expect current context at the
// context slot instead of the function context.
__ Ldr(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
__ Ldr(x1,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kFunctionFromRegisterPointer));
__ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister,
kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
__ Ldr(kInterpreterBytecodeOffsetRegister,
MemOperand(
kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
__ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip0);
}
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
...
This diff is collapsed.
@@ -559,7 +559,9 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
              (1 + descriptor->parameters_count())));
      break;
    case FrameStateType::kInterpretedFunction:
-     // TODO(rmcilroy): Implement interpreted function translation.
+     translation->BeginInterpretedFrame(
+         descriptor->bailout_id(), shared_info_id,
+         static_cast<unsigned int>(descriptor->locals_count()));
      break;
    case FrameStateType::kArgumentsAdaptor:
      translation->BeginArgumentsAdaptorFrame(
...
This diff is collapsed.
@@ -112,6 +112,7 @@ class TranslatedFrame {
 public:
  enum Kind {
    kFunction,
+   kInterpretedFunction,
    kGetter,
    kSetter,
    kArgumentsAdaptor,
@@ -172,6 +173,9 @@ class TranslatedFrame {
  // Constructor static methods.
  static TranslatedFrame JSFrame(BailoutId node_id,
                                 SharedFunctionInfo* shared_info, int height);
+ static TranslatedFrame InterpretedFrame(BailoutId bytecode_offset,
+                                         SharedFunctionInfo* shared_info,
+                                         int height);
  static TranslatedFrame AccessorFrame(Kind kind,
                                       SharedFunctionInfo* shared_info);
  static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
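
The .cc side of these declarations is not shown here (the deoptimizer.cc diff appears to be one of the collapsed diffs on this page). A plausible shape for the new constructor, mirroring the existing JSFrame() helper — a sketch, not verbatim from the patch, and the reuse of the node_id_ field is an assumption:

TranslatedFrame TranslatedFrame::InterpretedFrame(
    BailoutId bytecode_offset, SharedFunctionInfo* shared_info, int height) {
  // Key the frame by bytecode offset rather than an AST node id (assumed to
  // be stored in the same field JSFrame() uses).
  TranslatedFrame frame(kInterpretedFunction, shared_info->GetIsolate(),
                        shared_info, height);
  frame.node_id_ = bytecode_offset;
  return frame;
}
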
@@ -589,6 +593,7 @@ class Deoptimizer : public Malloced {
  void DoComputeOutputFrames();
  void DoComputeJSFrame(int frame_index);
+ void DoComputeInterpretedFrame(int frame_index);
  void DoComputeArgumentsAdaptorFrame(int frame_index);
  void DoComputeConstructStubFrame(int frame_index);
  void DoComputeAccessorStubFrame(int frame_index, bool is_setter_stub_frame);
@@ -606,7 +611,8 @@ class Deoptimizer : public Malloced {
                    const char* debug_hint_string);
  unsigned ComputeInputFrameSize() const;
- unsigned ComputeFixedSize(JSFunction* function) const;
+ unsigned ComputeJavascriptFixedSize(JSFunction* function) const;
+ unsigned ComputeInterpretedFixedSize(JSFunction* function) const;
  unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
  static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
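
ComputeJavascriptFixedSize and ComputeInterpretedFixedSize are defined in the not-shown deoptimizer.cc. By analogy with the JavaScript variant, the interpreted one presumably just swaps in the larger interpreter fixed frame size; a sketch under that assumption:

unsigned Deoptimizer::ComputeInterpretedFixedSize(JSFunction* function) const {
  // Fixed part of an interpreted frame: the incoming arguments plus the
  // interpreter's fixed slots, which per the InterpreterFrameConstants change
  // further down also cover new.target and the bytecode offset.
  return ComputeIncomingArgumentSize(function) +
         InterpreterFrameConstants::kFixedFrameSize;
}
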
@@ -951,6 +957,7 @@ class TranslationIterator BASE_EMBEDDED {
#define TRANSLATION_OPCODE_LIST(V) \
  V(BEGIN)                         \
  V(JS_FRAME)                      \
+ V(INTERPRETED_FRAME)             \
  V(CONSTRUCT_STUB_FRAME)          \
  V(GETTER_STUB_FRAME)             \
  V(SETTER_STUB_FRAME)             \
@@ -996,6 +1003,8 @@ class Translation BASE_EMBEDDED {
  // Commands.
  void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
+ void BeginInterpretedFrame(BailoutId bytecode_offset, int literal_id,
+                            unsigned height);
  void BeginCompiledStubFrame(int height);
  void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
  void BeginConstructStubFrame(int literal_id, unsigned height);
...
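
The writer for the new opcode lives in the same not-shown .cc file. It most likely mirrors BeginJSFrame and records the opcode plus three operands — which is exactly what the INTERPRETED_FRAME printer added to objects.cc further down reads back. A sketch under that assumption:

void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
                                        int literal_id, unsigned height) {
  // Operand order assumed: bytecode offset, literal id of the
  // SharedFunctionInfo, then frame height.
  buffer_->Add(INTERPRETED_FRAME, zone());
  buffer_->Add(bytecode_offset.ToInt(), zone());
  buffer_->Add(literal_id, zone());
  buffer_->Add(height, zone());
}
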
@@ -937,8 +937,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
  TranslationIterator it(data->TranslationByteArray(),
                         data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK_EQ(Translation::BEGIN, opcode);
+ Translation::Opcode frame_opcode =
+     static_cast<Translation::Opcode>(it.Next());
+ DCHECK_EQ(Translation::BEGIN, frame_opcode);
  it.Next();  // Drop frame count.
  int jsframe_count = it.Next();
@@ -946,8 +947,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
  // in the deoptimization translation are ordered bottom-to-top.
  bool is_constructor = IsConstructor();
  while (jsframe_count != 0) {
-   opcode = static_cast<Translation::Opcode>(it.Next());
-   if (opcode == Translation::JS_FRAME) {
+   frame_opcode = static_cast<Translation::Opcode>(it.Next());
+   if (frame_opcode == Translation::JS_FRAME ||
+       frame_opcode == Translation::INTERPRETED_FRAME) {
      jsframe_count--;
      BailoutId const ast_id = BailoutId(it.Next());
      SharedFunctionInfo* const shared_info =
@@ -956,7 +958,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
      // The translation commands are ordered and the function is always
      // at the first position, and the receiver is next.
-     opcode = static_cast<Translation::Opcode>(it.Next());
+     Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
      // Get the correct function in the optimized frame.
      JSFunction* function;
@@ -993,25 +995,33 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
      }
      Code* const code = shared_info->code();
-     DeoptimizationOutputData* const output_data =
-         DeoptimizationOutputData::cast(code->deoptimization_data());
-     unsigned const entry =
-         Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
-     unsigned const pc_offset =
-         FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
-     DCHECK_NE(0U, pc_offset);
+     unsigned pc_offset;
+     if (frame_opcode == Translation::JS_FRAME) {
+       DeoptimizationOutputData* const output_data =
+           DeoptimizationOutputData::cast(code->deoptimization_data());
+       unsigned const entry =
+           Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
+       pc_offset =
+           FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
+       DCHECK_NE(0U, pc_offset);
+     } else {
+       // TODO(rmcilroy): Modify FrameSummary to enable us to summarize
+       // based on the BytecodeArray and bytecode offset.
+       DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
+       pc_offset = 0;
+     }
      FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
      frames->Add(summary);
      is_constructor = false;
-   } else if (opcode == Translation::CONSTRUCT_STUB_FRAME) {
+   } else if (frame_opcode == Translation::CONSTRUCT_STUB_FRAME) {
      // The next encountered JS_FRAME will be marked as a constructor call.
-     it.Skip(Translation::NumberOfOperandsFor(opcode));
+     it.Skip(Translation::NumberOfOperandsFor(frame_opcode));
      DCHECK(!is_constructor);
      is_constructor = true;
    } else {
      // Skip over operands to advance to the next opcode.
-     it.Skip(Translation::NumberOfOperandsFor(opcode));
+     it.Skip(Translation::NumberOfOperandsFor(frame_opcode));
    }
  }
  DCHECK(!is_constructor);
@@ -1083,7 +1093,8 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
    opcode = static_cast<Translation::Opcode>(it.Next());
    // Skip over operands to advance to the next opcode.
    it.Skip(Translation::NumberOfOperandsFor(opcode));
-   if (opcode == Translation::JS_FRAME) {
+   if (opcode == Translation::JS_FRAME ||
+       opcode == Translation::INTERPRETED_FRAME) {
      jsframe_count--;
      // The translation commands are ordered and the function is always at the
...
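
Both loops above now treat JS_FRAME and INTERPRETED_FRAME uniformly when counting JavaScript frames. A small predicate captures the pattern (hypothetical helper for illustration; the patch inlines the check instead):

static bool TranslationOpcodeIsJavaScriptFrame(Translation::Opcode opcode) {
  // Both opcodes begin a frame that corresponds to a JavaScript activation
  // in the deoptimization translation.
  return opcode == Translation::JS_FRAME ||
         opcode == Translation::INTERPRETED_FRAME;
}
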
@@ -176,6 +176,12 @@ class ConstructFrameConstants : public AllStatic {
class InterpreterFrameConstants : public AllStatic {
 public:
+ // Fixed frame includes new.target and bytecode offset.
+ static const int kFixedFrameSize =
+     StandardFrameConstants::kFixedFrameSize + 2 * kPointerSize;
+ static const int kFixedFrameSizeFromFp =
+     StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kPointerSize;
+
  // FP-relative.
  static const int kRegisterFilePointerFromFp =
      -StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kPointerSize;
...
@@ -603,7 +603,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
  __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
- // Push context as a stack located parameter to the bytecode handler.
+ // Push dispatch table as a stack located parameter to the bytecode handler.
  DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
  __ push(ebx);
@@ -733,6 +733,90 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
static void Generate_InterpreterNotifyDeoptimizedHelper(
MacroAssembler* masm, Deoptimizer::BailoutType type) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
__ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
// Tear down internal frame.
}
// Initialize register file register.
__ mov(kInterpreterRegisterFileRegister, ebp);
__ add(kInterpreterRegisterFileRegister,
Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
// Get the bytecode array pointer from the frame.
__ mov(ebx, Operand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kFunctionFromRegisterPointer));
__ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
ebx);
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
__ mov(
kInterpreterBytecodeOffsetRegister,
Operand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Push dispatch table as a stack located parameter to the bytecode handler -
// overwrite the state slot (we don't use these for interpreter deopts).
__ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
__ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
__ mov(Operand(esp, -2 * kPointerSize), ebx);
// Dispatch to the target bytecode.
__ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
// Get the context from the frame.
// TODO(rmcilroy): Update interpreter frame to expect current context at the
// context slot instead of the function context.
__ mov(kContextRegister,
Operand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
// and header removal.
__ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ebx);
}
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
...
@@ -942,6 +942,96 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
static void Generate_InterpreterNotifyDeoptimizedHelper(
MacroAssembler* masm, Deoptimizer::BailoutType type) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(kInterpreterAccumulatorRegister); // Save accumulator register.
// Pass the deoptimization type to the runtime system.
__ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a1);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
__ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
// Tear down internal frame.
}
// Drop state (we don't use these for interpreter deopts) and push PC at top
// of stack (to simulate initial call to bytecode handler in interpreter entry
// trampoline).
__ lw(a1, MemOperand(sp));
__ Drop(1);
__ sw(a1, MemOperand(sp));
// Initialize register file register and dispatch table register.
__ Addu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ LoadRoot(kInterpreterDispatchTableRegister,
Heap::kInterpreterTableRootIndex);
__ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Get the context from the frame.
// TODO(rmcilroy): Update interpreter frame to expect current context at the
// context slot instead of the function context.
__ lw(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
__ lw(a1,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kFunctionFromRegisterPointer));
__ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, at);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
Operand(BYTECODE_ARRAY_TYPE));
}
// Get the target bytecode offset from the frame.
__ lw(kInterpreterBytecodeOffsetRegister,
MemOperand(
kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
__ sll(a1, a1, kPointerSizeLog2);
__ Addu(a1, kInterpreterDispatchTableRegister, a1);
__ lw(a1, MemOperand(a1));
__ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a1);
}
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
...
@@ -933,6 +933,96 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
static void Generate_InterpreterNotifyDeoptimizedHelper(
MacroAssembler* masm, Deoptimizer::BailoutType type) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(kInterpreterAccumulatorRegister); // Save accumulator register.
// Pass the deoptimization type to the runtime system.
__ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a1);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
__ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
// Tear down internal frame.
}
// Drop state (we don't use these for interpreter deopts) and push PC at top
// of stack (to simulate initial call to bytecode handler in interpreter entry
// trampoline).
__ ld(a1, MemOperand(sp));
__ Drop(1);
__ sd(a1, MemOperand(sp));
// Initialize register file register and dispatch table register.
__ Daddu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ LoadRoot(kInterpreterDispatchTableRegister,
Heap::kInterpreterTableRootIndex);
__ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Get the context from the frame.
// TODO(rmcilroy): Update interpreter frame to expect current context at the
// context slot instead of the function context.
__ ld(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
__ ld(a1,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kFunctionFromRegisterPointer));
__ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, at);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
Operand(BYTECODE_ARRAY_TYPE));
}
// Get the target bytecode offset from the frame.
__ ld(kInterpreterBytecodeOffsetRegister,
MemOperand(
kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
__ dsll(a1, a1, kPointerSizeLog2);
__ Daddu(a1, kInterpreterDispatchTableRegister, a1);
__ ld(a1, MemOperand(a1));
__ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a1);
}
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
...
@@ -14350,6 +14350,17 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
        break;
      }
case Translation::INTERPRETED_FRAME: {
int bytecode_offset = iterator.Next();
int shared_info_id = iterator.Next();
unsigned height = iterator.Next();
Object* shared_info = LiteralArray()->get(shared_info_id);
os << "{bytecode_offset=" << bytecode_offset << ", function="
<< Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
<< ", height=" << height << "}";
break;
}
      case Translation::JS_FRAME_FUNCTION: {
        os << "{function}";
        break;
...
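
The printer above decodes the same three operands the translation writer emits. The deoptimizer's reader (again in the not-shown deoptimizer.cc) presumably consumes them the same way when materializing a TranslatedFrame; a sketch, with the iterator/literal-array plumbing assumed from the surrounding TranslatedState code:

static TranslatedFrame DecodeInterpretedFrame(TranslationIterator* iterator,
                                              FixedArray* literal_array) {
  // Hypothetical helper: read bytecode offset, shared function info and frame
  // height, then build the interpreted TranslatedFrame declared earlier.
  BailoutId bytecode_offset = BailoutId(iterator->Next());
  SharedFunctionInfo* shared_info =
      SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
  int height = iterator->Next();
  return TranslatedFrame::InterpretedFrame(bytecode_offset, shared_info,
                                           height);
}
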
@@ -784,6 +784,94 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
static void Generate_InterpreterNotifyDeoptimizedHelper(
MacroAssembler* masm, Deoptimizer::BailoutType type) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
__ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
// Tear down internal frame.
}
// Drop state (we don't use these for interpreter deopts) and push PC at top
// of stack (to simulate initial call to bytecode handler in interpreter entry
// trampoline).
__ Pop(rbx);
__ Drop(1);
__ Push(rbx);
// Initialize register file register and dispatch table register.
__ movp(kInterpreterRegisterFileRegister, rbp);
__ addp(kInterpreterRegisterFileRegister,
Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ LoadRoot(kInterpreterDispatchTableRegister,
Heap::kInterpreterTableRootIndex);
__ addp(kInterpreterDispatchTableRegister,
Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
// Get the context from the frame.
// TODO(rmcilroy): Update interpreter frame to expect current context at the
// context slot instead of the function context.
__ movp(kContextRegister,
Operand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
__ movp(rbx,
Operand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kFunctionFromRegisterPointer));
__ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rbx);
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
__ movp(
kInterpreterBytecodeOffsetRegister,
Operand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
times_pointer_size, 0));
__ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rbx);
}
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
...