Commit 064ca18c authored by Patrick Thier, committed by Commit Bot

Reland "[sparkplug] OSR Ignition -> Sparkplug"

This is a reland of b9c521d0.

Fixes crashes by calling kInstallBaselineCode from BaselineEntry if
needed, i.e. when there is no feedback vector (required a bit of
register rejiggling).
This can happen with cross-realm calls: the OSR arming is stored as
part of the BytecodeArray and is therefore shared across realms, so an
armed back edge can be hit in a realm whose closure has not allocated a
feedback vector yet.
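
In plain terms, the fix turns the missing feedback vector into a slow path
instead of a crash: check the vector, install baseline code (which allocates
it) if it is absent, and retry the entry. The standalone C++ sketch below
illustrates only that check/install/retry control flow; the types and function
names are hypothetical stand-ins, not V8 API.

#include <iostream>
#include <memory>

// Hypothetical stand-ins for V8 objects; illustrative only, not V8 API.
struct FeedbackVector {};

struct Function {
  // Allocated lazily; may still be null when an armed back edge fires in a
  // realm that has never called this function before.
  std::unique_ptr<FeedbackVector> feedback_vector;
  bool has_baseline_code = false;
};

// Models the Runtime::kInstallBaselineCode fallback: allocate the feedback
// vector and install baseline code on the function.
void InstallBaselineCode(Function& f) {
  f.feedback_vector = std::make_unique<FeedbackVector>();
  f.has_baseline_code = true;
  std::cout << "installed baseline code + feedback vector\n";
}

// Models the BaselineEntry builtin: check, install if needed, retry.
void BaselineEntry(Function& f) {
  while (true) {  // "retry from the start" loop
    if (f.feedback_vector == nullptr) {
      InstallBaselineCode(f);
      continue;  // retry now that the prerequisites exist
    }
    std::cout << "entering baseline code\n";
    return;
  }
}

int main() {
  Function f;        // e.g. a closure from a realm that never ran it
  BaselineEntry(f);  // first entry installs, then enters
  BaselineEntry(f);  // subsequent entries go straight in
}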

Original change's description:
> [sparkplug] OSR Ignition -> Sparkplug
>
> Add support for OSR to baseline code.
> We compile baseline and perform OSR immediately when the bytecode budget
> interrupt hits.
>
> Drive-by: Clean-up deoptimizer special handling of JumpLoop by using
> the newly introduced GetBaselinePCForNextExecutedBytecode instead of
> GetBaselineEndPCForBytecodeOffset.
>
> Bug: v8:11420
> Change-Id: Ifbea264d4a83a127dd2a11e28626bf2a5e8aca59
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2784687
> Commit-Queue: Patrick Thier <pthier@chromium.org>
> Reviewed-by: Leszek Swirski <leszeks@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#73677}
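
As a rough mental model of the flow the quoted description summarizes
(heavily simplified, hypothetical names, not the actual V8 implementation):
the bytecode budget interrupt compiles baseline code and arms the loop's back
edges, and the next JumpLoop back edge then continues the loop in baseline
code via OSR instead of dispatching another interpreted iteration.

#include <cstdio>

// Illustrative sketch of the tier-up trigger described above; names are
// hypothetical and the logic is simplified, not the V8 code.
struct BytecodeArray {
  int osr_nesting_level = 0;  // 0 = back edges disarmed
};

struct Function {
  BytecodeArray bytecode;
  bool has_baseline_code = false;
  int budget = 3;  // bytecode budget; real budgets are much larger
};

// Runs on the bytecode-budget interrupt: compile baseline code and arm the
// loop back edges so the next JumpLoop performs OSR instead of looping on.
void BudgetInterrupt(Function& f) {
  f.has_baseline_code = true;
  f.bytecode.osr_nesting_level = 1;  // "armed"
}

// Models a JumpLoop back edge: either OSR into baseline or keep interpreting.
bool JumpLoop(Function& f) {
  if (--f.budget <= 0) BudgetInterrupt(f);
  if (f.bytecode.osr_nesting_level > 0 && f.has_baseline_code) {
    std::puts("OSR: continuing this loop in baseline code");
    f.bytecode.osr_nesting_level = 0;  // disarm after entering
    return false;                      // leave the interpreter loop
  }
  return true;  // keep interpreting
}

int main() {
  Function f;
  while (JumpLoop(f)) std::puts("interpreting one loop iteration");
}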

Bug: v8:11420
Change-Id: I67325450514ed5a1170b730b1dd59fa6acc6e1d8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2800112
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Patrick Thier <pthier@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73803}
parent d0778a8d
@@ -1784,6 +1784,20 @@ void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
 }
 namespace {
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+                       Operand offset = Operand::Zero()) {
+  // Compute the target address = entry_address + offset
+  if (offset.IsImmediate() && offset.immediate() == 0) {
+    __ mov(lr, entry_address);
+  } else {
+    __ add(lr, entry_address, offset);
+  }
+  // "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -1817,11 +1831,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
     __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
                    DeoptimizationData::kOsrPcOffsetIndex)));
-    // Compute the target address = code start + osr_offset
-    __ add(lr, r0, Operand::SmiUntag(r1));
-    // And "return" to the OSR entry point of the function.
-    __ Ret();
+    Generate_OSREntry(masm, r0, Operand::SmiUntag(r1));
   }
 }
 }  // namespace
@@ -3466,30 +3476,34 @@ namespace {
 // Converts an interpreter frame into a baseline frame and continues execution
 // in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
-  // Get bytecode array and bytecode offset from the stack frame.
-  __ ldr(kInterpreterBytecodeArrayRegister,
-         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ ldr(kInterpreterBytecodeOffsetRegister,
-         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+                            bool is_osr = false) {
+  __ Push(kInterpreterAccumulatorRegister);
+  Label start;
+  __ bind(&start);
   // Get function from the frame.
   Register closure = r1;
   __ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  // Replace BytecodeOffset with the feedback vector.
+  // Load the feedback vector.
   Register feedback_vector = r2;
   __ ldr(feedback_vector,
          FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
   __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-  if (__ emit_debug_code()) {
-    Register scratch = r3;
-    __ CompareObjectType(feedback_vector, scratch, scratch,
-                         FEEDBACK_VECTOR_TYPE);
-    __ Assert(eq, AbortReason::kExpectedFeedbackVector);
-  }
+  Label install_baseline_code;
+  // Check if feedback vector is valid. If not, call prepare for baseline to
+  // allocate it.
+  __ CompareObjectType(feedback_vector, r3, r3, FEEDBACK_VECTOR_TYPE);
+  __ b(ne, &install_baseline_code);
+  // Save BytecodeOffset from the stack frame.
+  __ ldr(kInterpreterBytecodeOffsetRegister,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+  // Replace BytecodeOffset with the feedback vector.
   __ str(feedback_vector,
          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   feedback_vector = no_reg;
@@ -3502,14 +3516,16 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
          FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
   __ ldr(code_obj,
          FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-  closure = no_reg;
   // Compute baseline pc for bytecode offset.
-  __ Push(kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
+  ExternalReference get_baseline_pc_extref;
+  if (next_bytecode || is_osr) {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_next_executed_bytecode();
+  } else {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_bytecode_offset();
+  }
   Register get_baseline_pc = r3;
   __ Move(get_baseline_pc, get_baseline_pc_extref);
@@ -3519,56 +3535,71 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   // TODO(pthier): Investigate if it is feasible to handle this special case
   // in TurboFan instead of here.
   Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmp(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                 kFunctionEntryBytecodeOffset));
-  __ b(eq, &function_entry_bytecode);
-  __ bind(&valid_bytecode_offset);
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    Register bytecode = r1;
-    __ ldrb(bytecode, MemOperand(kInterpreterBytecodeArrayRegister,
-                                 kInterpreterBytecodeOffsetRegister));
-    __ cmp(bytecode,
-           Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ b(ne, &not_jump_loop);
-    __ Move(get_baseline_pc,
-            ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
+  if (!is_osr) {
+    __ cmp(kInterpreterBytecodeOffsetRegister,
+           Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                   kFunctionEntryBytecodeOffset));
+    __ b(eq, &function_entry_bytecode);
   }
   __ sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ bind(&valid_bytecode_offset);
+  // Get bytecode array from the stack frame.
+  __ ldr(kInterpreterBytecodeArrayRegister,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
   {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(3, 0, r0);
     Register arg_reg_1 = r0;
     Register arg_reg_2 = r1;
     Register arg_reg_3 = r2;
     __ mov(arg_reg_1, code_obj);
     __ mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
     __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(3, 0);
     __ CallCFunction(get_baseline_pc, 3, 0);
   }
-  __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ add(code_obj, code_obj, kReturnRegister0);
   __ Pop(kInterpreterAccumulatorRegister);
-  __ Jump(code_obj);
+  if (is_osr) {
+    // Reset the OSR loop nesting depth to disarm back edges.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+    // Sparkplug here.
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.Acquire();
+    __ mov(scratch, Operand(0));
+    __ strh(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                     BytecodeArray::kOsrNestingLevelOffset));
+    Generate_OSREntry(masm, code_obj,
+                      Operand(Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Jump(code_obj);
+  }
   __ Trap();  // Unreachable.
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ Move(get_baseline_pc,
-          ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ b(&valid_bytecode_offset);
+  if (!is_osr) {
+    __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryOffset, get the start address of
+    // the first bytecode.
+    __ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
+    if (next_bytecode) {
+      __ Move(get_baseline_pc,
+              ExternalReference::baseline_pc_for_bytecode_offset());
+    }
+    __ b(&valid_bytecode_offset);
+  }
+  __ bind(&install_baseline_code);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(closure);
+    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+  }
+  // Retry from the start after installing baseline code.
+  __ b(&start);
 }
 }  // namespace
@@ -3581,6 +3612,11 @@ void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, true);
 }
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+    MacroAssembler* masm) {
+  Generate_BaselineEntry(masm, false, true);
+}
+
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
......
@@ -2023,6 +2023,27 @@ void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
 }
 namespace {
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+                       Operand offset = Operand(0)) {
+  // Pop the return address to this function's caller from the return stack
+  // buffer, since we'll never return to it.
+  Label jump;
+  __ Adr(lr, &jump);
+  __ Ret();
+  __ Bind(&jump);
+  UseScratchRegisterScope temps(masm);
+  temps.Exclude(x17);
+  if (offset.IsZero()) {
+    __ Mov(x17, entry_address);
+  } else {
+    __ Add(x17, entry_address, offset);
+  }
+  __ Br(x17);
+}
+
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2053,22 +2074,12 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
         x1, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
                 DeoptimizationData::kOsrPcOffsetIndex)));
-    // Pop the return address to this function's caller from the return stack
-    // buffer, since we'll never return to it.
-    Label jump;
-    __ Adr(lr, &jump);
-    __ Ret();
-    __ Bind(&jump);
     // Compute the target address = code_obj + header_size + osr_offset
     // <entry_addr> = <code_obj> + #header_size + <osr_offset>
     __ Add(x0, x0, x1);
-    UseScratchRegisterScope temps(masm);
-    temps.Exclude(x17);
-    __ Add(x17, x0, Code::kHeaderSize - kHeapObjectTag);
-    __ Br(x17);
+    Generate_OSREntry(masm, x0, Code::kHeaderSize - kHeapObjectTag);
   }
 }
 }  // namespace
 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
@@ -3973,31 +3984,35 @@ namespace {
 // Converts an interpreter frame into a baseline frame and continues execution
 // in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
-  // Get bytecode array and bytecode offset from the stack frame.
-  __ Ldr(kInterpreterBytecodeArrayRegister,
-         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
-              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+                            bool is_osr = false) {
+  __ Push(padreg, kInterpreterAccumulatorRegister);
+  Label start;
+  __ bind(&start);
   // Get function from the frame.
   Register closure = x1;
   __ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  // Replace BytecodeOffset with the feedback vector.
+  // Load the feedback vector.
   Register feedback_vector = x2;
   __ LoadTaggedPointerField(
       feedback_vector,
       FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
   __ LoadTaggedPointerField(
       feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-  if (__ emit_debug_code()) {
-    Register scratch = x3;
-    __ CompareObjectType(feedback_vector, scratch, scratch,
-                         FEEDBACK_VECTOR_TYPE);
-    __ Assert(eq, AbortReason::kExpectedFeedbackVector);
-  }
+  Label install_baseline_code;
+  // Check if feedback vector is valid. If not, call prepare for baseline to
+  // allocate it.
+  __ CompareObjectType(feedback_vector, x3, x3, FEEDBACK_VECTOR_TYPE);
+  __ B(ne, &install_baseline_code);
+  // Save BytecodeOffset from the stack frame.
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  // Replace BytecodeOffset with the feedback vector.
   __ Str(feedback_vector,
          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   feedback_vector = no_reg;
@@ -4012,14 +4027,16 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
   __ LoadTaggedPointerField(
       code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-  closure = no_reg;
   // Compute baseline pc for bytecode offset.
-  __ Push(padreg, kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
+  ExternalReference get_baseline_pc_extref;
+  if (next_bytecode || is_osr) {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_next_executed_bytecode();
+  } else {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_bytecode_offset();
+  }
   Register get_baseline_pc = x3;
   __ Mov(get_baseline_pc, get_baseline_pc_extref);
@@ -4029,55 +4046,66 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   // TODO(pthier): Investigate if it is feasible to handle this special case
   // in TurboFan instead of here.
   Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmp(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                 kFunctionEntryBytecodeOffset));
-  __ B(eq, &function_entry_bytecode);
-  __ bind(&valid_bytecode_offset);
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    Register bytecode = x1;
-    __ Ldrb(bytecode, MemOperand(kInterpreterBytecodeArrayRegister,
-                                 kInterpreterBytecodeOffsetRegister));
-    __ Cmp(bytecode,
-           Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ B(ne, &not_jump_loop);
-    __ Mov(get_baseline_pc,
-           ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
+  if (!is_osr) {
+    __ cmp(kInterpreterBytecodeOffsetRegister,
+           Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                   kFunctionEntryBytecodeOffset));
+    __ B(eq, &function_entry_bytecode);
   }
   __ Sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
          (BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ bind(&valid_bytecode_offset);
+  // Get bytecode array from the stack frame.
+  __ ldr(kInterpreterBytecodeArrayRegister,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
   {
+    FrameScope scope(masm, StackFrame::INTERNAL);
     Register arg_reg_1 = x0;
     Register arg_reg_2 = x1;
     Register arg_reg_3 = x2;
     __ Mov(arg_reg_1, code_obj);
     __ Mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
     __ Mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
-    FrameScope scope(masm, StackFrame::INTERNAL);
     __ CallCFunction(get_baseline_pc, 3, 0);
   }
-  __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
   __ Add(code_obj, code_obj, kReturnRegister0);
   __ Pop(kInterpreterAccumulatorRegister, padreg);
-  __ Jump(code_obj);
+  if (is_osr) {
+    // Reset the OSR loop nesting depth to disarm back edges.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+    // Sparkplug here.
+    __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                 BytecodeArray::kOsrNestingLevelOffset));
+    Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+  } else {
+    __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+    __ Jump(code_obj);
+  }
   __ Trap();  // Unreachable.
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ Mov(kInterpreterBytecodeOffsetRegister,
-         BytecodeArray::kHeaderSize - kHeapObjectTag);
-  __ Mov(get_baseline_pc,
-         ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ B(&valid_bytecode_offset);
+  if (!is_osr) {
+    __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryOffset, get the start address of
+    // the first bytecode.
+    __ Mov(kInterpreterBytecodeOffsetRegister, Operand(0));
+    if (next_bytecode) {
+      __ Mov(get_baseline_pc,
+             ExternalReference::baseline_pc_for_bytecode_offset());
+    }
+    __ B(&valid_bytecode_offset);
+  }
+  __ bind(&install_baseline_code);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ PushArgument(closure);
+    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+  }
+  // Retry from the start after installing baseline code.
+  __ B(&start);
 }
 }  // namespace
@@ -4090,6 +4118,11 @@ void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, true);
 }
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+    MacroAssembler* masm) {
+  Generate_BaselineEntry(masm, false, true);
+}
+
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
......
@@ -143,6 +143,7 @@ namespace internal {
   ASM(BaselineLeaveFrame, BaselineLeaveFrame) \
   ASM(BaselineEnterAtBytecode, Void) \
   ASM(BaselineEnterAtNextBytecode, Void) \
+  ASM(InterpreterOnStackReplacement_ToBaseline, Void) \
   \
   /* Code life-cycle */ \
   TFC(CompileLazy, JSTrampoline) \
......
@@ -2737,6 +2737,15 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }
 namespace {
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
+  // Overwrite the return address on the stack.
+  __ mov(Operand(esp, 0), entry_address);
+  // And "return" to the OSR entry point of the function.
+  __ ret(0);
+}
+
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2769,12 +2778,9 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
     // Compute the target address = code_obj + header_size + osr_offset
     __ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag));
-    // Overwrite the return address on the stack.
-    __ mov(Operand(esp, 0), eax);
-    // And "return" to the OSR entry point of the function.
-    __ ret(0);
+    Generate_OSREntry(masm, eax);
   }
 }
 }  // namespace
 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
@@ -4079,51 +4085,56 @@ namespace {
 // Converts an interpreter frame into a baseline frame and continues execution
 // in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
-  // Get bytecode array and bytecode offset from the stack frame.
-  __ mov(kInterpreterBytecodeArrayRegister,
-         MemOperand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+                            bool is_osr = false) {
+  __ push(kInterpreterAccumulatorRegister);
+  Label start;
+  __ bind(&start);
   // Get function from the frame.
-  Register closure = esi;
+  Register closure = eax;
   __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
-  // Replace BytecodeOffset with the feedback vector.
+  // Load the feedback vector.
   Register feedback_vector = ecx;
   __ mov(feedback_vector,
         FieldOperand(closure, JSFunction::kFeedbackCellOffset));
   __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
-  if (__ emit_debug_code()) {
-    __ push(kInterpreterAccumulatorRegister);
-    Register scratch = kInterpreterAccumulatorRegister;
-    __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, scratch);
-    __ Assert(equal, AbortReason::kExpectedFeedbackVector);
-    scratch = no_reg;
-    __ pop(kInterpreterAccumulatorRegister);
-  }
+  Label install_baseline_code;
+  // Check if feedback vector is valid. If not, call prepare for baseline to
+  // allocate it.
+  __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE,
+                   kInterpreterBytecodeOffsetRegister);
+  __ j(not_equal, &install_baseline_code);
+  // Save BytecodeOffset from the stack frame.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+  // Replace BytecodeOffset with the feedback vector.
   __ mov(MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
          feedback_vector);
   feedback_vector = no_reg;
   // Get the Code object from the shared function info.
-  Register code_obj = closure;
+  Register code_obj = esi;
   __ mov(code_obj,
          FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  closure = no_reg;
   __ mov(code_obj,
          FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
   __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
   // Compute baseline pc for bytecode offset.
-  __ push(kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
+  ExternalReference get_baseline_pc_extref;
+  if (next_bytecode || is_osr) {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_next_executed_bytecode();
+  } else {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_bytecode_offset();
+  }
   Register get_baseline_pc = ecx;
   __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
@@ -4133,33 +4144,20 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   // TODO(pthier): Investigate if it is feasible to handle this special case
   // in TurboFan instead of here.
   Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmp(kInterpreterBytecodeOffsetRegister,
-         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                   kFunctionEntryBytecodeOffset));
-  __ j(equal, &function_entry_bytecode);
-  __ bind(&valid_bytecode_offset);
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    __ push(kInterpreterBytecodeOffsetRegister);
-    Register bytecode = kInterpreterBytecodeOffsetRegister;
-    __ movzx_b(bytecode,
-               Operand(kInterpreterBytecodeArrayRegister,
-                       kInterpreterBytecodeOffsetRegister, times_1, 0));
-    __ cmpb(bytecode,
-            Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ j(not_equal, &not_jump_loop, Label::kNear);
-    __ LoadAddress(get_baseline_pc,
-                   ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
-    __ pop(kInterpreterBytecodeOffsetRegister);
+  if (!is_osr) {
+    __ cmp(kInterpreterBytecodeOffsetRegister,
+           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                     kFunctionEntryBytecodeOffset));
+    __ j(equal, &function_entry_bytecode);
   }
   __ sub(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ bind(&valid_bytecode_offset);
+  // Get bytecode array from the stack frame.
+  __ mov(kInterpreterBytecodeArrayRegister,
+         MemOperand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ PrepareCallCFunction(3, eax);
@@ -4174,17 +4172,39 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
          FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
   __ pop(kInterpreterAccumulatorRegister);
-  __ jmp(code_obj);
+  if (is_osr) {
+    // Reset the OSR loop nesting depth to disarm back edges.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+    // Sparkplug here.
+    __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
+                          BytecodeArray::kOsrNestingLevelOffset),
+             Immediate(0));
+    Generate_OSREntry(masm, code_obj);
+  } else {
+    __ jmp(code_obj);
+  }
   __ Trap();  // Unreachable.
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadAddress(get_baseline_pc,
-                 ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ jmp(&valid_bytecode_offset);
+  if (!is_osr) {
+    __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryOffset, get the start address of
+    // the first bytecode.
+    __ mov(kInterpreterBytecodeOffsetRegister, Immediate(0));
+    if (next_bytecode) {
+      __ LoadAddress(get_baseline_pc,
+                     ExternalReference::baseline_pc_for_bytecode_offset());
+    }
+    __ jmp(&valid_bytecode_offset);
+  }
+  __ bind(&install_baseline_code);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(closure);
+    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+  }
+  // Retry from the start after installing baseline code.
+  __ jmp(&start);
 }
 }  // namespace
@@ -4197,6 +4217,11 @@ void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, true);
 }
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+    MacroAssembler* masm) {
+  Generate_BaselineEntry(masm, false, true);
+}
+
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
......
@@ -2631,6 +2631,15 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }
 namespace {
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
+  // Overwrite the return address on the stack.
+  __ movq(StackOperandForReturnAddress(0), entry_address);
+  // And "return" to the OSR entry point of the function.
+  __ ret(0);
+}
+
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2663,12 +2672,9 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
     // Compute the target address = code_obj + header_size + osr_offset
     __ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
-    // Overwrite the return address on the stack.
-    __ movq(StackOperandForReturnAddress(0), rax);
-    // And "return" to the OSR entry point of the function.
-    __ ret(0);
+    Generate_OSREntry(masm, rax);
   }
 }
 }  // namespace
 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
@@ -4347,29 +4353,35 @@ namespace {
 // Converts an interpreter frame into a baseline frame and continues execution
 // in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
-  // Get bytecode array and bytecode offset from the stack frame.
-  __ movq(kInterpreterBytecodeArrayRegister,
-          MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ SmiUntag(
-      kInterpreterBytecodeOffsetRegister,
-      MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+                            bool is_osr = false) {
+  __ pushq(kInterpreterAccumulatorRegister);
+  Label start;
+  __ bind(&start);
   // Get function from the frame.
   Register closure = rdi;
   __ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-  // Replace BytecodeOffset with the feedback vector.
+  // Load the feedback vector.
   Register feedback_vector = rbx;
   __ LoadTaggedPointerField(
       feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
   __ LoadTaggedPointerField(feedback_vector,
                             FieldOperand(feedback_vector, Cell::kValueOffset));
-  if (__ emit_debug_code()) {
-    __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
-    __ Assert(equal, AbortReason::kExpectedFeedbackVector);
-  }
+  Label install_baseline_code;
+  // Check if feedback vector is valid. If not, call prepare for baseline to
+  // allocate it.
+  __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
+  __ j(not_equal, &install_baseline_code);
+  // Save BytecodeOffset from the stack frame.
+  __ SmiUntag(
+      kInterpreterBytecodeOffsetRegister,
+      MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  // Replace BytecodeOffset with the feedback vector.
   __ movq(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
           feedback_vector);
   feedback_vector = no_reg;
@@ -4383,14 +4395,16 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
       FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
   __ LoadTaggedPointerField(
       code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
-  closure = no_reg;
   // Compute baseline pc for bytecode offset.
-  __ pushq(kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
+  ExternalReference get_baseline_pc_extref;
+  if (next_bytecode || is_osr) {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_next_executed_bytecode();
+  } else {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_bytecode_offset();
+  }
   Register get_baseline_pc = rax;
   __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
@@ -4400,31 +4414,20 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   // TODO(pthier): Investigate if it is feasible to handle this special case
   // in TurboFan instead of here.
   Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmpq(kInterpreterBytecodeOffsetRegister,
-          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                    kFunctionEntryBytecodeOffset));
-  __ j(equal, &function_entry_bytecode);
-  __ bind(&valid_bytecode_offset);
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    Register bytecode = rdi;
-    __ movzxbq(bytecode,
-               Operand(kInterpreterBytecodeArrayRegister,
-                       kInterpreterBytecodeOffsetRegister, times_1, 0));
-    __ cmpb(bytecode,
-            Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ j(not_equal, &not_jump_loop, Label::kNear);
-    __ LoadAddress(get_baseline_pc,
-                   ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
+  if (!is_osr) {
+    __ cmpq(kInterpreterBytecodeOffsetRegister,
+            Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                      kFunctionEntryBytecodeOffset));
+    __ j(equal, &function_entry_bytecode);
   }
   __ subq(kInterpreterBytecodeOffsetRegister,
           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ bind(&valid_bytecode_offset);
+  // Get bytecode array from the stack frame.
+  __ movq(kInterpreterBytecodeArrayRegister,
+          MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ PrepareCallCFunction(3);
@@ -4437,17 +4440,39 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
       FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
   __ popq(kInterpreterAccumulatorRegister);
-  __ jmp(code_obj);
+  if (is_osr) {
+    // Reset the OSR loop nesting depth to disarm back edges.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+    // Sparkplug here.
+    __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
+                         BytecodeArray::kOsrNestingLevelOffset),
+            Immediate(0));
+    Generate_OSREntry(masm, code_obj);
+  } else {
+    __ jmp(code_obj);
+  }
   __ Trap();  // Unreachable.
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ movq(kInterpreterBytecodeOffsetRegister,
-          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadAddress(get_baseline_pc,
-                 ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ jmp(&valid_bytecode_offset);
+  if (!is_osr) {
+    __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryOffset, get the start address of
+    // the first bytecode.
+    __ movq(kInterpreterBytecodeOffsetRegister, Immediate(0));
+    if (next_bytecode) {
+      __ LoadAddress(get_baseline_pc,
+                     ExternalReference::baseline_pc_for_bytecode_offset());
+    }
+    __ jmp(&valid_bytecode_offset);
+  }
+  __ bind(&install_baseline_code);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(closure);
+    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+  }
+  // Retry from the start after installing baseline code.
+  __ jmp(&start);
 }
 }  // namespace
@@ -4460,6 +4485,11 @@ void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, true);
 }
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+    MacroAssembler* masm) {
+  Generate_BaselineEntry(masm, false, true);
+}
+
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
......
@@ -404,6 +404,13 @@ Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
                                Builtins::kInterpreterOnStackReplacement);
 }
+// static
+Callable CodeFactory::InterpreterOnStackReplacement_ToBaseline(
+    Isolate* isolate) {
+  return Builtins::CallableFor(
+      isolate, Builtins::kInterpreterOnStackReplacement_ToBaseline);
+}
+
 // static
 Callable CodeFactory::ArrayNoArgumentConstructor(
     Isolate* isolate, ElementsKind kind,
......
@@ -92,6 +92,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
       Isolate* isolate, InterpreterPushArgsMode mode);
   static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
   static Callable InterpreterOnStackReplacement(Isolate* isolate);
+  static Callable InterpreterOnStackReplacement_ToBaseline(Isolate* isolate);
   static Callable ArrayNoArgumentConstructor(
       Isolate* isolate, ElementsKind kind,
......
@@ -623,9 +623,9 @@ ExternalReference::address_of_enable_experimental_regexp_engine() {
 namespace {
-static uintptr_t BaselineStartPCForBytecodeOffset(Address raw_code_obj,
-                                                  int bytecode_offset,
-                                                  Address raw_bytecode_array) {
+static uintptr_t BaselinePCForBytecodeOffset(Address raw_code_obj,
+                                             int bytecode_offset,
+                                             Address raw_bytecode_array) {
   Code code_obj = Code::cast(Object(raw_code_obj));
   BytecodeArray bytecode_array =
       BytecodeArray::cast(Object(raw_bytecode_array));
@@ -633,22 +633,21 @@ static uintptr_t BaselineStartPCForBytecodeOffset(Address raw_code_obj,
                                                   bytecode_array);
 }
-static uintptr_t BaselineEndPCForBytecodeOffset(Address raw_code_obj,
-                                                int bytecode_offset,
-                                                Address raw_bytecode_array) {
+static uintptr_t BaselinePCForNextExecutedBytecode(Address raw_code_obj,
+                                                   int bytecode_offset,
+                                                   Address raw_bytecode_array) {
   Code code_obj = Code::cast(Object(raw_code_obj));
   BytecodeArray bytecode_array =
       BytecodeArray::cast(Object(raw_bytecode_array));
-  return code_obj.GetBaselineEndPCForBytecodeOffset(bytecode_offset,
-                                                    bytecode_array);
+  return code_obj.GetBaselinePCForNextExecutedBytecode(bytecode_offset,
+                                                       bytecode_array);
 }
 }  // namespace
-FUNCTION_REFERENCE(baseline_end_pc_for_bytecode_offset,
-                   BaselineEndPCForBytecodeOffset)
-FUNCTION_REFERENCE(baseline_start_pc_for_bytecode_offset,
-                   BaselineStartPCForBytecodeOffset)
+FUNCTION_REFERENCE(baseline_pc_for_bytecode_offset, BaselinePCForBytecodeOffset)
+FUNCTION_REFERENCE(baseline_pc_for_next_executed_bytecode,
+                   BaselinePCForNextExecutedBytecode)
 ExternalReference ExternalReference::thread_in_wasm_flag_address_address(
     Isolate* isolate) {
......
@@ -124,8 +124,9 @@ class StatsCounter;
   V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
   V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
   V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001") \
-  V(baseline_start_pc_for_bytecode_offset, "BaselineStartPCForBytecodeOffset") \
-  V(baseline_end_pc_for_bytecode_offset, "BaselineEndPCForBytecodeOffset") \
+  V(baseline_pc_for_bytecode_offset, "BaselinePCForBytecodeOffset") \
+  V(baseline_pc_for_next_executed_bytecode, \
+    "BaselinePCForNextExecutedBytecode") \
   V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address") \
   V(check_object_type, "check_object_type") \
   V(compute_integer_hash, "ComputeSeededHash") \
......
@@ -1330,6 +1330,34 @@ void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) {
   BIND(&ok);
 }
+void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
+                                              TNode<IntPtrT> relative_jump) {
+  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
+  TNode<HeapObject> shared_info = LoadJSFunctionSharedFunctionInfo(function);
+  TNode<Object> sfi_data =
+      LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
+  TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
+  Label baseline(this);
+  GotoIf(InstanceTypeEqual(data_type, BASELINE_DATA_TYPE), &baseline);
+  {
+    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
+    CallStub(callable, context);
+    JumpBackward(relative_jump);
+  }
+  BIND(&baseline);
+  {
+    Callable callable =
+        CodeFactory::InterpreterOnStackReplacement_ToBaseline(isolate());
+    // We already compiled the baseline code, so we don't need to handle failed
+    // compilation as in the Ignition -> Turbofan case. Therefore we can just
+    // tailcall to the OSR builtin.
+    SaveBytecodeOffset();
+    TailCallStub(callable, context);
+  }
+}
+
 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
   CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
               SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
......
@@ -244,6 +244,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   // Dispatch to frame dropper trampoline if necessary.
   void MaybeDropFrames(TNode<Context> context);
+  // Perform OnStackReplacement.
+  void OnStackReplacement(TNode<Context> context, TNode<IntPtrT> relative_jump);
+
   // Returns the offset from the BytecodeArrayPointer of the current bytecode.
   TNode<IntPtrT> BytecodeOffset();
......
@@ -2203,11 +2203,7 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
   JumpBackward(relative_jump);
   BIND(&osr_armed);
-  {
-    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
-    CallStub(callable, context);
-    JumpBackward(relative_jump);
-  }
+  OnStackReplacement(context, relative_jump);
 }
 // SwitchOnSmiNoFeedback <table_start> <table_length> <case_value_base>
......
@@ -411,6 +411,26 @@ uintptr_t Code::GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
                                             bytecodes);
 }
+uintptr_t Code::GetBaselinePCForNextExecutedBytecode(int bytecode_offset,
+                                                     BytecodeArray bytecodes) {
+  DisallowGarbageCollection no_gc;
+  CHECK_EQ(kind(), CodeKind::BASELINE);
+  baseline::BytecodeOffsetIterator offset_iterator(
+      ByteArray::cast(bytecode_offset_table()), bytecodes);
+  Handle<BytecodeArray> bytecodes_handle(
+      reinterpret_cast<Address*>(&bytecodes));
+  interpreter::BytecodeArrayIterator bytecode_iterator(bytecodes_handle,
+                                                       bytecode_offset);
+  interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
+  if (bytecode == interpreter::Bytecode::kJumpLoop) {
+    return GetBaselineStartPCForBytecodeOffset(
+        bytecode_iterator.GetJumpTargetOffset(), bytecodes);
+  } else {
+    DCHECK(!interpreter::Bytecodes::IsJump(bytecode));
+    return GetBaselineEndPCForBytecodeOffset(bytecode_offset, bytecodes);
+  }
+}
+
 void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
                             bool is_off_heap_trampoline) {
   CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
......
@@ -412,6 +412,14 @@ class Code : public HeapObject {
   inline uintptr_t GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
                                                      BytecodeArray bytecodes);
+  // Returns the PC of the next bytecode in execution order.
+  // If the bytecode at the given offset is JumpLoop, the PC of the jump target
+  // is returned. Other jumps are not allowed.
+  // For other bytecodes this is equivalent to
+  // GetBaselineEndPCForBytecodeOffset.
+  inline uintptr_t GetBaselinePCForNextExecutedBytecode(
+      int bytecode_offset, BytecodeArray bytecodes);
+
   inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc,
                                             BytecodeArray bytecodes);
......
@@ -337,14 +337,30 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
                        function->shared().is_compiled_scope(isolate));
   JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
   DCHECK(is_compiled_scope.is_compiled());
-  if (FLAG_sparkplug) {
-    Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
-                              &is_compiled_scope);
-  }
   // Also initialize the invocation count here. This is only really needed for
   // OSR. When we OSR functions with lazy feedback allocation we want to have
   // a non zero invocation count so we can inline functions.
   function->feedback_vector().set_invocation_count(1);
+  if (FLAG_sparkplug) {
+    if (Compiler::CompileBaseline(isolate, function,
+                                  Compiler::CLEAR_EXCEPTION,
+                                  &is_compiled_scope)) {
+      if (FLAG_use_osr) {
+        JavaScriptFrameIterator it(isolate);
+        DCHECK(it.frame()->is_unoptimized());
+        UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
+        if (FLAG_trace_osr) {
+          CodeTracer::Scope scope(isolate->GetCodeTracer());
+          PrintF(
+              scope.file(),
+              "[OSR - Entry at OSR bytecode offset %d into baseline code]\n",
+              frame->GetBytecodeOffset());
+        }
+        frame->GetBytecodeArray().set_osr_loop_nesting_level(
+            AbstractCode::kMaxLoopNestingMarker);
+      }
+    }
+  }
   return ReadOnlyRoots(isolate).undefined_value();
 }
 {
......