Commit e1ad114e authored by mstarzinger, committed by Commit bot

Reland of [interpreter] Add explicit OSR polling bytecode. (patchset #1 id:1 of https://codereview.chromium.org/2184553003/ )

Reason for revert:
Fix has been landed.

Original issue's description:
> Revert of [interpreter] Add explicit OSR polling bytecode. (patchset #6 id:100001 of https://codereview.chromium.org/2172233002/ )
>
> Reason for revert:
> Bunch of breakages. Maybe bad interaction with https://chromium.googlesource.com/v8/v8/+/e520e5da5550f0d1a975e87d6e66a2edecbb0c8e ?
>
> E.g.:
> https://build.chromium.org/p/client.v8/builders/V8%20Linux64/builds/11607
>
> Original issue's description:
> > [interpreter] Add explicit OSR polling bytecode.
> >
> > This adds an explicit {OsrPoll} bytecode into every loop header which
> > triggers on-stack replacement when armed. Note that each such bytecode
> > stores the static loop depth as an operand, and hence can be armed for
> > specific loop depths.
> >
> > This also adds builtin code that triggers OSR compilation and switches
> > execution over to optimized code in case compilation succeeds. In case
> > compilation fails, the bytecode dispatch just continues unhindered.
> >
> > R=rmcilroy@chromium.org
> > TEST=mjsunit/ignition/osr-from-bytecode
> > BUG=v8:4764
> >
> > Committed: https://crrev.com/a55beb68e0ededb3773affa294a71edc50621458
> > Cr-Commit-Position: refs/heads/master@{#38043}
>
> TBR=rmcilroy@chromium.org,mstarzinger@chromium.org
> # Skipping CQ checks because original CL landed less than 1 days ago.
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=v8:4764
>
> Committed: https://crrev.com/439aa2c6d708bfd95db725bd6f97c4c49bbc51fc
> Cr-Commit-Position: refs/heads/master@{#38044}

TBR=rmcilroy@chromium.org,machenbach@chromium.org
BUG=v8:4764

Review-Url: https://codereview.chromium.org/2184713002
Cr-Commit-Position: refs/heads/master@{#38056}
parent 968d3bce
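For orientation before the per-architecture diff below: the heart of the change is the arming check performed by the new OsrPoll handler (see Interpreter::DoOsrPoll in the interpreter.cc hunk further down). The following C++ sketch models that decision only; it is not V8 code, and the names HandleOsrPoll, NextStep and the plain int32_t parameters are illustrative stand-ins for the InterpreterAssembler graph the real handler emits.

// Illustrative sketch only -- not V8 code. Each loop header's OsrPoll carries
// its static loop depth as an immediate operand; the runtime "arms" OSR by
// raising the osr_loop_nesting_level stored in the BytecodeArray header.
#include <cstdint>

enum class NextStep { kContinueDispatch, kCallOnStackReplacementBuiltin };

NextStep HandleOsrPoll(int32_t loop_depth, int32_t osr_nesting_level) {
  if (loop_depth >= osr_nesting_level) {
    // Not armed for this depth: keep dispatching bytecode as usual.
    return NextStep::kContinueDispatch;
  }
  // Armed: call the InterpreterOnStackReplacement builtin. If it obtains
  // optimized code, execution switches over; if compilation fails, the
  // builtin returns and bytecode dispatch continues unhindered.
  return NextStep::kCallOnStackReplacementBuiltin;
}

Compare this with the real DoOsrPoll in the interpreter.cc hunk, which emits the same comparison via Int32GreaterThanOrEqual(loop_depth, osr_level) and, on the armed path, calls the stub returned by CodeFactory::InterpreterOnStackReplacement.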
@@ -1723,9 +1723,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
if (has_handler_frame) {
__ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset));
} else {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1733,7 +1740,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
// If the code object is null, just return to the caller.
Label skip;
__ cmp(r0, Operand(Smi::FromInt(0)));
__ b(ne, &skip);
@@ -1741,6 +1748,12 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ bind(&skip);
// Drop any potential handler frame that may be sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
if (has_handler_frame) {
__ LeaveFrame(StackFrame::STUB);
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
@@ -1767,6 +1780,14 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, false);
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, true);
}
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -1735,9 +1735,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
if (has_handler_frame) {
__ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset));
} else {
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1745,13 +1752,19 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
// If the code object is null, just return to the caller.
Label skip;
__ CompareAndBranch(x0, Smi::FromInt(0), ne, &skip);
__ Ret();
__ Bind(&skip);
// Drop any potential handler frame that may be sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
if (has_handler_frame) {
__ LeaveFrame(StackFrame::STUB);
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -1771,6 +1784,14 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, false);
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, true);
}
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -130,6 +130,7 @@ namespace internal {
ASM(InterpreterPushArgsAndTailCall) \
ASM(InterpreterPushArgsAndTailCallFunction) \
ASM(InterpreterEnterBytecodeDispatch) \
ASM(InterpreterOnStackReplacement) \
\
/* Code life-cycle */ \
ASM(CompileLazy) \
@@ -2968,9 +2968,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
}
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
if (has_handler_frame) {
__ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
} else {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -2979,13 +2986,19 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
Label skip;
// If the code object is null, just return to the unoptimized code.
// If the code object is null, just return to the caller.
__ cmp(eax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
// Drop any potential handler frame that may be sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
if (has_handler_frame) {
__ leave();
}
// Load deoptimization data from the code object.
__ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -3005,6 +3018,14 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ret(0);
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, false);
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, true);
}
#undef __
} // namespace internal
} // namespace v8
@@ -1723,9 +1723,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
if (has_handler_frame) {
__ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
} else {
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1733,9 +1740,15 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
// If the code object is null, just return to the caller.
__ Ret(eq, v0, Operand(Smi::FromInt(0)));
// Drop any potential handler frame that may be sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
if (has_handler_frame) {
__ LeaveFrame(StackFrame::STUB);
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -1756,6 +1769,14 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, false);
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, true);
}
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -1718,9 +1718,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
if (has_handler_frame) {
__ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
} else {
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1728,9 +1735,15 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
// If the code object is null, just return to the caller.
__ Ret(eq, v0, Operand(Smi::FromInt(0)));
// Drop any potential handler frame that may be sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
if (has_handler_frame) {
__ LeaveFrame(StackFrame::STUB);
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -1751,6 +1764,14 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, false);
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, true);
}
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -3011,9 +3011,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
}
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
if (has_handler_frame) {
__ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ movp(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset));
} else {
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -3022,13 +3029,19 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
Label skip;
// If the code object is null, just return to the unoptimized code.
// If the code object is null, just return to the caller.
__ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
// Drop any potential handler frame that may be sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
if (has_handler_frame) {
__ leave();
}
// Load deoptimization data from the code object.
__ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -3048,6 +3061,14 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ret(0);
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, false);
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
Generate_OnStackReplacementHelper(masm, true);
}
#undef __
} // namespace internal
@@ -601,5 +601,11 @@ Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
return Callable(stub.GetCode(), InterpreterCEntryDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
return Callable(isolate->builtins()->InterpreterOnStackReplacement(),
ContextOnlyDescriptor(isolate));
}
} // namespace internal
} // namespace v8
@@ -157,6 +157,7 @@ class CodeFactory final {
CallableType function_type = CallableType::kAny);
static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
static Callable InterpreterOnStackReplacement(Isolate* isolate);
};
} // namespace internal
@@ -745,6 +745,11 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
// TODO(4764): Remove this guard once OSR graph construction works.
if (!osr_ast_id.IsNone() && osr_frame->is_interpreted()) {
return MaybeHandle<Code>();
}
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeMap(function, osr_ast_id)
.ToHandle(&cached_code)) {
@@ -1392,6 +1392,11 @@ void BytecodeGraphBuilder::VisitStackCheck() {
environment()->RecordAfterState(node, &states);
}
void BytecodeGraphBuilder::VisitOsrPoll() {
// TODO(4764): Implement OSR graph construction. Not marked UNIMPLEMENTED to
// ensure the --ignition-osr flag can already be fuzzed without crashing.
}
void BytecodeGraphBuilder::VisitReturn() {
Node* control =
NewNode(common()->Return(), environment()->LookupAccumulator());
@@ -436,6 +436,19 @@ Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
return CallStubN(callable.descriptor(), target, args, result_size);
}
Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, size_t result_size) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
Node** args = zone()->NewArray<Node*>(1);
args[0] = context;
return CallN(call_descriptor, target, args);
}
Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, Node* arg1,
size_t result_size) {
@@ -335,6 +335,8 @@ class CodeAssembler {
Node* CallStubN(Callable const& callable, Node** args,
size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
@@ -470,6 +470,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::OsrPoll(int loop_depth) {
Output(Bytecode::kOsrPoll, UnsignedOperand(loop_depth));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
Output(Bytecode::kThrow);
return *this;
@@ -233,6 +233,8 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& StackCheck(int position);
BytecodeArrayBuilder& OsrPoll(int loop_depth);
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
BytecodeArrayBuilder& Return();
@@ -714,6 +714,15 @@ void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
loop_builder->LoopHeader(&resume_points_in_loop);
// Insert an explicit {OsrPoll} right after the loop header, to trigger
// on-stack replacement when armed for the given loop nesting depth.
if (FLAG_ignition_osr) {
// TODO(4764): Merge this with another bytecode (e.g. {Jump} back edge).
// TODO(4764): Investigate interaction with generators.
// TODO(4764): Track and pass correct loop depth.
builder()->OsrPoll(0);
}
if (stmt->yield_count() > 0) {
// If we are not resuming, fall through to loop body.
// If we are resuming, perform state dispatch.
@@ -260,6 +260,9 @@ namespace interpreter {
/* Perform a stack guard check */ \
V(StackCheck, AccumulatorUse::kNone) \
\
/* Perform a check to trigger on-stack replacement */ \
V(OsrPoll, AccumulatorUse::kNone, OperandType::kImm) \
\
/* Non-local flow control */ \
V(Throw, AccumulatorUse::kRead) \
V(ReThrow, AccumulatorUse::kRead) \
@@ -848,6 +848,12 @@ Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
return UintPtrLessThan(sp, stack_limit);
}
Node* InterpreterAssembler::LoadOSRNestingLevel() {
Node* offset =
IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag);
return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), offset);
}
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
disable_stack_check_across_call_ = true;
Node* abort_id = SmiTag(Int32Constant(bailout_reason));
@@ -145,6 +145,9 @@ class InterpreterAssembler : public CodeStubAssembler {
// Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn();
// Returns the OSR nesting level from the bytecode header.
compiler::Node* LoadOSRNestingLevel();
// Dispatch to the bytecode.
compiler::Node* Dispatch();
@@ -1796,6 +1796,32 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
}
}
// OsrPoll <loop_depth>
//
// Performs a loop nesting check and potentially triggers OSR.
void Interpreter::DoOsrPoll(InterpreterAssembler* assembler) {
Node* loop_depth = __ BytecodeOperandImm(0);
Node* osr_level = __ LoadOSRNestingLevel();
// Check whether OSR points at the given {loop_depth} are armed by comparing
// that depth against the current {osr_level} loaded from the header of the
// BytecodeArray.
Label ok(assembler), osr_armed(assembler, Label::kDeferred);
Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
__ Branch(condition, &ok, &osr_armed);
__ Bind(&ok);
__ Dispatch();
__ Bind(&osr_armed);
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
Node* target = __ HeapConstant(callable.code());
Node* context = __ GetContext();
__ CallStub(callable.descriptor(), target, context);
__ Dispatch();
}
}
// Throw
//
// Throws the exception in the accumulator.
@@ -217,38 +217,75 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate,
return true;
}
namespace {
BailoutId DetermineEntryAndDisarmOSRForBaseline(JavaScriptFrame* frame) {
Handle<Code> caller_code(frame->function()->shared()->code());
// Passing the PC in the JavaScript frame from the caller directly is
// not GC safe, so we walk the stack to get it.
if (!caller_code->contains(frame->pc())) {
// Code on the stack may not be the code object referenced by the shared
// function info. It may have been replaced to include deoptimization data.
caller_code = Handle<Code>(frame->LookupCode());
}
DCHECK_EQ(frame->LookupCode(), *caller_code);
DCHECK_EQ(Code::FUNCTION, caller_code->kind());
DCHECK(caller_code->contains(frame->pc()));
// Revert the patched back edge table, regardless of whether OSR succeeds.
BackEdgeTable::Revert(frame->isolate(), *caller_code);
uint32_t pc_offset =
static_cast<uint32_t>(frame->pc() - caller_code->instruction_start());
return caller_code->TranslatePcOffsetToAstId(pc_offset);
}
BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
// Note that the bytecode array active on the stack might be different from
// the one installed on the function (e.g. patched by debugger). This however
// is fine because we guarantee the layout to be in sync, hence any BailoutId
// representing the entry point will be valid for any copy of the bytecode.
Handle<BytecodeArray> bytecode(iframe->GetBytecodeArray());
DCHECK(frame->LookupCode()->is_interpreter_trampoline_builtin());
DCHECK(frame->function()->shared()->HasBytecodeArray());
DCHECK(frame->is_interpreted());
DCHECK(FLAG_ignition_osr);
// Reset the OSR loop nesting depth to disarm back edges.
bytecode->set_osr_loop_nesting_level(0);
return BailoutId(iframe->GetBytecodeOffset());
}
} // namespace
RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
Handle<Code> caller_code(function->shared()->code());
// We're not prepared to handle a function with arguments object.
DCHECK(!function->shared()->uses_arguments());
// Only reachable when OSR is enabled.
CHECK(FLAG_use_osr);
// Passing the PC in the javascript frame from the caller directly is
// not GC safe, so we walk the stack to get it.
// Determine frame triggering OSR request.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
if (!caller_code->contains(frame->pc())) {
// Code on the stack may not be the code object referenced by the shared
// function info. It may have been replaced to include deoptimization data.
caller_code = Handle<Code>(frame->LookupCode());
}
uint32_t pc_offset =
static_cast<uint32_t>(frame->pc() - caller_code->instruction_start());
#ifdef DEBUG
DCHECK_EQ(frame->function(), *function);
DCHECK_EQ(frame->LookupCode(), *caller_code);
DCHECK(caller_code->contains(frame->pc()));
#endif // DEBUG
BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
// Determine the entry point for which this OSR request has been fired and
// also disarm all back edges in the calling code to stop new requests.
BailoutId ast_id = frame->is_interpreted()
? DetermineEntryAndDisarmOSRForInterpreter(frame)
: DetermineEntryAndDisarmOSRForBaseline(frame);
DCHECK(!ast_id.IsNone());
MaybeHandle<Code> maybe_result;
......@@ -261,9 +298,6 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
maybe_result = Compiler::GetOptimizedCodeForOSR(function, ast_id, frame);
}
// Revert the patched back edge table, regardless of whether OSR succeeds.
BackEdgeTable::Revert(isolate, *caller_code);
// Check whether we ended up with usable optimized code.
Handle<Code> result;
if (maybe_result.ToHandle(&result) &&
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --ignition --ignition-osr --turbo-from-bytecode
function f() {
for (var i = 0; i < 10; i++) {
if (i == 5) %OptimizeOsr();
}
}
f();
@@ -257,6 +257,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit stack check bytecode.
builder.StackCheck(0);
// Emit an OSR poll bytecode.
builder.OsrPoll(1);
// Emit throw and re-throw in its own basic block so that the rest of the
// code isn't omitted due to being dead.
BytecodeLabel after_throw;