Commit 9dfe6359 authored by Dan Elphick, committed by Commit Bot

[builtins] Reordered optimised code path in InterpreterEntryTrampoline

Pushes the optimised code path to after the main interpreter path, so
the straight-line path is just normal interpretation.

Bug: v8:9771
Change-Id: I2f48ff290efcd85a5e30cf823027919560f8a56a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1829220
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64066}
parent 615e9003
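For orientation, the shape of the new control flow is sketched below in plain C++. This is a hypothetical illustration, not V8 code: OptimizedCodeSlot, RunInterpreter, MaybeOptimizeCode and TailCallOptimizedCodeSlot are invented stand-ins for the MacroAssembler sequences in the diff, and the comments mirror the not_optimized, has_optimization_marker and maybe_has_optimized_code labels below. The point of the reordering is that the checks at the top only pick a branch target; the interpreter body is the straight-line fall-through, and the optimized-code handling is emitted out of line after it.

#include <cstdio>

// Hypothetical stand-ins for V8 types, used only to model the branch structure.
enum class OptimizationMarker {
  kNone,
  kLogFirstExecution,
  kCompileOptimized,
  kCompileOptimizedConcurrent,
  kInOptimizationQueue
};

// The feedback vector's optimized-code slot: either a Smi marker or a
// (possibly cleared) weak reference to a Code object.
struct OptimizedCodeSlot {
  bool is_smi;
  OptimizationMarker marker;   // meaningful only when is_smi
  bool weak_ref_cleared;       // meaningful only when !is_smi
};

void RunInterpreter() { std::puts("not_optimized: straight-line interpretation"); }
void MaybeOptimizeCode() { std::puts("has_optimization_marker: handle the marker"); }
void TailCallOptimizedCodeSlot() { std::puts("tail-call the optimized code"); }

void InterpreterEntryTrampoline(const OptimizedCodeSlot& slot) {
  if (!slot.is_smi) {
    // maybe_has_optimized_code: the slot holds a weak reference to code.
    if (!slot.weak_ref_cleared) return TailCallOptimizedCodeSlot();
    return RunInterpreter();  // cleared reference -> back to not_optimized
  }
  if (slot.marker != OptimizationMarker::kNone) {
    // has_optimization_marker: in the real builtin this usually tail-calls
    // the runtime and only falls through when no optimized code is runnable.
    MaybeOptimizeCode();
  }
  RunInterpreter();  // not_optimized: the common, straight-line path
}

int main() {
  OptimizedCodeSlot slot{true, OptimizationMarker::kNone, false};
  InterpreterEntryTrampoline(slot);  // takes the straight-line interpreter path
}

In the generated builtins below, the two out-of-line blocks are bound after LeaveInterpreterFrame, so the common entry path performs only the two quick checks before falling straight into the interpreter.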
@@ -885,102 +885,70 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1,
Register scratch2) {
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch) {
// ----------- S t a t e -------------
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r1, r3, scratch1, scratch2));
Label optimized_code_slot_is_weak_ref, fallthrough;
DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
Register closure = r1;
Register optimized_code_entry = scratch1;
__ ldr(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ cmp(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
__ b(eq, &fallthrough);
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ ldr(scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &found_deoptimized_code);
{
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
}
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
{
// Optimized code slot is a weak reference.
__ bind(&optimized_code_slot_is_weak_ref);
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ ldr(scratch2, FieldMemOperand(optimized_code_entry,
Code::kCodeDataContainerOffset));
__ ldr(
scratch2,
FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
// Optimized code slot contains deoptimized code, evict it and re-enter
// the closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
Register optimization_marker) {
// ----------- S t a t e -------------
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ cmp(optimization_marker,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1081,9 +1049,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
__ b(ne, &push_stack_frame);
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6);
Label has_optimization_marker;
Label maybe_has_optimized_code;
Register optimized_code_entry = r4;
// Read off the optimized code slot in the feedback vector.
__ ldr(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// If not a Smi, then it must be a weak reference to the optimized code.
__ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
// Check if there is an optimization marker and, if so, carry on to the
// MaybeOptimizeCode path.
__ cmp(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
__ b(ne, &has_optimization_marker);
Label not_optimized;
__ bind(&not_optimized);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
@@ -1193,9 +1179,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
__ bind(&has_optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
// Load the code entry from the weak reference. If it was cleared, resume
// execution of unoptimized code.
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ bkpt(0); // Should not return.
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1007,103 +1007,72 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1,
Register scratch2) {
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch) {
// ----------- S t a t e -------------
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, x1, x3, scratch1, scratch2));
Label optimized_code_slot_is_weak_ref, fallthrough;
DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
Register closure = x1;
Register optimized_code_entry = scratch1;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ CompareTaggedAndBranch(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)),
eq, &fallthrough);
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ CmpTagged(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ B(&fallthrough);
}
}
{
// Optimized code slot is a weak reference.
__ bind(&optimized_code_slot_is_weak_ref);
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Ldr(scratch.W(),
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
&found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ LoadCodeObjectEntry(x2, optimized_code_entry);
__ Jump(x2);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch2,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Ldr(
scratch2.W(),
FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ Tbnz(scratch2.W(), Code::kMarkedForDeoptimizationBit,
&found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ LoadCodeObjectEntry(x2, optimized_code_entry);
__ Jump(x2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
Register optimization_marker) {
// ----------- S t a t e -------------
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ CmpTagged(
optimization_marker,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1210,9 +1179,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
__ B(ne, &push_stack_frame);
Label has_optimization_marker;
Label maybe_has_optimized_code;
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4);
Register optimized_code_entry = x7;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// If not a Smi, then it must be a weak reference to the optimized code.
__ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
// Check if there is an optimization marker and, if so, carry on to the
// MaybeOptimizeCode path.
__ CompareTaggedAndBranch(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)),
ne, &has_optimization_marker);
Label not_optimized;
__ bind(&not_optimized);
// Increment invocation count for the function.
// MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so safe to reuse
@@ -1330,6 +1318,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, x2);
__ Ret();
__ bind(&has_optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
// Load the code entry from the weak reference. If it was cleared, resume
// execution of unoptimized code.
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ Unreachable(); // Should not return.
@@ -785,103 +785,75 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch) {
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry) {
// ----------- S t a t e -------------
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -- ecx : feedback vector (also used as scratch, value is not preserved)
// -----------------------------------
DCHECK(!AreAliased(edx, edi, scratch));
Label optimized_code_slot_is_weak_ref, fallthrough;
DCHECK(!AreAliased(edx, edi, optimized_code_entry));
Register closure = edi;
// Scratch contains feedback_vector.
Register feedback_vector = scratch;
// Load the optimized code from the feedback vector and re-use the register.
Register optimized_code_entry = scratch;
__ mov(optimized_code_entry,
FieldOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
__ push(edx);
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
Label found_deoptimized_code;
__ mov(eax,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx,
eax);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ LoadCodeObjectEntry(ecx, optimized_code_entry);
__ pop(edx);
__ jmp(ecx);
{
// Optimized code slot is an optimization marker.
// Fall through if no optimization trigger.
__ cmp(optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
__ j(equal, &fallthrough);
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
// Optimized code slot contains deoptimized code, evict it and re-enter
// the closure's code.
__ bind(&found_deoptimized_code);
__ pop(edx);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
{
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
}
static void MaybeOptimizeCode(MacroAssembler* masm,
Register optimization_marker) {
// ----------- S t a t e -------------
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(edx, edi, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Optimized code slot is a weak reference.
__ bind(&optimized_code_slot_is_weak_ref);
__ LoadWeakValue(optimized_code_entry, &fallthrough);
__ push(edx);
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
Label found_deoptimized_code;
__ mov(eax,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ LoadCodeObjectEntry(ecx, optimized_code_entry);
__ pop(edx);
__ jmp(ecx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
__ pop(edx);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ cmp(
optimization_marker,
Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -982,9 +954,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
// Read off the optimized code slot in the closure's feedback vector, and if
// there is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, ecx);
Label has_optimization_marker;
Label maybe_has_optimized_code;
// Read off the optimized code slot in the feedback vector.
// Load the optimized code from the feedback vector and re-use the register.
Register optimized_code_entry = ecx;
__ mov(optimized_code_entry,
FieldOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// If not a Smi, then it must be a weak reference to the optimized code.
__ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
// Check if there is an optimization marker and, if so, carry on to the
// MaybeOptimizeCode path.
__ cmp(optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
__ j(not_equal, &has_optimization_marker);
Label not_optimized;
__ bind(&not_optimized);
// Load the feedback vector and increment the invocation count.
__ mov(feedback_vector,
@@ -1117,6 +1107,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, edx, ecx);
__ ret(0);
__ bind(&has_optimization_marker);
MaybeOptimizeCode(masm, optimized_code_entry);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
// Load the code entry from the weak reference. If it was cleared, resume
// execution of unoptimized code.
__ LoadWeakValue(optimized_code_entry, &not_optimized);
TailCallOptimizedCodeSlot(masm, optimized_code_entry);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.