Commit 9dfe6359 authored by Dan Elphick, committed by Commit Bot

[builtins] Reordered optimised code path in InterpreterEntryTrampoline

Pushes the optimised code path after the main interpreter path, so the
straight-line path is just normal interpretation.

Bug: v8:9771
Change-Id: I2f48ff290efcd85a5e30cf823027919560f8a56a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1829220
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64066}
parent 615e9003
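For orientation, the reordered control flow of the interpreter entry trampoline can be summarised in plain C++ (a standalone sketch only; CodeSlot, RunInterpreter and the other names below are invented for illustration and are not V8 types or APIs): the common case, where the feedback vector's optimized-code slot holds the kNone marker or a cleared weak reference, now falls straight through to the interpreter, while optimization markers and live optimized code branch out of line.

#include <cstdio>

// Hypothetical stand-ins for the possible states of the feedback vector's
// optimized-code slot; these are not V8 types.
enum class CodeSlot { kNoneMarker, kOtherMarker, kLiveCodeRef, kClearedCodeRef };

void RunInterpreter() { std::puts("dispatch to the first bytecode handler"); }
void MaybeOptimizeCode() { std::puts("act on the optimization marker"); }
void TailCallOptimizedCodeSlot() { std::puts("tail call the optimized code"); }

// After this change the straight-line path is plain interpretation; the
// optimization checks branch out of line and rejoin at the interpreter only
// when there is nothing better to run.
void InterpreterEntryTrampoline(CodeSlot slot) {
  if (slot == CodeSlot::kLiveCodeRef) {
    TailCallOptimizedCodeSlot();  // does not fall back to the interpreter
    return;
  }
  if (slot == CodeSlot::kOtherMarker) {
    MaybeOptimizeCode();  // may hand off to the runtime; otherwise falls through
  }
  RunInterpreter();  // the common case: kNoneMarker or a cleared weak reference
}

int main() {
  InterpreterEntryTrampoline(CodeSlot::kNoneMarker);
  InterpreterEntryTrampoline(CodeSlot::kOtherMarker);
  InterpreterEntryTrampoline(CodeSlot::kLiveCodeRef);
  InterpreterEntryTrampoline(CodeSlot::kClearedCodeRef);
  return 0;
}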
@@ -885,102 +885,70 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
   __ bind(&no_match);
 }
 
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
-                                           Register feedback_vector,
-                                           Register scratch1,
-                                           Register scratch2) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+                                      Register optimized_code_entry,
+                                      Register scratch) {
   // ----------- S t a t e -------------
   //  -- r3 : new target (preserved for callee if needed, and caller)
   //  -- r1 : target function (preserved for callee if needed, and caller)
-  //  -- feedback vector (preserved for caller if needed)
   // -----------------------------------
-  DCHECK(!AreAliased(feedback_vector, r1, r3, scratch1, scratch2));
-
-  Label optimized_code_slot_is_weak_ref, fallthrough;
+  DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
 
   Register closure = r1;
-  Register optimized_code_entry = scratch1;
 
-  __ ldr(
-      optimized_code_entry,
-      FieldMemOperand(feedback_vector,
-                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
-  // Check if the code entry is a Smi. If yes, we interpret it as an
-  // optimisation marker. Otherwise, interpret it as a weak reference to a code
-  // object.
-  __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
-
-  {
-    // Optimized code slot is a Smi optimization marker.
-
-    // Fall through if no optimization trigger.
-    __ cmp(optimized_code_entry,
-           Operand(Smi::FromEnum(OptimizationMarker::kNone)));
-    __ b(eq, &fallthrough);
-
-    // TODO(v8:8394): The logging of first execution will break if
-    // feedback vectors are not allocated. We need to find a different way of
-    // logging these events if required.
-    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
-                                  OptimizationMarker::kLogFirstExecution,
-                                  Runtime::kFunctionFirstExecution);
-    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
-                                  OptimizationMarker::kCompileOptimized,
-                                  Runtime::kCompileOptimized_NotConcurrent);
-    TailCallRuntimeIfMarkerEquals(
-        masm, optimized_code_entry,
-        OptimizationMarker::kCompileOptimizedConcurrent,
-        Runtime::kCompileOptimized_Concurrent);
-
-    {
-      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
-      // that an interrupt will eventually update the slot with optimized code.
-      if (FLAG_debug_code) {
-        __ cmp(
-            optimized_code_entry,
-            Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
-        __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
-      }
-      __ jmp(&fallthrough);
-    }
-  }
-
-  {
-    // Optimized code slot is a weak reference.
-    __ bind(&optimized_code_slot_is_weak_ref);
-
-    __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
-
-    // Check if the optimized code is marked for deopt. If it is, call the
-    // runtime to clear it.
-    Label found_deoptimized_code;
-    __ ldr(scratch2, FieldMemOperand(optimized_code_entry,
-                                     Code::kCodeDataContainerOffset));
-    __ ldr(
-        scratch2,
-        FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
-    __ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
-    __ b(ne, &found_deoptimized_code);
-
-    // Optimized code is good, get it into the closure and link the closure into
-    // the optimized functions list, then tail call the optimized code.
-    // The feedback vector is no longer used, so re-use it as a scratch
-    // register.
-    ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
-    static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
-    __ LoadCodeObjectEntry(r2, optimized_code_entry);
-    __ Jump(r2);
-
-    // Optimized code slot contains deoptimized code, evict it and re-enter the
-    // closure's code.
-    __ bind(&found_deoptimized_code);
-    GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
-  }
-
-  // Fall-through if the optimized code cell is clear and there is no
-  // optimization marker.
-  __ bind(&fallthrough);
+  // Check if the optimized code is marked for deopt. If it is, call the
+  // runtime to clear it.
+  Label found_deoptimized_code;
+  __ ldr(scratch,
+         FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+  __ ldr(scratch,
+         FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+  __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+  __ b(ne, &found_deoptimized_code);
+
+  // Optimized code is good, get it into the closure and link the closure
+  // into the optimized functions list, then tail call the optimized code.
+  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
+  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+  __ LoadCodeObjectEntry(r2, optimized_code_entry);
+  __ Jump(r2);
+
+  // Optimized code slot contains deoptimized code, evict it and re-enter
+  // the closure's code.
+  __ bind(&found_deoptimized_code);
+  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+}
+
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+                              Register optimization_marker) {
+  // ----------- S t a t e -------------
+  //  -- r3 : new target (preserved for callee if needed, and caller)
+  //  -- r1 : target function (preserved for callee if needed, and caller)
+  //  -- feedback vector (preserved for caller if needed)
+  //  -- optimization_marker : a Smi containing a non-zero optimization marker.
+  // -----------------------------------
+  DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
+
+  // TODO(v8:8394): The logging of first execution will break if
+  // feedback vectors are not allocated. We need to find a different way of
+  // logging these events if required.
+  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+                                OptimizationMarker::kLogFirstExecution,
+                                Runtime::kFunctionFirstExecution);
+  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+                                OptimizationMarker::kCompileOptimized,
+                                Runtime::kCompileOptimized_NotConcurrent);
+  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+                                OptimizationMarker::kCompileOptimizedConcurrent,
+                                Runtime::kCompileOptimized_Concurrent);
+
+  // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+  // that an interrupt will eventually update the slot with optimized code.
+  if (FLAG_debug_code) {
+    __ cmp(optimization_marker,
+           Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+    __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+  }
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1081,9 +1049,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
   __ b(ne, &push_stack_frame);
 
-  // Read off the optimized code slot in the feedback vector, and if there
-  // is optimized code or an optimization marker, call that instead.
-  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6);
+  Label has_optimization_marker;
+  Label maybe_has_optimized_code;
+
+  Register optimized_code_entry = r4;
+
+  // Read off the optimized code slot in the feedback vector.
+  __ ldr(optimized_code_entry,
+         FieldMemOperand(feedback_vector,
+                         FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+
+  // If not a Smi, then it must be a weak reference to the optimized code.
+  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+
+  // Check if there is an optimization marker, and if so carry on to the
+  // MaybeOptimizeCode path.
+  __ cmp(optimized_code_entry,
+         Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+  __ b(ne, &has_optimization_marker);
+
+  Label not_optimized;
+  __ bind(&not_optimized);
 
   // Increment invocation count for the function.
   __ ldr(r9, FieldMemOperand(feedback_vector,
@@ -1193,9 +1179,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LeaveInterpreterFrame(masm, r2);
   __ Jump(lr);
 
+  __ bind(&has_optimization_marker);
+  MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+  // Fall through if there's no runnable optimized code.
+  __ jmp(&not_optimized);
+
+  __ bind(&maybe_has_optimized_code);
+  // Load code entry from the weak reference, if it was cleared, resume
+  // execution of unoptimized code.
+  __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
+
   __ bind(&compile_lazy);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+  __ bkpt(0);  // Should not return.
 
   __ bind(&stack_overflow);
   __ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1007,103 +1007,72 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
   __ bind(&no_match);
 }
 
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
-                                           Register feedback_vector,
-                                           Register scratch1,
-                                           Register scratch2) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+                                      Register optimized_code_entry,
+                                      Register scratch) {
   // ----------- S t a t e -------------
   //  -- x3 : new target (preserved for callee if needed, and caller)
   //  -- x1 : target function (preserved for callee if needed, and caller)
-  //  -- feedback vector (preserved for caller if needed)
   // -----------------------------------
-  DCHECK(!AreAliased(feedback_vector, x1, x3, scratch1, scratch2));
-
-  Label optimized_code_slot_is_weak_ref, fallthrough;
+  DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
 
   Register closure = x1;
-  Register optimized_code_entry = scratch1;
-
-  __ LoadAnyTaggedField(
-      optimized_code_entry,
-      FieldMemOperand(feedback_vector,
-                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
-  // Check if the code entry is a Smi. If yes, we interpret it as an
-  // optimisation marker. Otherwise, interpret is at a weak reference to a code
-  // object.
-  __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
-
-  {
-    // Optimized code slot is a Smi optimization marker.
-
-    // Fall through if no optimization trigger.
-    __ CompareTaggedAndBranch(optimized_code_entry,
-                              Operand(Smi::FromEnum(OptimizationMarker::kNone)),
-                              eq, &fallthrough);
-
-    // TODO(v8:8394): The logging of first execution will break if
-    // feedback vectors are not allocated. We need to find a different way of
-    // logging these events if required.
-    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
-                                  OptimizationMarker::kLogFirstExecution,
-                                  Runtime::kFunctionFirstExecution);
-    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
-                                  OptimizationMarker::kCompileOptimized,
-                                  Runtime::kCompileOptimized_NotConcurrent);
-    TailCallRuntimeIfMarkerEquals(
-        masm, optimized_code_entry,
-        OptimizationMarker::kCompileOptimizedConcurrent,
-        Runtime::kCompileOptimized_Concurrent);
-
-    {
-      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
-      // that an interrupt will eventually update the slot with optimized code.
-      if (FLAG_debug_code) {
-        __ CmpTagged(
-            optimized_code_entry,
-            Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
-        __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
-      }
-      __ B(&fallthrough);
-    }
-  }
-
-  {
-    // Optimized code slot is a weak reference.
-    __ bind(&optimized_code_slot_is_weak_ref);
-
-    __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
-
-    // Check if the optimized code is marked for deopt. If it is, call the
-    // runtime to clear it.
-    Label found_deoptimized_code;
-    __ LoadTaggedPointerField(
-        scratch2,
-        FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
-    __ Ldr(
-        scratch2.W(),
-        FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
-    __ Tbnz(scratch2.W(), Code::kMarkedForDeoptimizationBit,
-            &found_deoptimized_code);
-
-    // Optimized code is good, get it into the closure and link the closure into
-    // the optimized functions list, then tail call the optimized code.
-    // The feedback vector is no longer used, so re-use it as a scratch
-    // register.
-    ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
-    static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
-    __ LoadCodeObjectEntry(x2, optimized_code_entry);
-    __ Jump(x2);
-
-    // Optimized code slot contains deoptimized code, evict it and re-enter the
-    // closure's code.
-    __ bind(&found_deoptimized_code);
-    GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
-  }
-
-  // Fall-through if the optimized code cell is clear and there is no
-  // optimization marker.
-  __ bind(&fallthrough);
+  // Check if the optimized code is marked for deopt. If it is, call the
+  // runtime to clear it.
+  Label found_deoptimized_code;
+  __ LoadTaggedPointerField(
+      scratch,
+      FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+  __ Ldr(scratch.W(),
+         FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+  __ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
+          &found_deoptimized_code);
+
+  // Optimized code is good, get it into the closure and link the closure into
+  // the optimized functions list, then tail call the optimized code.
+  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
+  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+  __ LoadCodeObjectEntry(x2, optimized_code_entry);
+  __ Jump(x2);
+
+  // Optimized code slot contains deoptimized code, evict it and re-enter the
+  // closure's code.
+  __ bind(&found_deoptimized_code);
+  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+}
+
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+                              Register optimization_marker) {
+  // ----------- S t a t e -------------
+  //  -- x3 : new target (preserved for callee if needed, and caller)
+  //  -- x1 : target function (preserved for callee if needed, and caller)
+  //  -- feedback vector (preserved for caller if needed)
+  //  -- optimization_marker : a Smi containing a non-zero optimization marker.
+  // -----------------------------------
+  DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));
+
+  // TODO(v8:8394): The logging of first execution will break if
+  // feedback vectors are not allocated. We need to find a different way of
+  // logging these events if required.
+  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+                                OptimizationMarker::kLogFirstExecution,
+                                Runtime::kFunctionFirstExecution);
+  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+                                OptimizationMarker::kCompileOptimized,
+                                Runtime::kCompileOptimized_NotConcurrent);
+  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+                                OptimizationMarker::kCompileOptimizedConcurrent,
+                                Runtime::kCompileOptimized_Concurrent);
+
+  // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+  // that an interrupt will eventually update the slot with optimized code.
+  if (FLAG_debug_code) {
+    __ CmpTagged(
+        optimization_marker,
+        Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+    __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+  }
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1210,9 +1179,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
   __ B(ne, &push_stack_frame);
 
+  Label has_optimization_marker;
+  Label maybe_has_optimized_code;
+
   // Read off the optimized code slot in the feedback vector, and if there
   // is optimized code or an optimization marker, call that instead.
-  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4);
+  Register optimized_code_entry = x7;
+  __ LoadAnyTaggedField(
+      optimized_code_entry,
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+
+  // If not a Smi, then it must be a weak reference to the optimized code.
+  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+
+  // Check if there is an optimization marker and if so carry on to the
+  // MaybeOptimizeCode path.
+  __ CompareTaggedAndBranch(optimized_code_entry,
+                            Operand(Smi::FromEnum(OptimizationMarker::kNone)),
+                            ne, &has_optimization_marker);
+
+  Label not_optimized;
+  __ bind(&not_optimized);
 
   // Increment invocation count for the function.
   // MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so safe to reuse
@@ -1330,6 +1318,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LeaveInterpreterFrame(masm, x2);
   __ Ret();
 
+  __ bind(&has_optimization_marker);
+  MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+  // Fall through if there's no runnable optimized code.
+  __ jmp(&not_optimized);
+
+  __ bind(&maybe_has_optimized_code);
+  // Load code entry from the weak reference, if it was cleared, resume
+  // execution of unoptimized code.
+  __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+  TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
+
   __ bind(&compile_lazy);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
   __ Unreachable();  // Should not return.
@@ -785,103 +785,75 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
   __ bind(&no_match);
 }
 
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
-                                           Register scratch) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+                                      Register optimized_code_entry) {
   // ----------- S t a t e -------------
   //  -- edx : new target (preserved for callee if needed, and caller)
   //  -- edi : target function (preserved for callee if needed, and caller)
-  //  -- ecx : feedback vector (also used as scratch, value is not preserved)
   // -----------------------------------
-  DCHECK(!AreAliased(edx, edi, scratch));
-
-  Label optimized_code_slot_is_weak_ref, fallthrough;
+  DCHECK(!AreAliased(edx, edi, optimized_code_entry));
 
   Register closure = edi;
-  // Scratch contains feedback_vector.
-  Register feedback_vector = scratch;
 
-  // Load the optimized code from the feedback vector and re-use the register.
-  Register optimized_code_entry = scratch;
-  __ mov(optimized_code_entry,
-         FieldOperand(feedback_vector,
-                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+  __ push(edx);
 
-  // Check if the code entry is a Smi. If yes, we interpret it as an
-  // optimisation marker. Otherwise, interpret it as a weak reference to a code
-  // object.
-  __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
+  // Check if the optimized code is marked for deopt. If it is, bailout to a
+  // given label.
+  Label found_deoptimized_code;
+  __ mov(eax,
+         FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+  __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
+          Immediate(1 << Code::kMarkedForDeoptimizationBit));
+  __ j(not_zero, &found_deoptimized_code);
+
+  // Optimized code is good, get it into the closure and link the closure
+  // into the optimized functions list, then tail call the optimized code.
+  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx,
+                                      eax);
+  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+  __ LoadCodeObjectEntry(ecx, optimized_code_entry);
+  __ pop(edx);
+  __ jmp(ecx);
 
-  {
-    // Optimized code slot is an optimization marker.
-
-    // Fall through if no optimization trigger.
-    __ cmp(optimized_code_entry,
-           Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
-    __ j(equal, &fallthrough);
-
-    // TODO(v8:8394): The logging of first execution will break if
-    // feedback vectors are not allocated. We need to find a different way of
-    // logging these events if required.
-    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
-                                  OptimizationMarker::kLogFirstExecution,
-                                  Runtime::kFunctionFirstExecution);
-    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
-                                  OptimizationMarker::kCompileOptimized,
-                                  Runtime::kCompileOptimized_NotConcurrent);
-    TailCallRuntimeIfMarkerEquals(
-        masm, optimized_code_entry,
-        OptimizationMarker::kCompileOptimizedConcurrent,
-        Runtime::kCompileOptimized_Concurrent);
-
-    {
-      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
-      // that an interrupt will eventually update the slot with optimized code.
-      if (FLAG_debug_code) {
-        __ cmp(
-            optimized_code_entry,
-            Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
-        __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
-      }
-      __ jmp(&fallthrough);
-    }
-  }
+  // Optimized code slot contains deoptimized code, evict it and re-enter
+  // the closure's code.
+  __ bind(&found_deoptimized_code);
+  __ pop(edx);
+  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+}
 
-  {
-    // Optimized code slot is a weak reference.
-    __ bind(&optimized_code_slot_is_weak_ref);
-
-    __ LoadWeakValue(optimized_code_entry, &fallthrough);
-
-    __ push(edx);
-
-    // Check if the optimized code is marked for deopt. If it is, bailout to a
-    // given label.
-    Label found_deoptimized_code;
-    __ mov(eax,
-           FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
-    __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
-            Immediate(1 << Code::kMarkedForDeoptimizationBit));
-    __ j(not_zero, &found_deoptimized_code);
-
-    // Optimized code is good, get it into the closure and link the closure into
-    // the optimized functions list, then tail call the optimized code.
-    ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
-                                        edx, eax);
-    static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
-    __ LoadCodeObjectEntry(ecx, optimized_code_entry);
-    __ pop(edx);
-    __ jmp(ecx);
-
-    // Optimized code slot contains deoptimized code, evict it and re-enter the
-    // closure's code.
-    __ bind(&found_deoptimized_code);
-    __ pop(edx);
-    GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
-  }
+static void MaybeOptimizeCode(MacroAssembler* masm,
+                              Register optimization_marker) {
+  // ----------- S t a t e -------------
+  //  -- edx : new target (preserved for callee if needed, and caller)
+  //  -- edi : target function (preserved for callee if needed, and caller)
+  //  -- optimization_marker : a Smi containing a non-zero optimization marker.
+  // -----------------------------------
+  DCHECK(!AreAliased(edx, edi, optimization_marker));
 
-  // Fall-through if the optimized code cell is clear and there is no
-  // optimization marker.
-  __ bind(&fallthrough);
+  // TODO(v8:8394): The logging of first execution will break if
+  // feedback vectors are not allocated. We need to find a different way of
+  // logging these events if required.
+  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+                                OptimizationMarker::kLogFirstExecution,
+                                Runtime::kFunctionFirstExecution);
+  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+                                OptimizationMarker::kCompileOptimized,
+                                Runtime::kCompileOptimized_NotConcurrent);
+  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+                                OptimizationMarker::kCompileOptimizedConcurrent,
+                                Runtime::kCompileOptimized_Concurrent);
+
+  {
+    // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+    // that an interrupt will eventually update the slot with optimized code.
+    if (FLAG_debug_code) {
+      __ cmp(
+          optimization_marker,
+          Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+      __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
+    }
+  }
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -982,9 +954,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
   __ j(not_equal, &push_stack_frame);
 
-  // Read off the optimized code slot in the closure's feedback vector, and if
-  // there is optimized code or an optimization marker, call that instead.
-  MaybeTailCallOptimizedCodeSlot(masm, ecx);
+  Label has_optimization_marker;
+  Label maybe_has_optimized_code;
+
+  // Read off the optimized code slot in the feedback vector.
+  // Load the optimized code from the feedback vector and re-use the register.
+  Register optimized_code_entry = ecx;
+  __ mov(optimized_code_entry,
+         FieldOperand(feedback_vector,
+                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+
+  // If not a Smi, then it must be a weak reference to the optimized code.
+  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+
+  // Check if there is an optimization marker and if so carry on to the
+  // MaybeOptimizeCode path.
+  __ cmp(optimized_code_entry,
+         Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
+  __ j(not_equal, &has_optimization_marker);
+
+  Label not_optimized;
+  __ bind(&not_optimized);
 
   // Load the feedback vector and increment the invocation count.
   __ mov(feedback_vector,
@@ -1117,6 +1107,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LeaveInterpreterFrame(masm, edx, ecx);
   __ ret(0);
 
+  __ bind(&has_optimization_marker);
+  MaybeOptimizeCode(masm, optimized_code_entry);
+  // Fall through if there's no runnable optimized code.
+  __ jmp(&not_optimized);
+
+  __ bind(&maybe_has_optimized_code);
+  // Load code entry from the weak reference, if it was cleared, resume
+  // execution of unoptimized code.
+  __ LoadWeakValue(optimized_code_entry, &not_optimized);
+  TailCallOptimizedCodeSlot(masm, optimized_code_entry);
+
   __ bind(&compile_lazy);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
   __ int3();  // Should not return.