Commit 9dfe6359 authored by Dan Elphick, committed by Commit Bot

[builtins] Reordered optimised code path in InterpreterEntryTrampoline

Moves the optimised code path to after the main interpreter path, so
that the straight-line path is just normal interpretation.

Bug: v8:9771
Change-Id: I2f48ff290efcd85a5e30cf823027919560f8a56a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1829220
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64066}
parent 615e9003
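
For orientation before the per-architecture diffs: the old MaybeTailCallOptimizedCodeSlot helper is split into MaybeOptimizeCode (handles a Smi optimization marker) and TailCallOptimizedCodeSlot (handles a live weak reference to optimized code), and the trampoline itself now does only the cheap Smi / kNone checks inline, branching to out-of-line blocks placed after the interpreter body. The plain-C++ sketch below is illustrative only: the Slot and Marker types, their fields, and main() are hypothetical stand-ins rather than V8 APIs; only the two helper names correspond to functions in the diff.

#include <cstdio>

enum class Marker { kNone, kCompileOptimized, kInOptimizationQueue };

// Hypothetical model of the feedback vector's optimized-code slot: either a
// Smi optimization marker or a (possibly cleared) weak reference to code.
struct Slot {
  bool is_smi;       // true => marker below is valid
  Marker marker;
  bool code_alive;   // valid when !is_smi: weak reference not yet cleared
};

static void MaybeOptimizeCode(Marker) { std::puts("runtime: compile/optimize"); }
static void TailCallOptimizedCodeSlot() { std::puts("jump to optimized code"); }

// Shape of the reordered trampoline: checks up front, interpreter body next,
// optimization handling pushed out of line after the return path.
static void InterpreterEntryTrampoline(Slot slot) {
  if (!slot.is_smi) goto maybe_has_optimized_code;
  if (slot.marker != Marker::kNone) goto has_optimization_marker;

not_optimized:
  std::puts("increment invocation count, dispatch bytecode");  // main path
  return;

has_optimization_marker:
  // In the generated code this tail-calls the runtime for most markers;
  // only kInOptimizationQueue falls through to keep interpreting.
  MaybeOptimizeCode(slot.marker);
  goto not_optimized;

maybe_has_optimized_code:
  if (!slot.code_alive) goto not_optimized;  // weak reference was cleared
  TailCallOptimizedCodeSlot();
}

int main() {
  InterpreterEntryTrampoline({true, Marker::kNone, false});   // common case
  InterpreterEntryTrampoline({false, Marker::kNone, true});   // optimized code
}

The point of the reordering is layout: in the common case (the slot holds kNone) the generated trampoline now falls straight through into bytecode dispatch, with the optimized-code machinery moved out of the straight-line path.
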
@@ -885,102 +885,70 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1,
Register scratch2) {
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch) {
// ----------- S t a t e -------------
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r1, r3, scratch1, scratch2));
Label optimized_code_slot_is_weak_ref, fallthrough;
DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
Register closure = r1;
Register optimized_code_entry = scratch1;
__ ldr(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ ldr(scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &found_deoptimized_code);
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
{
// Optimized code slot is a Smi optimization marker.
// Optimized code slot contains deoptimized code, evict it and re-enter
// the closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall through if no optimization trigger.
__ cmp(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
__ b(eq, &fallthrough);
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
Register optimization_marker) {
// ----------- S t a t e -------------
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
__ cmp(optimization_marker,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
}
{
// Optimized code slot is a weak reference.
__ bind(&optimized_code_slot_is_weak_ref);
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ ldr(scratch2, FieldMemOperand(optimized_code_entry,
Code::kCodeDataContainerOffset));
__ ldr(
scratch2,
FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1081,9 +1049,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
__ b(ne, &push_stack_frame);
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6);
Label has_optimization_marker;
Label maybe_has_optimized_code;
Register optimized_code_entry = r4;
// Read off the optimized code slot in the feedback vector.
__ ldr(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// If not a Smi, then it must be a weak reference to the optimized code.
__ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
// Check if there is an optimization marker, and if so carry on to the
// MaybeOptimizeCode path.
__ cmp(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
__ b(ne, &has_optimization_marker);
Label not_optimized;
__ bind(&not_optimized);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
@@ -1193,9 +1179,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
__ bind(&has_optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
// Load code entry from the weak reference, if it was cleared, resume
// execution of unoptimized code.
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ bkpt(0); // Should not return.
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1007,103 +1007,72 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1,
Register scratch2) {
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch) {
// ----------- S t a t e -------------
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, x1, x3, scratch1, scratch2));
Label optimized_code_slot_is_weak_ref, fallthrough;
DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
Register closure = x1;
Register optimized_code_entry = scratch1;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Ldr(scratch.W(),
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
&found_deoptimized_code);
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ LoadCodeObjectEntry(x2, optimized_code_entry);
__ Jump(x2);
{
// Optimized code slot is a Smi optimization marker.
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall through if no optimization trigger.
__ CompareTaggedAndBranch(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)),
eq, &fallthrough);
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
Register optimization_marker) {
// ----------- S t a t e -------------
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ CmpTagged(
optimized_code_entry,
optimization_marker,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ B(&fallthrough);
}
}
{
// Optimized code slot is a weak reference.
__ bind(&optimized_code_slot_is_weak_ref);
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch2,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Ldr(
scratch2.W(),
FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ Tbnz(scratch2.W(), Code::kMarkedForDeoptimizationBit,
&found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ LoadCodeObjectEntry(x2, optimized_code_entry);
__ Jump(x2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1210,9 +1179,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
__ B(ne, &push_stack_frame);
Label has_optimization_marker;
Label maybe_has_optimized_code;
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4);
Register optimized_code_entry = x7;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// If not a Smi, then it must be a weak reference to the optimized code.
__ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
// Check if there is an optimization marker and if so carry on to the
// MaybeOptimizeCode path.
__ CompareTaggedAndBranch(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)),
ne, &has_optimization_marker);
Label not_optimized;
__ bind(&not_optimized);
// Increment invocation count for the function.
// MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so safe to reuse
@@ -1330,6 +1318,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, x2);
__ Ret();
__ bind(&has_optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
// Load code entry from the weak reference, if it was cleared, resume
// execution of unoptimized code.
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ Unreachable(); // Should not return.
@@ -785,51 +785,62 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch) {
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry) {
// ----------- S t a t e -------------
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -- ecx : feedback vector (also used as scratch, value is not preserved)
// -----------------------------------
DCHECK(!AreAliased(edx, edi, scratch));
Label optimized_code_slot_is_weak_ref, fallthrough;
DCHECK(!AreAliased(edx, edi, optimized_code_entry));
Register closure = edi;
// Scratch contains feedback_vector.
Register feedback_vector = scratch;
// Load the optimized code from the feedback vector and re-use the register.
Register optimized_code_entry = scratch;
__ mov(optimized_code_entry,
FieldOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
__ push(edx);
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
Label found_deoptimized_code;
__ mov(eax,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
{
// Optimized code slot is an optimization marker.
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx,
eax);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ LoadCodeObjectEntry(ecx, optimized_code_entry);
__ pop(edx);
__ jmp(ecx);
// Fall through if no optimization trigger.
__ cmp(optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
__ j(equal, &fallthrough);
// Optimized code slot contains deoptimized code, evict it and re-enter
// the closure's code.
__ bind(&found_deoptimized_code);
__ pop(edx);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void MaybeOptimizeCode(MacroAssembler* masm,
Register optimization_marker) {
// ----------- S t a t e -------------
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(edx, edi, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
@@ -838,50 +849,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ cmp(
optimized_code_entry,
optimization_marker,
Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
}
{
// Optimized code slot is a weak reference.
__ bind(&optimized_code_slot_is_weak_ref);
__ LoadWeakValue(optimized_code_entry, &fallthrough);
__ push(edx);
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
Label found_deoptimized_code;
__ mov(eax,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ LoadCodeObjectEntry(ecx, optimized_code_entry);
__ pop(edx);
__ jmp(ecx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
__ bind(&found_deoptimized_code);
__ pop(edx);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -982,9 +954,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
// Read off the optimized code slot in the closure's feedback vector, and if
// there is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, ecx);
Label has_optimization_marker;
Label maybe_has_optimized_code;
// Read off the optimized code slot in the feedback vector.
// Load the optimized code from the feedback vector and re-use the register.
Register optimized_code_entry = ecx;
__ mov(optimized_code_entry,
FieldOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// If not a Smi, then it must be a weak reference to the optimized code.
__ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
// Check if there is an optimization marker and if so carry on to the
// MaybeOptimizeCode path.
__ cmp(optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
__ j(not_equal, &has_optimization_marker);
Label not_optimized;
__ bind(&not_optimized);
// Load the feedback vector and increment the invocation count.
__ mov(feedback_vector,
@@ -1117,6 +1107,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, edx, ecx);
__ ret(0);
__ bind(&has_optimization_marker);
MaybeOptimizeCode(masm, optimized_code_entry);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
// Load code entry from the weak reference, if it was cleared, resume
// execution of unoptimized code.
__ LoadWeakValue(optimized_code_entry, &not_optimized);
TailCallOptimizedCodeSlot(masm, optimized_code_entry);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
@@ -7,6 +7,7 @@
#include "src/api/api-arguments.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
@@ -851,10 +852,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// TODO(juliana): if we remove the code below then we don't need all
// the parameters.
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register optimized_code,
Register closure,
Register scratch1,
Register scratch2) {
// Store the optimized code in the closure.
__ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
optimized_code);
@@ -895,91 +897,63 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
Register optimization_marker) {
// ----------- S t a t e -------------
// -- rdx : new target (preserved for callee if needed, and caller)
// -- rdi : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, rdx, rdi, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = rdi;
Register optimized_code_entry = scratch1;
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? scratch2 : no_reg;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset),
decompr_scratch);
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ SmiCompare(optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kNone));
__ j(equal, &fallthrough);
DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ SmiCompare(optimized_code_entry,
__ SmiCompare(optimization_marker,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
__ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
}
}
{
// Optimized code slot is a weak reference.
__ bind(&optimized_code_slot_is_weak_ref);
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch1, Register scratch2) {
// ----------- S t a t e -------------
// -- rdx : new target (preserved for callee if needed, and caller)
// -- rdi : target function (preserved for callee if needed, and caller)
// -----------------------------------
__ LoadWeakValue(optimized_code_entry, &fallthrough);
Register closure = rdi;
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch2,
scratch1,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ testl(
FieldOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset),
__ testl(FieldOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &found_deoptimized_code);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
scratch1, scratch2);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ Move(rcx, optimized_code_entry);
__ JumpCodeObject(rcx);
@@ -988,11 +962,6 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1100,9 +1069,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
Label has_optimization_marker;
Label maybe_has_optimized_code;
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r11, r15);
Register optimized_code_entry = rcx;
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset),
decompr_scratch);
// If not a Smi, then it must be a weak reference to the optimized code.
__ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
// Check if there is an optimization marker and if so carry on to the
// MaybeOptimizeCode path.
__ SmiCompare(optimized_code_entry, Smi::FromEnum(OptimizationMarker::kNone));
__ j(not_equal, &has_optimization_marker);
Label not_optimized;
__ bind(&not_optimized);
// Increment invocation count for the function.
__ incl(
@@ -1220,6 +1210,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
__ bind(&has_optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
// Load code entry from the weak reference, if it was cleared, resume
// execution of unoptimized code.
__ LoadWeakValue(optimized_code_entry, &not_optimized);
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ int3(); // Should not return.