Commit f21aeb63 authored by Zhao Jiazhong, committed by Commit Bot

[mips][builtins] Reordered optimised code path in InterpreterEntryTrampoline

port 9dfe6359 https://crrev.com/c/1829220

[mips][builtins] Tweak optimized check in InterpreterEntryTrampoline

port 7177d87f https://crrev.com/c/1833683

Bug: v8:9771
Change-Id: I1d5214fd6d6b5bf3f6c30003c616054ce4e4ee15
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1892471
Reviewed-by: Bill Budge <bbudge@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65597}
parent cee4a51d
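
In rough C++ terms, the reordered check in both ports below amounts to the following. This is an illustrative, self-contained model (the enum, struct, and function names are stand-ins, not V8's types; the real builtins emit MIPS assembly through the MacroAssembler): the empty-slot case is tested first on the main interpreter path, and the marker and weak-reference cases are handled out of line.

#include <cstdint>

// Stand-in types for illustration only; not V8's definitions.
enum class OptimizationMarker : uint8_t {
  kNone, kLogFirstExecution, kCompileOptimized,
  kCompileOptimizedConcurrent, kInOptimizationQueue
};

struct OptimizedCodeSlot {
  bool is_smi_marker;              // Smi marker vs. weak reference to code
  OptimizationMarker marker;       // valid only when is_smi_marker
  const void* code;                // nullptr models a cleared weak reference
  bool marked_for_deoptimization;  // the kMarkedForDeoptimizationBit flag
};

// Returns the optimized code to tail-call, or nullptr to keep interpreting.
const void* CheckOptimizedCodeSlot(const OptimizedCodeSlot& slot) {
  // New ordering: the common "slot is empty" case is checked first, so the
  // interpreter entry takes no marker-specific branches in the usual case.
  if (slot.is_smi_marker && slot.marker == OptimizationMarker::kNone) {
    return nullptr;  // label not_optimized: run the bytecode
  }
  if (!slot.is_smi_marker) {
    // maybe_has_optimized_code: a cleared weak ref resumes unoptimized code;
    // code marked for deoptimization is evicted via the runtime (not modeled).
    if (slot.code == nullptr || slot.marked_for_deoptimization) return nullptr;
    return slot.code;  // TailCallOptimizedCodeSlot installs it and jumps to it
  }
  // Non-kNone Smi marker: MaybeOptimizeCode tail-calls the runtime; only
  // kInOptimizationQueue falls back to the interpreter.
  return nullptr;
}

The old MaybeTailCallOptimizedCodeSlot folded all three cases into one helper with a fall-through label; the diff splits it into TailCallOptimizedCodeSlot and MaybeOptimizeCode and moves the kNone check onto the trampoline's main path.
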
@@ -793,9 +793,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
}
}
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register optimized_code,
Register closure,
Register scratch1,
Register scratch2) {
// Store code entry in the closure.
__ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
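
Conceptually, the helper above is a tagged store into JSFunction::kCodeOffset followed by a GC write barrier (hence scratch1 being clobbered). A minimal sketch with stand-in types, where the callback stands in for the write-barrier sequence emitted after the lines shown here:

// Stand-in types for illustration; not V8's JSFunction or MacroAssembler API.
struct Closure { const void* code; };

void ReplaceClosureCode(Closure* closure, const void* optimized_code,
                        void (*record_write)(Closure*, const void**)) {
  closure->code = optimized_code;         // __ sw/Sd(optimized_code, kCodeOffset)
  record_write(closure, &closure->code);  // GC write barrier; clobbers scratches
}
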
@@ -831,97 +833,73 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch1, Register scratch2) {
// ----------- S t a t e -------------
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
Register closure = a1;
Register optimized_code_entry = scratch1;
__ lw(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak cell to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ Branch(&fallthrough, eq, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(
eq, AbortReason::kExpectedOptimizationSentinel,
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
__ jmp(&fallthrough);
}
}
{
// Optimized code slot is a weak reference.
__ bind(&optimized_code_slot_is_weak_ref);
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ lw(scratch2, FieldMemOperand(optimized_code_entry,
Code::kCodeDataContainerOffset));
__ lw(scratch2, FieldMemOperand(
scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(scratch2, scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&found_deoptimized_code, ne, scratch2, Operand(zero_reg));
__ Lw(scratch1,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Lw(scratch1,
FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(scratch1, scratch1, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&found_deoptimized_code, ne, scratch1, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
scratch1, scratch2);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
__ Jump(a2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// losure's code.
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
}
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
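
The pair of loads followed by And/Branch in TailCallOptimizedCodeSlot above is a single-bit test on the CodeDataContainer's kind_specific_flags word: if the bit is set, the builtin tail-calls Runtime::kEvictOptimizedCodeSlot; otherwise it installs the code in the closure and jumps past the Code header. In plain C++ the test itself is just the following (placeholder bit index; the real constant is Code::kMarkedForDeoptimizationBit):

#include <cstdint>

// Placeholder value for illustration; V8 defines the actual bit position.
constexpr uint32_t kMarkedForDeoptimizationBit = 0;

// Equivalent of: And(scratch, flags, 1 << bit); Branch(..., ne, scratch, 0).
bool IsMarkedForDeoptimization(uint32_t kind_specific_flags) {
  return (kind_specific_flags & (1u << kMarkedForDeoptimizationBit)) != 0;
}
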
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
Register optimization_marker) {
// ----------- S t a t e -------------
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel,
optimization_marker,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
}
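
MaybeOptimizeCode is a straight marker-to-runtime dispatch built from the TailCallRuntimeIfMarkerEquals calls above. A self-contained sketch of that mapping (own enums whose names mirror the V8 constants; nullopt stands for the fall-through, which only kInOptimizationQueue is expected to reach, as asserted under FLAG_debug_code):

#include <optional>

// Illustrative enums; not V8's OptimizationMarker or Runtime identifiers.
enum class Marker {
  kLogFirstExecution, kCompileOptimized,
  kCompileOptimizedConcurrent, kInOptimizationQueue
};
enum class RuntimeCall {
  kFunctionFirstExecution,
  kCompileOptimized_NotConcurrent,
  kCompileOptimized_Concurrent
};

std::optional<RuntimeCall> RuntimeForMarker(Marker m) {
  switch (m) {
    case Marker::kLogFirstExecution:
      return RuntimeCall::kFunctionFirstExecution;
    case Marker::kCompileOptimized:
      return RuntimeCall::kCompileOptimized_NotConcurrent;
    case Marker::kCompileOptimizedConcurrent:
      return RuntimeCall::kCompileOptimized_Concurrent;
    case Marker::kInOptimizationQueue:
    default:
      return std::nullopt;  // fall through: keep running bytecode
  }
}
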
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1026,7 +1004,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
Register optimized_code_entry = t0;
__ Lw(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the optimized code slot is not empty.
Label optimized_code_slot_not_empty;
__ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
Label not_optimized;
__ bind(&not_optimized);
// Increment invocation count for the function.
__ lw(t0, FieldMemOperand(feedback_vector,
@@ -1138,6 +1128,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
// optimized code as opposed to an optimization marker.
__ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
// Load code entry from the weak reference, if it was cleared, resume
// execution of unoptimized code.
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
// Unreachable code.
@@ -812,9 +812,11 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register optimized_code,
Register closure,
Register scratch1,
Register scratch2) {
// Store code entry in the closure.
__ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
@@ -849,73 +851,22 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
Register scratch1, Register scratch2,
Register scratch3) {
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch1, Register scratch2) {
// ----------- S t a t e -------------
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
Register closure = a1;
Register optimized_code_entry = scratch1;
__ Ld(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
__ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger.
__ Branch(&fallthrough, eq, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimized_code_entry,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(
eq, AbortReason::kExpectedOptimizationSentinel,
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
__ jmp(&fallthrough);
}
}
{
// Optimized code slot is a weak reference.
__ bind(&optimized_code_slot_is_weak_ref);
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ Ld(a5, FieldMemOperand(optimized_code_entry,
Code::kCodeDataContainerOffset));
__ Ld(a5,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg));
@@ -925,7 +876,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// The feedback vector is no longer used, so re-use it as a scratch
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
scratch1, scratch2);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ Daddu(a2, optimized_code_entry,
@@ -933,14 +884,41 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Jump(a2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// losure's code.
// closure's code.
__ bind(&found_deoptimized_code);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
Register optimization_marker) {
// ----------- S t a t e -------------
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
// Fall-through if the optimized code cell is clear and there is no
// optimization marker.
__ bind(&fallthrough);
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel,
optimization_marker,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1044,7 +1022,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
Register optimized_code_entry = a4;
__ Ld(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the optimized code slot is not empty.
Label optimized_code_slot_not_empty;
__ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
Label not_optimized;
__ bind(&not_optimized);
// Increment invocation count for the function.
__ Lw(a4, FieldMemOperand(feedback_vector,
@@ -1157,6 +1147,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
// optimized code as opposed to an optimization marker.
__ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
// Load code entry from the weak reference, if it was cleared, resume
// execution of unoptimized code.
__ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
// Unreachable code.