Commit 89ea44bf authored by Victor Gomes, committed by Commit Bot

[cleanup] Extract common code to MaybeOptimizeCodeOrTailCallOptimizedCode

- Also changes in arm and ia32 for consistency
- Removes fall-through since MaybeOptimizeCode never returns

Change-Id: I115cff07c7d58ac3f7d0e0feeccbd6b1b172bd53
Bug: v8:11429
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2695392
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72764}
parent 75131637
......@@ -989,6 +989,28 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}
// ARM port. Shared tail of the interpreter entry path: if an optimization
// marker is set, decode it and hand off to MaybeOptimizeCode (which, per the
// commit description, never returns); otherwise fall through to the
// optimized-code slot of the feedback vector and tail-call into it.
// |optimization_state| is clobbered: it is reused in place first for the
// decoded marker, then for the loaded code entry.
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector) {
  Label maybe_has_optimized_code;
  // Check if optimized code is available
  __ tst(
      optimization_state,
      Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
  // eq (AND result was zero) => no compile/optimize marker bits set, so skip
  // straight to the optimized-code-slot check below.
  __ b(eq, &maybe_has_optimized_code);
  // A marker is present: extract just the marker bits in place and act on it.
  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
  __ bind(&maybe_has_optimized_code);
  // NOTE: optimized_code_entry and optimization_marker alias the same
  // register (both are optimization_state), so the ldr below populates the
  // register consumed by the tail call.
  Register optimized_code_entry = optimization_state;
  __ ldr(optimization_marker,
         FieldMemOperand(feedback_vector,
                         FeedbackVector::kMaybeOptimizedCodeOffset));
  // r6 is presumably a scratch register for the tail-call sequence — confirm
  // against TailCallOptimizedCodeSlot's signature.
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
......@@ -1192,26 +1214,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
// Check if optimized code is available
__ tst(
optimization_state,
Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ b(eq, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
__ ldr(optimization_marker,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
......
......@@ -1162,6 +1162,29 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ Bind(&end);
}
// ARM64 port. Shared tail of the interpreter entry path: if an optimization
// marker is set, decode it and hand off to MaybeOptimizeCode (which, per the
// commit description, never returns); otherwise load the optimized-code slot
// of the feedback vector and tail-call into it.
// |optimization_state| is clobbered (reused for the decoded marker); the code
// entry itself is loaded into x7.
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector) {
  Label maybe_has_optimized_code;
  // Check if optimized code is available
  // Branch if none of the compile/optimize marker bits are set.
  __ TestAndBranchIfAllClear(
      optimization_state,
      FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
      &maybe_has_optimized_code);
  // A marker is present: extract just the marker bits in place and act on it.
  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
  __ bind(&maybe_has_optimized_code);
  // Load the tagged Code reference from the feedback vector's
  // maybe-optimized-code slot into x7, then tail-call it (x4 presumably a
  // scratch register — confirm against TailCallOptimizedCodeSlot).
  Register optimized_code_entry = x7;
  __ LoadAnyTaggedField(
      optimized_code_entry,
      FieldMemOperand(feedback_vector,
                      FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
}
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
......@@ -1286,33 +1309,16 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429): Document this frame setup better.
__ Ret();
__ RecordComment("[ Optimized marker check");
// TODO(v8:11429): Share this code with the InterpreterEntryTrampoline.
__ bind(&has_optimized_code_or_marker);
{
Label maybe_has_optimized_code;
__ RecordComment("[ Optimized marker check");
// Drop the frame created by the baseline call.
__ Pop<TurboAssembler::kAuthLR>(fp, lr);
// Check if optimized code is available
__ TestAndBranchIfAllClear(
optimization_state,
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
&maybe_has_optimized_code);
Register optimization_marker = optimization_state.X();
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state.X();
__ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ Trap();
__ RecordComment("]");
}
__ RecordComment("]");
__ bind(&call_stack_guard);
{
......@@ -1548,27 +1554,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
// Check if optimized code is available
__ TestAndBranchIfAllClear(
optimization_state,
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
&maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = x7;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ bind(&is_baseline);
{
......
......@@ -919,6 +919,30 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}
// IA-32 port. Shared tail of the interpreter entry path: if an optimization
// marker is set, decode it and hand off to MaybeOptimizeCode (which, per the
// commit description, never returns); otherwise restore the feedback vector,
// load its optimized-code slot, and tail-call into it.
// GPRs are scarce on ia32, so the feedback vector arrives spilled in an XMM
// register (|saved_feedback_vector|) rather than a GPR, and this port's
// MaybeOptimizeCode takes no feedback-vector argument.
// |optimization_state| is clobbered: marker, restored feedback vector, and
// code entry all reuse that one register in sequence.
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    XMMRegister saved_feedback_vector) {
  Label maybe_has_optimized_code;
  // Check if optimized code is available
  __ test(
      optimization_state,
      Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
  // ZF set => no compile/optimize marker bits, so skip to the slot check.
  __ j(zero, &maybe_has_optimized_code);
  // A marker is present: extract just the marker bits in place and act on it.
  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, optimization_marker);
  __ bind(&maybe_has_optimized_code);
  // NOTE: optimized_code_entry and feedback_vector alias optimization_marker
  // (one physical register): the movd restores the feedback vector, and the
  // subsequent mov overwrites it with the code entry loaded from its slot.
  Register optimized_code_entry = optimization_marker;
  Register feedback_vector = optimization_marker;
  __ movd(feedback_vector, saved_feedback_vector);  // Restore feedback vector.
  __ mov(
      optimized_code_entry,
      FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(masm, optimized_code_entry);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
......@@ -1147,29 +1171,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
// Restore actual argument count.
__ movd(eax, xmm0);
// Check if optimized code is available
__ test(
optimization_state,
Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ j(zero, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
MaybeOptimizeCode(masm, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_marker;
__ movd(optimized_code_entry, xmm1);
__ mov(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry);
{
// Restore actual argument count.
__ movd(eax, xmm0);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
xmm1);
}
__ bind(&compile_lazy);
// Restore actual argument count.
......
......@@ -1038,6 +1038,27 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}
// X64 port. Shared tail of the interpreter entry path: if an optimization
// marker is set, decode it and hand off to MaybeOptimizeCode (which, per the
// commit description, never returns); otherwise load the optimized-code slot
// of the feedback vector and tail-call into it.
// |optimization_state| is clobbered: it is reused in place first for the
// decoded marker, then for the loaded code entry.
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector) {
  Label maybe_has_optimized_code;
  // ZF set => no compile/optimize marker bits, so skip to the slot check.
  __ testl(
      optimization_state,
      Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
  __ j(zero, &maybe_has_optimized_code);
  // A marker is present: extract just the marker bits in place and act on it.
  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
  __ bind(&maybe_has_optimized_code);
  // Load the tagged Code reference from the feedback vector's
  // maybe-optimized-code slot, then tail-call it (r11/r15 presumably scratch
  // registers — confirm against TailCallOptimizedCodeSlot).
  Register optimized_code_entry = optimization_state;
  __ LoadAnyTaggedField(
      optimized_code_entry,
      FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
......@@ -1252,25 +1273,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ int3(); // Should not return.
__ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
__ testl(
optimization_state,
Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ j(zero, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ bind(&is_baseline);
{
......@@ -1710,34 +1714,15 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Do "fast" return to caller pushed pc.
__ Ret();
__ RecordComment("[ Optimized marker check");
__ bind(&has_optimized_code_or_marker);
{
__ RecordComment("[ Optimized marker check");
// TODO(v8:11429,verwaest): Overwrite return address instead.
// Drop the return address
__ Drop(1);
Register optimization_state = rcx;
// TODO(v8:11429): Extract to helper.
Label maybe_has_optimized_code;
__ testl(
optimization_state,
Immediate(
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ j(zero, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
__ bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, rcx, feedback_vector);
__ Trap();
__ RecordComment("]");
}
__ bind(&call_stack_guard);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment