Commit 7177d87f authored by Dan Elphick, committed by Commit Bot

[builtins] Tweak optimized check in InterpreterEntryTrampoline

Reorders the Smi check and the empty OptimizationMarker check: the
latter implies the former, so there is now just a single comparison
on the fast path.
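
For context, here is a minimal plain-C++ sketch of the reordering. The
Slot type, the tag layout and the kNoneMarker value are assumptions made
for the example, not V8's exact Smi encoding:

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Hypothetical stand-ins: a tagged slot value with a V8-style Smi tag
// in the low bit (assumed layout; the real encoding varies by platform).
using Slot = uintptr_t;
constexpr Slot kNoneMarker = 0;  // stands in for Smi::FromEnum(OptimizationMarker::kNone)

bool IsSmi(Slot v) { return (v & 1) == 0; }

// Old order: the Smi check runs first, so the fast path (an empty slot,
// i.e. no optimized code and no marker) costs two comparisons.
bool SlotEmptyOld(Slot v) {
  if (!IsSmi(v)) return false;  // weak reference -> optimized code path
  return v == kNoneMarker;      // second comparison on the fast path
}

// New order: v == kNoneMarker already implies IsSmi(v), because kNone is
// itself a Smi. The fast path is one comparison; the Smi check moves to
// the slow path, where it splits "marker" from "weak reference to code".
bool SlotEmptyNew(Slot v) { return v == kNoneMarker; }

int main() {
  // The two orderings classify every input the same way.
  for (Slot v : {Slot{0}, Slot{1}, Slot{2}, Slot{3}}) {
    assert(SlotEmptyOld(v) == SlotEmptyNew(v));
  }
}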

Bug: v8:9771
Change-Id: Ibba1f322944b17186842983e227684b301ed5f31
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1833683
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64110}
parent 18a8548d
src/builtins/arm/builtins-arm.cc
@@ -1049,9 +1049,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
   __ b(ne, &push_stack_frame);
 
-  Label has_optimization_marker;
-  Label maybe_has_optimized_code;
-
   Register optimized_code_entry = r4;
 
   // Read off the optimized code slot in the feedback vector.
@@ -1059,14 +1056,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
          FieldMemOperand(feedback_vector,
                          FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
-  // If not a Smi, then it must be a weak reference to the optimized code.
-  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
-
-  // Check if there is an optimization marker, if so carry onto the the
-  // MaybeOptimizeCode path.
+  // Check if the optimized code slot is not empty.
+  Label optimized_code_slot_not_empty;
   __ cmp(optimized_code_entry,
          Operand(Smi::FromEnum(OptimizationMarker::kNone)));
-  __ b(ne, &has_optimization_marker);
+  __ b(ne, &optimized_code_slot_not_empty);
 
   Label not_optimized;
   __ bind(&not_optimized);
@@ -1179,7 +1173,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LeaveInterpreterFrame(masm, r2);
   __ Jump(lr);
 
-  __ bind(&has_optimization_marker);
+  __ bind(&optimized_code_slot_not_empty);
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is actually a weak reference to the
+  // optimized code.
+  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
   MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
   // Fall through if there's no runnable optimized code.
   __ jmp(&not_optimized);
src/builtins/arm64/builtins-arm64.cc
@@ -1179,9 +1179,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
   __ B(ne, &push_stack_frame);
 
-  Label has_optimization_marker;
-  Label maybe_has_optimized_code;
-
   // Read off the optimized code slot in the feedback vector, and if there
   // is optimized code or an optimization marker, call that instead.
   Register optimized_code_entry = x7;
@@ -1190,14 +1187,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
          FieldMemOperand(feedback_vector,
                          FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
-  // If not a Smi, then it must be a weak reference to the optimized code.
-  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
-
-  // Check if there is an optimization marker and if so carry onto the the
-  // MaybeOptimizeCode path.
+  // Check if the optimized code slot is not empty.
+  Label optimized_code_slot_not_empty;
   __ CompareTaggedAndBranch(optimized_code_entry,
                             Operand(Smi::FromEnum(OptimizationMarker::kNone)),
-                            ne, &has_optimization_marker);
+                            ne, &optimized_code_slot_not_empty);
 
   Label not_optimized;
   __ bind(&not_optimized);
@@ -1318,7 +1312,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LeaveInterpreterFrame(masm, x2);
   __ Ret();
 
-  __ bind(&has_optimization_marker);
+  __ bind(&optimized_code_slot_not_empty);
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is actually a weak reference to the
+  // optimized code as opposed to an optimization marker.
+  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
   MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
   // Fall through if there's no runnable optimized code.
   __ jmp(&not_optimized);
src/builtins/ia32/builtins-ia32.cc
@@ -954,9 +954,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
   __ j(not_equal, &push_stack_frame);
 
-  Label has_optimization_marker;
-  Label maybe_has_optimized_code;
-
   // Read off the optimized code slot in the feedback vector.
   // Load the optimized code from the feedback vector and re-use the register.
   Register optimized_code_entry = ecx;
@@ -964,14 +961,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
          FieldOperand(feedback_vector,
                       FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
-  // If not a Smi, then it must be a weak reference to the optimized code.
-  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
-
-  // Check if there is an optimization marker and if so carry onto the the
-  // MaybeOptimizeCode path.
+  // Check if the optimized code slot is not empty.
+  Label optimized_code_slot_not_empty;
   __ cmp(optimized_code_entry,
          Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
-  __ j(not_equal, &has_optimization_marker);
+  __ j(not_equal, &optimized_code_slot_not_empty);
 
   Label not_optimized;
   __ bind(&not_optimized);
@@ -1107,7 +1101,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LeaveInterpreterFrame(masm, edx, ecx);
   __ ret(0);
 
-  __ bind(&has_optimization_marker);
+  __ bind(&optimized_code_slot_not_empty);
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is actually a weak reference to the
+  // optimized code as opposed to an optimization marker.
+  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
   MaybeOptimizeCode(masm, optimized_code_entry);
   // Fall through if there's no runnable optimized code.
   __ jmp(&not_optimized);
src/builtins/x64/builtins-x64.cc
@@ -1069,8 +1069,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
   __ j(not_equal, &push_stack_frame);
 
-  Label has_optimization_marker;
-  Label maybe_has_optimized_code;
 
   // Read off the optimized code slot in the feedback vector, and if there
   // is optimized code or an optimization marker, call that instead.
@@ -1083,13 +1081,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
                          FeedbackVector::kOptimizedCodeWeakOrSmiOffset),
                          decompr_scratch);
 
-  // If not a Smi, then it must be a weak reference to the optimized code.
-  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
-
-  // Check if there is an optimization marker and if so carry onto the the
-  // MaybeOptimizeCode path.
-  __ SmiCompare(optimized_code_entry, Smi::FromEnum(OptimizationMarker::kNone));
-  __ j(not_equal, &has_optimization_marker);
+  // Check if the optimized code slot is not empty.
+  Label optimized_code_slot_not_empty;
+  __ Cmp(optimized_code_entry, Smi::FromEnum(OptimizationMarker::kNone));
+  __ j(not_equal, &optimized_code_slot_not_empty);
 
   Label not_optimized;
   __ bind(&not_optimized);
@@ -1210,7 +1205,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
   __ int3();  // Should not return.
 
-  __ bind(&has_optimization_marker);
+  __ bind(&optimized_code_slot_not_empty);
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is actually a weak reference to the
+  // optimized code as opposed to an optimization marker.
+  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
   MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
   // Fall through if there's no runnable optimized code.
   __ jmp(&not_optimized);
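
Across all four architectures the resulting control flow is the same; the
following plain-C++ sketch models it under the same assumed tag layout as
above (the helper names are illustrative stand-ins, not V8's API):

#include <cstdint>
#include <iostream>

using Slot = uintptr_t;
constexpr Slot kNoneMarker = 0;              // assumed Smi for OptimizationMarker::kNone
bool IsSmi(Slot v) { return (v & 1) == 0; }  // assumed tag layout

void RunInterpreter() { std::cout << "dispatch bytecode\n"; }
void TailCallOptimizedCode() { std::cout << "enter optimized code\n"; }
void MaybeOptimizeCode(Slot) { std::cout << "act on optimization marker\n"; }

void InterpreterEntry(Slot optimized_code_entry) {
  // Fast path (not_optimized): a single comparison against the kNone Smi.
  if (optimized_code_entry == kNoneMarker) {
    RunInterpreter();
    return;
  }
  // optimized_code_slot_not_empty: the Smi check now lives on this slow path.
  if (!IsSmi(optimized_code_entry)) {
    // maybe_has_optimized_code: the slot holds a weak reference to Code.
    TailCallOptimizedCode();
    return;
  }
  // The slot holds an optimization marker other than kNone.
  MaybeOptimizeCode(optimized_code_entry);
  // Fall through if there's no runnable optimized code (jmp to not_optimized).
  RunInterpreter();
}

int main() {
  InterpreterEntry(kNoneMarker);  // fast path: straight into the interpreter
  InterpreterEntry(2);            // Smi marker -> MaybeOptimizeCode path
  InterpreterEntry(1);            // non-Smi (assumed): weak ref -> optimized code
}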