Commit 014112a8 authored by Milad Fa, committed by Commit Bot

PPC/s390: [turboprop] Add a slot for optimization marker in feedback vector

Port d7ece57e

Original Commit Message:

    The optimization marker and the optimized code used to share the same
    slot in the feedback vector, as they were mutually exclusive. With
    Turboprop we want to mark a function for tier-up to Turbofan while
    still holding Turboprop's optimized code, so this CL uses the existing
    padding field to hold the optimization marker instead.

    As a drive-by, this removes the unused JSFunction::ClearOptimizedCodeSlot
    function and fixes a minor bug in Runtime_GetOptimizationStatus.
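
The upshot is that the feedback vector now tracks two independent pieces of optimization state: a weak slot for the optimized code and a 32-bit flags word whose bit fields hold the optimization marker (and tier). The sketch below illustrates only the packing idea; the names, bit widths, and enum values (FeedbackVectorFlags, kMarkerBits, kMidTier) are invented for illustration and are not V8's real definitions.

    // Illustration only: names, bit widths, and values are assumptions,
    // not V8's actual FeedbackVector layout.
    #include <cstdint>

    enum class OptimizationMarker : uint32_t { kNone = 0, kInOptimizationQueue = 3 };
    enum class OptimizationTier : uint32_t { kNone = 0, kMidTier = 1, kTopTier = 2 };

    struct FeedbackVectorFlags {
      static constexpr uint32_t kMarkerBits = 3;
      static constexpr uint32_t kMarkerMask = (1u << kMarkerBits) - 1;

      uint32_t bits = 0;

      void set_marker(OptimizationMarker m) {
        bits = (bits & ~kMarkerMask) | static_cast<uint32_t>(m);
      }
      void set_tier(OptimizationTier t) {
        bits = (bits & kMarkerMask) | (static_cast<uint32_t>(t) << kMarkerBits);
      }
      OptimizationMarker marker() const {
        return static_cast<OptimizationMarker>(bits & kMarkerMask);
      }
      OptimizationTier tier() const {
        return static_cast<OptimizationTier>(bits >> kMarkerBits);
      }
    };

    int main() {
      FeedbackVectorFlags flags;
      flags.set_tier(OptimizationTier::kMidTier);  // Turboprop code is installed
      flags.set_marker(OptimizationMarker::kInOptimizationQueue);  // Turbofan requested
      // Both facts coexist, which the old shared slot could not express.
      return flags.tier() == OptimizationTier::kMidTier ? 0 : 1;
    }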

R=mythria@chromium.org, joransiu@ca.ibm.com, junyan@redhat.com, midawson@redhat.com
BUG=
LOG=N

Change-Id: Ie635fd05c26f70124076b9c51c0c70a2b948af69
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2505621
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Fa <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/master@{#70854}
parent 03f60296
src/builtins/ppc/builtins-ppc.cc

@@ -849,13 +849,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
   __ add(sp, sp, args_count);
 }
 
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
 static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
-                                          Register smi_entry,
-                                          OptimizationMarker marker,
+                                          Register actual_marker,
+                                          OptimizationMarker expected_marker,
                                           Runtime::FunctionId function_id) {
   Label no_match;
-  __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+  __ cmpi(actual_marker, Operand(expected_marker));
   __ bne(&no_match);
   GenerateTailCallToReturnedCode(masm, function_id);
   __ bind(&no_match);
@@ -872,10 +872,15 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   DCHECK(!AreAliased(r4, r6, optimized_code_entry, scratch));
 
   Register closure = r4;
+  Label heal_optimized_code_slot;
+
+  // If the optimized code is cleared, go to runtime to update the optimization
+  // marker field.
+  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+                   &heal_optimized_code_slot);
 
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
-  Label found_deoptimized_code;
   __ LoadTaggedPointerField(
       scratch,
       FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
@@ -883,7 +888,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
       scratch,
       FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
   __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
-  __ bne(&found_deoptimized_code, cr0);
+  __ bne(&heal_optimized_code_slot, cr0);
 
   // Optimized code is good, get it into the closure and link the closure
   // into the optimized functions list, then tail call the optimized code.
@@ -893,10 +898,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ LoadCodeObjectEntry(r5, optimized_code_entry);
   __ Jump(r5);
 
-  // Optimized code slot contains deoptimized code, evict it and re-enter
-  // the closure's code.
-  __ bind(&found_deoptimized_code);
-  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+  // Optimized code slot contains deoptimized code or code is cleared and
+  // optimized code marker isn't updated. Evict the code, update the marker
+  // and re-enter the closure's code.
+  __ bind(&heal_optimized_code_slot);
+  GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
 }
 
 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
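
Taken together, the hunks above make both failure modes of the optimized code slot, a weak reference cleared by the GC and code marked for deoptimization, funnel into the single heal path. A minimal C++ rendering of that decision, assuming stand-in types (Code and Action here are illustrative, not V8 classes):

    #include <optional>

    struct Code { bool marked_for_deoptimization; };

    enum class Action { kRunOptimizedCode, kHealOptimizedCodeSlot };

    // `slot` models the weak reference: empty once the GC has cleared it.
    Action DecideOnOptimizedCode(const std::optional<Code>& slot) {
      if (!slot.has_value()) return Action::kHealOptimizedCodeSlot;  // cleared
      if (slot->marked_for_deoptimization) return Action::kHealOptimizedCodeSlot;
      return Action::kRunOptimizedCode;  // link into the closure and tail-call
    }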
@@ -906,7 +912,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   //  -- r6 : new target (preserved for callee if needed, and caller)
   //  -- r4 : target function (preserved for callee if needed, and caller)
   //  -- feedback vector (preserved for caller if needed)
-  //  -- optimization_marker : a Smi containing a non-zero optimization marker.
+  //  -- optimization_marker : an int32 containing a non-zero optimization
+  //  marker.
   // -----------------------------------
   DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker));
@@ -926,9 +933,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   // Otherwise, the marker is InOptimizationQueue, so fall through hoping
   // that an interrupt will eventually update the slot with optimized code.
   if (FLAG_debug_code) {
-    __ CmpSmiLiteral(optimization_marker,
-                     Smi::FromEnum(OptimizationMarker::kInOptimizationQueue),
-                     r0);
+    __ cmpi(optimization_marker,
+            Operand(OptimizationMarker::kInOptimizationQueue));
     __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
   }
 }
@@ -1066,18 +1072,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
   __ bne(&push_stack_frame);
 
-  Register optimized_code_entry = r7;
+  Register optimization_state = r7;
 
-  // Read off the optimized code slot in the feedback vector.
-  __ LoadAnyTaggedField(
-      optimized_code_entry,
-      FieldMemOperand(feedback_vector,
-                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+  // Read off the optimization state in the feedback vector.
+  __ LoadP(optimization_state,
+           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 
-  // Check if the optimized code slot is not empty.
-  Label optimized_code_slot_not_empty;
-  __ CmpSmiLiteral(optimized_code_entry,
-                   Smi::FromEnum(OptimizationMarker::kNone), r0);
-  __ bne(&optimized_code_slot_not_empty);
+  // Check if the optimized code slot is not empty or has an optimization
+  // marker.
+  Label has_optimized_code_or_marker;
+  __ cmpi(optimization_state,
+          Operand(FeedbackVector::kHasNoOptimizedCodeOrMarkerValue));
+  __ bne(&has_optimized_code_or_marker);
 
   Label not_optimized;
   __ bind(&not_optimized);
@@ -1236,19 +1241,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ jmp(&after_stack_check_interrupt);
 
-  __ bind(&optimized_code_slot_not_empty);
+  __ bind(&has_optimized_code_or_marker);
   Label maybe_has_optimized_code;
-  // Check if optimized code marker is actually a weak reference to the
-  // optimized code.
-  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
-  MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+
+  // Check if optimized code is available.
+  __ TestBitMask(optimization_state,
+                 FeedbackVector::OptimizationTierBits::kMask, r0);
+  __ bne(&maybe_has_optimized_code, cr0);
+
+  Register optimization_marker = optimization_state;
+  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
   // Fall through if there's no runnable optimized code.
   __ jmp(&not_optimized);
 
   __ bind(&maybe_has_optimized_code);
-  // Load code entry from the weak reference, if it was cleared, resume
-  // execution of unoptimized code.
-  __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+  Register optimized_code_entry = optimization_state;
+  __ LoadAnyTaggedField(
+      optimized_code_entry,
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kMaybeOptimizedCodeOffset));
   TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9);
 
   __ bind(&compile_lazy);
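
The s390 section below mirrors the PPC changes: the interpreter entry trampoline now makes its dispatch decision from the single 32-bit flags word rather than from a tagged slot. Roughly, in C++, with stand-in constants (the real kHasNoOptimizedCodeOrMarkerValue sentinel and OptimizationTierBits mask are defined by FeedbackVector and are not shown in this diff):

    #include <cstdint>

    enum class Dispatch { kStayInInterpreter, kTailCallOptimizedCode, kMaybeOptimize };

    // Assumed values, for illustration only.
    constexpr uint32_t kHasNoOptimizedCodeOrMarkerValue = 0;
    constexpr uint32_t kOptimizationTierMask = 0x3u << 3;

    Dispatch DispatchOnOptimizationState(uint32_t state) {
      if (state == kHasNoOptimizedCodeOrMarkerValue)
        return Dispatch::kStayInInterpreter;      // no code and no marker
      if ((state & kOptimizationTierMask) != 0)
        return Dispatch::kTailCallOptimizedCode;  // a tier is set, code should exist
      return Dispatch::kMaybeOptimize;            // marker only: maybe start a compile
    }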
src/builtins/s390/builtins-s390.cc

@@ -907,13 +907,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
   __ AddP(sp, sp, args_count);
 }
 
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
 static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
-                                          Register smi_entry,
-                                          OptimizationMarker marker,
+                                          Register actual_marker,
+                                          OptimizationMarker expected_marker,
                                           Runtime::FunctionId function_id) {
   Label no_match;
-  __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+  __ CmpP(actual_marker, Operand(expected_marker));
   __ bne(&no_match);
   GenerateTailCallToReturnedCode(masm, function_id);
   __ bind(&no_match);
@@ -930,17 +930,22 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   DCHECK(!AreAliased(r3, r5, optimized_code_entry, scratch));
 
   Register closure = r3;
+  Label heal_optimized_code_slot;
+
+  // If the optimized code is cleared, go to runtime to update the optimization
+  // marker field.
+  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+                   &heal_optimized_code_slot);
 
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
-  Label found_deoptimized_code;
   __ LoadTaggedPointerField(
       scratch,
       FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
   __ LoadW(scratch, FieldMemOperand(
                         scratch, CodeDataContainer::kKindSpecificFlagsOffset));
   __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
-  __ bne(&found_deoptimized_code);
+  __ bne(&heal_optimized_code_slot);
 
   // Optimized code is good, get it into the closure and link the closure
   // into the optimized functions list, then tail call the optimized code.
@@ -950,10 +955,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ LoadCodeObjectEntry(r4, optimized_code_entry);
   __ Jump(r4);
 
-  // Optimized code slot contains deoptimized code, evict it and re-enter
-  // the closure's code.
-  __ bind(&found_deoptimized_code);
-  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+  // Optimized code slot contains deoptimized code or code is cleared and
+  // optimized code marker isn't updated. Evict the code, update the marker
+  // and re-enter the closure's code.
+  __ bind(&heal_optimized_code_slot);
+  GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
 }
 
 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -963,7 +969,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   //  -- r5 : new target (preserved for callee if needed, and caller)
   //  -- r3 : target function (preserved for callee if needed, and caller)
   //  -- feedback vector (preserved for caller if needed)
-  //  -- optimization_marker : a Smi containing a non-zero optimization marker.
+  //  -- optimization_marker : an int32 containing a non-zero optimization
+  //  marker.
   // -----------------------------------
   DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker));
@@ -983,9 +990,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   // Otherwise, the marker is InOptimizationQueue, so fall through hoping
   // that an interrupt will eventually update the slot with optimized code.
   if (FLAG_debug_code) {
-    __ CmpSmiLiteral(optimization_marker,
-                     Smi::FromEnum(OptimizationMarker::kInOptimizationQueue),
-                     r0);
+    __ CmpP(optimization_marker,
+            Operand(OptimizationMarker::kInOptimizationQueue));
     __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
   }
 }
@@ -1124,19 +1130,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
   __ bne(&push_stack_frame);
 
-  Register optimized_code_entry = r6;
+  Register optimization_state = r6;
 
-  // Read off the optimized code slot in the feedback vector.
-  __ LoadAnyTaggedField(
-      optimized_code_entry,
-      FieldMemOperand(feedback_vector,
-                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+  // Read off the optimization state in the feedback vector.
+  __ LoadP(optimization_state,
+           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
 
-  // Check if the optimized code slot is not empty.
-  Label optimized_code_slot_not_empty;
-  __ CmpSmiLiteral(optimized_code_entry,
-                   Smi::FromEnum(OptimizationMarker::kNone), r0);
-  __ bne(&optimized_code_slot_not_empty);
+  // Check if the optimized code slot is not empty or has an optimization
+  // marker.
+  Label has_optimized_code_or_marker;
+  __ CmpP(optimization_state,
+          Operand(FeedbackVector::kHasNoOptimizedCodeOrMarkerValue));
+  __ bne(&has_optimized_code_or_marker);
 
   Label not_optimized;
   __ bind(&not_optimized);
@@ -1267,12 +1271,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LeaveInterpreterFrame(masm, r4);
   __ Ret();
 
-  __ bind(&optimized_code_slot_not_empty);
+  __ bind(&has_optimized_code_or_marker);
   Label maybe_has_optimized_code;
-  // Check if optimized code marker is actually a weak reference to the
-  // optimized code.
-  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
-  MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+
+  // Check if optimized code is available.
+  __ TestBitMask(optimization_state,
+                 FeedbackVector::OptimizationTierBits::kMask, r0);
+  __ bne(&maybe_has_optimized_code);
+
+  Register optimization_marker = optimization_state;
+  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
   // Fall through if there's no runnable optimized code.
   __ jmp(&not_optimized);
@@ -1302,9 +1311,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ jmp(&after_stack_check_interrupt);
 
   __ bind(&maybe_has_optimized_code);
-  // Load code entry from the weak reference, if it was cleared, resume
-  // execution of unoptimized code.
-  __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+  Register optimized_code_entry = optimization_state;
+  __ LoadAnyTaggedField(
+      optimized_code_entry,
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kMaybeOptimizedCodeOffset));
   TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
 
   __ bind(&compile_lazy);