Commit 808a5d76 authored by Mythri A, committed by Commit Bot

[turboprop] Fix IET to check optimization marker before optimized code

InterpreterEntryTrampoline checks
1. If there is any optimization marker, and process the marker by
calling the correct runtime function
2. If there is any optimized code, it installs optimized code and starts
executing optimized code.

Earlier it was OK to do these two checks in any order, since it wasn't
possible that we have both a marker and optimized code. Turboprop code
calls out to IET to process the optimization marker / install next tier
optimized code. So we have to check for optimization marker before we
check for optimized code.

Bug: v8:9684
Change-Id: Iaefb51aec9c2de0bcbacbdbd8a5dff513e573036
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2505721
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70914}
parent 8b0cb19e
......@@ -910,12 +910,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
// Marker should be one of LogFirstExecution / CompileOptimized /
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
// here.
if (FLAG_debug_code) {
__ cmp_raw_immediate(optimization_marker,
OptimizationMarker::kInOptimizationQueue);
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
__ stop();
}
}
......@@ -1053,8 +1052,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check if the optimized code slot is not empty or has a optimization marker.
Label has_optimized_code_or_marker;
__ cmp_raw_immediate(optimization_state,
FeedbackVector::kHasNoOptimizedCodeOrMarkerValue);
__ tst(
optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
__ b(ne, &has_optimized_code_or_marker);
Label not_optimized;
......@@ -1204,9 +1204,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label maybe_has_optimized_code;
// Check if optimized code is available
__ tst(optimization_state,
Operand(FeedbackVector::OptimizationTierBits::kMask));
__ b(ne, &maybe_has_optimized_code);
__ tst(
optimization_state,
Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ b(eq, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
......
......@@ -1072,12 +1072,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
// Marker should be one of LogFirstExecution / CompileOptimized /
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
// here.
if (FLAG_debug_code) {
__ Cmp(optimization_marker,
Operand(OptimizationMarker::kInOptimizationQueue));
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
__ Unreachable();
}
}
......@@ -1216,9 +1215,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check if there is optimized code or a optimization marker that needes to be
// processed.
Label has_optimized_code_or_marker;
__ CompareAndBranch(optimization_state,
Operand(FeedbackVector::kHasNoOptimizedCodeOrMarkerValue),
ne, &has_optimized_code_or_marker);
__ TestAndBranchIfAnySet(
optimization_state,
FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
&has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
......@@ -1377,9 +1377,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label maybe_has_optimized_code;
// Check if optimized code is available
__ TestAndBranchIfAnySet(optimization_state,
FeedbackVector::OptimizationTierBits::kMask,
&maybe_has_optimized_code);
__ TestAndBranchIfAllClear(
optimization_state,
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
&maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
......
......@@ -46,13 +46,15 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
LoadObjectField<Uint32T>(feedback_vector, FeedbackVector::kFlagsOffset);
// Fall through if no optimization trigger or optimized code.
GotoIf(Word32Equal(
optimization_state,
Int32Constant(FeedbackVector::kHasNoOptimizedCodeOrMarkerValue)),
&fallthrough);
GotoIfNot(IsSetWord32(
optimization_state,
FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask),
&fallthrough);
GotoIf(IsSetWord32<FeedbackVector::OptimizationTierBits>(optimization_state),
&may_have_optimized_code);
GotoIfNot(IsSetWord32(
optimization_state,
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker),
&may_have_optimized_code);
// TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
// all these marker values there.
......@@ -67,16 +69,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
marker, OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent, function);
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
CSA_ASSERT(
this, Word32Equal(marker, Int32Constant(
OptimizationMarker::kInOptimizationQueue)));
CSA_ASSERT(this,
IsCleared(LoadMaybeWeakObjectField(
feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset)));
Goto(&fallthrough);
Unreachable();
BIND(&may_have_optimized_code);
{
Label heal_optimized_code_slot(this);
......
......@@ -864,13 +864,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
{
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ cmp(optimization_marker, OptimizationMarker::kInOptimizationQueue);
__ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
// Marker should be one of LogFirstExecution / CompileOptimized /
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
// here.
if (FLAG_debug_code) {
__ int3();
}
}
......@@ -1010,8 +1008,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check if there is optimized code or a optimization marker that needes to be
// processed.
Label has_optimized_code_or_marker;
__ cmp(optimization_state, FeedbackVector::kHasNoOptimizedCodeOrMarkerValue);
__ j(not_equal, &has_optimized_code_or_marker);
__ test(
optimization_state,
Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
__ j(not_zero, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
......@@ -1189,9 +1189,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movd(eax, xmm0);
// Check if optimized code is available
__ test(optimization_state,
Immediate(FeedbackVector::OptimizationTierBits::kMask));
__ j(not_zero, &maybe_has_optimized_code);
__ test(
optimization_state,
Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ j(zero, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
......
......@@ -927,11 +927,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
// Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code.
// Marker should be one of LogFirstExecution / CompileOptimized /
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
// here.
if (FLAG_debug_code) {
__ Cmp(optimization_marker, OptimizationMarker::kInOptimizationQueue);
__ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
__ int3();
}
}
......@@ -1115,8 +1115,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check if there is optimized code or a optimization marker that needs to be
// processed.
Label has_optimized_code_or_marker;
__ Cmp(optimization_state, FeedbackVector::kHasNoOptimizedCodeOrMarkerValue);
__ j(not_equal, &has_optimized_code_or_marker);
__ testl(
optimization_state,
Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
__ j(not_zero, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
......@@ -1270,10 +1272,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
// Check if optimized code is available
__ testl(optimization_state,
Immediate(FeedbackVector::OptimizationTierBits::kMask));
__ j(not_zero, &maybe_has_optimized_code);
__ testl(
optimization_state,
Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ j(zero, &maybe_has_optimized_code);
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
......
......@@ -194,15 +194,14 @@ class FeedbackVector
STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <
OptimizationTierBits::kMax);
// We want this value to be 0 to generate slightly compact code in
// InterpreterEntryTrampoline
static constexpr uint32_t kHasNoOptimizedCodeOrMarkerValue =
OptimizationTierBits::encode(OptimizationTier::kNone) |
OptimizationTierBits::encode(OptimizationTier::kNone);
STATIC_ASSERT(kHasNoOptimizedCodeOrMarkerValue == 0);
static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker =
kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask =
kNoneOrMidTierMask << OptimizationTierBits::kShift |
kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
kHasCompileOptimizedOrLogFirstExecutionMarker;
static constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
OptimizationTierBits::kMask |
kHasCompileOptimizedOrLogFirstExecutionMarker;
inline bool is_empty() const;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment