Commit 54604990 authored by Jakob Gruber, committed by V8 LUCI CQ

[compiler] Remove OptimizationMarker::kLogFirstExecution

The functionality is unused and we are simplifying OptimizationMarker
usage.

Drive-by: Remove unused return value of Compiler::CompileOptimized.
Drive-by: Don't add kStackSpaceRequiredForCompilation as gap to the
stack check when compiling concurrently, i.e. on another thread.

Bug: chromium:757467
Change-Id: Ibbe204b82bf937b9eb74f9eb2c3fd2d719d53ef9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3416245
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78800}
parent 99a5bb74
...@@ -936,12 +936,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -936,12 +936,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// ----------------------------------- // -----------------------------------
DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker)); DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker, TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized, OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent); Runtime::kCompileOptimized_NotConcurrent);
...@@ -949,9 +943,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -949,9 +943,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent, OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent); Runtime::kCompileOptimized_Concurrent);
// Marker should be one of LogFirstExecution / CompileOptimized / // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach // InOptimizationQueue and None shouldn't reach here.
// here.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ stop(); __ stop();
} }
...@@ -1060,9 +1053,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( ...@@ -1060,9 +1053,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector)); DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code; Label maybe_has_optimized_code;
// Check if optimized code is available // Check if optimized code is available
__ tst( __ tst(optimization_state,
optimization_state, Operand(FeedbackVector::kHasCompileOptimizedMarker));
Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ b(eq, &maybe_has_optimized_code); __ b(eq, &maybe_has_optimized_code);
Register optimization_marker = optimization_state; Register optimization_marker = optimization_state;
......
...@@ -1122,12 +1122,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -1122,12 +1122,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
ASM_CODE_COMMENT(masm); ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker)); DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker, TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized, OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent); Runtime::kCompileOptimized_NotConcurrent);
...@@ -1135,9 +1129,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -1135,9 +1129,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent, OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent); Runtime::kCompileOptimized_Concurrent);
// Marker should be one of LogFirstExecution / CompileOptimized / // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach // InOptimizationQueue and None shouldn't reach here.
// here.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ Unreachable(); __ Unreachable();
} }
...@@ -1242,10 +1235,9 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( ...@@ -1242,10 +1235,9 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector)); DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code; Label maybe_has_optimized_code;
// Check if optimized code is available // Check if optimized code is available
__ TestAndBranchIfAllClear( __ TestAndBranchIfAllClear(optimization_state,
optimization_state, FeedbackVector::kHasCompileOptimizedMarker,
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker, &maybe_has_optimized_code);
&maybe_has_optimized_code);
Register optimization_marker = optimization_state; Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker); __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
......
...@@ -53,17 +53,14 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot( ...@@ -53,17 +53,14 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask), FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask),
&fallthrough); &fallthrough);
GotoIfNot(IsSetWord32( GotoIfNot(IsSetWord32(optimization_state,
optimization_state, FeedbackVector::kHasCompileOptimizedMarker),
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker),
&may_have_optimized_code); &may_have_optimized_code);
// TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check // TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
// all these marker values there. // all these marker values there.
TNode<Uint32T> marker = TNode<Uint32T> marker =
DecodeWord32<FeedbackVector::OptimizationMarkerBits>(optimization_state); DecodeWord32<FeedbackVector::OptimizationMarkerBits>(optimization_state);
TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution, function);
TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kCompileOptimized, TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent, Runtime::kCompileOptimized_NotConcurrent,
function); function);
......
...@@ -894,12 +894,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, ...@@ -894,12 +894,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
ASM_CODE_COMMENT(masm); ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(edx, edi, optimization_marker)); DCHECK(!AreAliased(edx, edi, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker, TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized, OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent); Runtime::kCompileOptimized_NotConcurrent);
...@@ -907,9 +901,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, ...@@ -907,9 +901,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
OptimizationMarker::kCompileOptimizedConcurrent, OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent); Runtime::kCompileOptimized_Concurrent);
// Marker should be one of LogFirstExecution / CompileOptimized / // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach // InOptimizationQueue and None shouldn't reach here.
// here.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ int3(); __ int3();
} }
...@@ -1030,9 +1023,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( ...@@ -1030,9 +1023,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
ASM_CODE_COMMENT(masm); ASM_CODE_COMMENT(masm);
Label maybe_has_optimized_code; Label maybe_has_optimized_code;
// Check if optimized code is available // Check if optimized code is available
__ test( __ test(optimization_state,
optimization_state, Immediate(FeedbackVector::kHasCompileOptimizedMarker));
Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ j(zero, &maybe_has_optimized_code); __ j(zero, &maybe_has_optimized_code);
Register optimization_marker = optimization_state; Register optimization_marker = optimization_state;
......
...@@ -912,12 +912,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -912,12 +912,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
ASM_CODE_COMMENT(masm); ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker)); DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker, TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized, OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent); Runtime::kCompileOptimized_NotConcurrent);
...@@ -925,7 +919,7 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -925,7 +919,7 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent, OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent); Runtime::kCompileOptimized_Concurrent);
// Marker should be one of LogFirstExecution / CompileOptimized / // Marker should be one of CompileOptimized /
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
// here. // here.
if (FLAG_debug_code) { if (FLAG_debug_code) {
...@@ -1036,9 +1030,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( ...@@ -1036,9 +1030,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
{ {
UseScratchRegisterScope temps(masm); UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ And( __ And(scratch, optimization_state,
scratch, optimization_state, Operand(FeedbackVector::kHasCompileOptimizedMarker));
Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg)); __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
} }
......
...@@ -922,12 +922,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -922,12 +922,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// ----------------------------------- // -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker)); DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker, TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized, OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent); Runtime::kCompileOptimized_NotConcurrent);
...@@ -935,9 +929,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -935,9 +929,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent, OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent); Runtime::kCompileOptimized_Concurrent);
// Marker should be one of LogFirstExecution / CompileOptimized / // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach // InOptimizationQueue and None shouldn't reach here.
// here.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ stop(); __ stop();
} }
...@@ -1045,9 +1038,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( ...@@ -1045,9 +1038,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
{ {
UseScratchRegisterScope temps(masm); UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ And( __ And(scratch, optimization_state,
scratch, optimization_state, Operand(FeedbackVector::kHasCompileOptimizedMarker));
Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg)); __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
} }
......
...@@ -924,12 +924,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -924,12 +924,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// ----------------------------------- // -----------------------------------
DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker)); DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker, TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized, OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent); Runtime::kCompileOptimized_NotConcurrent);
...@@ -937,9 +931,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -937,9 +931,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent, OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent); Runtime::kCompileOptimized_Concurrent);
// Marker should be one of LogFirstExecution / CompileOptimized / // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach // InOptimizationQueue and None shouldn't reach here.
// here.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ stop(); __ stop();
} }
...@@ -1031,8 +1024,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( ...@@ -1031,8 +1024,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector)); DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code; Label maybe_has_optimized_code;
// Check if optimized code is available // Check if optimized code is available
__ TestBitMask(optimization_state, __ TestBitMask(optimization_state, FeedbackVector::kHasCompileOptimizedMarker,
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
r0); r0);
__ beq(&maybe_has_optimized_code, cr0); __ beq(&maybe_has_optimized_code, cr0);
......
...@@ -1211,12 +1211,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -1211,12 +1211,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// ----------------------------------- // -----------------------------------
DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker)); DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker, TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized, OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent); Runtime::kCompileOptimized_NotConcurrent);
...@@ -1224,9 +1218,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -1224,9 +1218,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent, OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent); Runtime::kCompileOptimized_Concurrent);
// Marker should be one of LogFirstExecution / CompileOptimized / // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach // InOptimizationQueue and None shouldn't reach here.
// here.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ stop(); __ stop();
} }
...@@ -1318,8 +1311,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( ...@@ -1318,8 +1311,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
DCHECK(!AreAliased(optimization_state, feedback_vector)); DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code; Label maybe_has_optimized_code;
// Check if optimized code is available // Check if optimized code is available
__ TestBitMask(optimization_state, __ TestBitMask(optimization_state, FeedbackVector::kHasCompileOptimizedMarker,
FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
r0); r0);
__ beq(&maybe_has_optimized_code); __ beq(&maybe_has_optimized_code);
......
...@@ -952,12 +952,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -952,12 +952,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
ASM_CODE_COMMENT(masm); ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker)); DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker, TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileOptimized, OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent); Runtime::kCompileOptimized_NotConcurrent);
...@@ -965,9 +959,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -965,9 +959,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent, OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent); Runtime::kCompileOptimized_Concurrent);
// Marker should be one of LogFirstExecution / CompileOptimized / // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
// CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach // InOptimizationQueue and None shouldn't reach here.
// here.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ int3(); __ int3();
} }
...@@ -1130,9 +1123,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( ...@@ -1130,9 +1123,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
ASM_CODE_COMMENT(masm); ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimization_state, feedback_vector, closure)); DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
Label maybe_has_optimized_code; Label maybe_has_optimized_code;
__ testl( __ testl(optimization_state,
optimization_state, Immediate(FeedbackVector::kHasCompileOptimizedMarker));
Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
__ j(zero, &maybe_has_optimized_code); __ j(zero, &maybe_has_optimized_code);
Register optimization_marker = optimization_state; Register optimization_marker = optimization_state;
......
...@@ -2081,7 +2081,7 @@ bool Compiler::FinalizeBackgroundCompileTask(BackgroundCompileTask* task, ...@@ -2081,7 +2081,7 @@ bool Compiler::FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
} }
// static // static
bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function, void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind) { ConcurrencyMode mode, CodeKind code_kind) {
DCHECK(CodeKindIsOptimizedJSFunction(code_kind)); DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
DCHECK(AllowCompilation::IsAllowed(isolate)); DCHECK(AllowCompilation::IsAllowed(isolate));
...@@ -2117,7 +2117,6 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function, ...@@ -2117,7 +2117,6 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
function->ChecksOptimizationMarker()); function->ChecksOptimizationMarker());
DCHECK_IMPLIES(function->IsInOptimizationQueue(), DCHECK_IMPLIES(function->IsInOptimizationQueue(),
mode == ConcurrencyMode::kConcurrent); mode == ConcurrencyMode::kConcurrent);
return true;
} }
// static // static
......
...@@ -84,7 +84,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic { ...@@ -84,7 +84,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool CompileBaseline(Isolate* isolate, Handle<JSFunction> function, static bool CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag, ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope); IsCompiledScope* is_compiled_scope);
static bool CompileOptimized(Isolate* isolate, Handle<JSFunction> function, static void CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind); ConcurrencyMode mode, CodeKind code_kind);
static MaybeHandle<SharedFunctionInfo> CompileToplevel( static MaybeHandle<SharedFunctionInfo> CompileToplevel(
ParseInfo* parse_info, Handle<Script> script, Isolate* isolate, ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
......
...@@ -1652,22 +1652,21 @@ using FileAndLine = std::pair<const char*, int>; ...@@ -1652,22 +1652,21 @@ using FileAndLine = std::pair<const char*, int>;
enum class OptimizationMarker : int32_t { enum class OptimizationMarker : int32_t {
// These values are set so that it is easy to check if there is a marker where // These values are set so that it is easy to check if there is a marker where
// some processing needs to be done. // some processing needs to be done.
kNone = 0b000, kNone = 0b00,
kInOptimizationQueue = 0b001, kInOptimizationQueue = 0b01,
kCompileOptimized = 0b010, kCompileOptimized = 0b10,
kCompileOptimizedConcurrent = 0b011, kCompileOptimizedConcurrent = 0b11,
kLogFirstExecution = 0b100, kLastOptimizationMarker = kCompileOptimizedConcurrent,
kLastOptimizationMarker = kLogFirstExecution
}; };
// For kNone or kInOptimizationQueue we don't need any special processing. // For kNone or kInOptimizationQueue we don't need any special processing.
// To check both cases using a single mask, we expect the kNone to be 0 and // To check both cases using a single mask, we expect the kNone to be 0 and
// kInOptimizationQueue to be 1 so that we can mask off the lsb for checking. // kInOptimizationQueue to be 1 so that we can mask off the lsb for checking.
STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b000 && STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b00 &&
static_cast<int>(OptimizationMarker::kInOptimizationQueue) == static_cast<int>(OptimizationMarker::kInOptimizationQueue) ==
0b001); 0b01);
STATIC_ASSERT(static_cast<int>(OptimizationMarker::kLastOptimizationMarker) <= STATIC_ASSERT(static_cast<int>(OptimizationMarker::kLastOptimizationMarker) <=
0b111); 0b11);
static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b110; static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b10;
inline bool IsInOptimizationQueueMarker(OptimizationMarker marker) { inline bool IsInOptimizationQueueMarker(OptimizationMarker marker) {
return marker == OptimizationMarker::kInOptimizationQueue; return marker == OptimizationMarker::kInOptimizationQueue;
...@@ -1681,8 +1680,6 @@ inline bool IsCompileOptimizedMarker(OptimizationMarker marker) { ...@@ -1681,8 +1680,6 @@ inline bool IsCompileOptimizedMarker(OptimizationMarker marker) {
inline std::ostream& operator<<(std::ostream& os, inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) { const OptimizationMarker& marker) {
switch (marker) { switch (marker) {
case OptimizationMarker::kLogFirstExecution:
return os << "OptimizationMarker::kLogFirstExecution";
case OptimizationMarker::kNone: case OptimizationMarker::kNone:
return os << "OptimizationMarker::kNone"; return os << "OptimizationMarker::kNone";
case OptimizationMarker::kCompileOptimized: case OptimizationMarker::kCompileOptimized:
......
...@@ -158,8 +158,7 @@ bool FeedbackVector::has_optimized_code() const { ...@@ -158,8 +158,7 @@ bool FeedbackVector::has_optimized_code() const {
} }
bool FeedbackVector::has_optimization_marker() const { bool FeedbackVector::has_optimization_marker() const {
return optimization_marker() != OptimizationMarker::kLogFirstExecution && return optimization_marker() != OptimizationMarker::kNone;
optimization_marker() != OptimizationMarker::kNone;
} }
// Conversion from an integer index to either a slot or an ic slot. // Conversion from an integer index to either a slot or an ic slot.
......
...@@ -261,9 +261,7 @@ Handle<FeedbackVector> FeedbackVector::New( ...@@ -261,9 +261,7 @@ Handle<FeedbackVector> FeedbackVector::New(
DCHECK_EQ(vector->length(), slot_count); DCHECK_EQ(vector->length(), slot_count);
DCHECK_EQ(vector->shared_function_info(), *shared); DCHECK_EQ(vector->shared_function_info(), *shared);
DCHECK_EQ(vector->optimization_marker(), DCHECK_EQ(vector->optimization_marker(), OptimizationMarker::kNone);
FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
: OptimizationMarker::kNone);
DCHECK_EQ(vector->optimization_tier(), OptimizationTier::kNone); DCHECK_EQ(vector->optimization_tier(), OptimizationTier::kNone);
DCHECK_EQ(vector->invocation_count(), 0); DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0); DCHECK_EQ(vector->profiler_ticks(), 0);
...@@ -443,9 +441,7 @@ void FeedbackVector::ClearOptimizationTier(FeedbackCell feedback_cell) { ...@@ -443,9 +441,7 @@ void FeedbackVector::ClearOptimizationTier(FeedbackCell feedback_cell) {
void FeedbackVector::InitializeOptimizationState() { void FeedbackVector::InitializeOptimizationState() {
int32_t state = 0; int32_t state = 0;
state = OptimizationMarkerBits::update( state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone);
state, FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
: OptimizationMarker::kNone);
state = OptimizationTierBits::update(state, OptimizationTier::kNone); state = OptimizationTierBits::update(state, OptimizationTier::kNone);
set_flags(state); set_flags(state);
} }
......
...@@ -203,9 +203,9 @@ class FeedbackVector ...@@ -203,9 +203,9 @@ class FeedbackVector
public: public:
NEVER_READ_ONLY_SPACE NEVER_READ_ONLY_SPACE
DEFINE_TORQUE_GENERATED_FEEDBACK_VECTOR_FLAGS() DEFINE_TORQUE_GENERATED_FEEDBACK_VECTOR_FLAGS()
STATIC_ASSERT(OptimizationMarker::kLastOptimizationMarker < STATIC_ASSERT(OptimizationMarker::kLastOptimizationMarker <=
OptimizationMarkerBits::kMax); OptimizationMarkerBits::kMax);
STATIC_ASSERT(OptimizationTier::kLastOptimizationTier < STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <=
OptimizationTierBits::kMax); OptimizationTierBits::kMax);
static const bool kFeedbackVectorMaybeOptimizedCodeIsStoreRelease = true; static const bool kFeedbackVectorMaybeOptimizedCodeIsStoreRelease = true;
...@@ -213,14 +213,13 @@ class FeedbackVector ...@@ -213,14 +213,13 @@ class FeedbackVector
HeapObject>::maybe_optimized_code; HeapObject>::maybe_optimized_code;
DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code) DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)
static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker = static constexpr uint32_t kHasCompileOptimizedMarker =
kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift; kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask = static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask =
kNoneOrMidTierMask << OptimizationTierBits::kShift | kNoneOrMidTierMask << OptimizationTierBits::kShift |
kHasCompileOptimizedOrLogFirstExecutionMarker; kHasCompileOptimizedMarker;
static constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask = static constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
OptimizationTierBits::kMask | OptimizationTierBits::kMask | kHasCompileOptimizedMarker;
kHasCompileOptimizedOrLogFirstExecutionMarker;
inline bool is_empty() const; inline bool is_empty() const;
......
...@@ -6,9 +6,9 @@ type OptimizationMarker extends uint16 constexpr 'OptimizationMarker'; ...@@ -6,9 +6,9 @@ type OptimizationMarker extends uint16 constexpr 'OptimizationMarker';
type OptimizationTier extends uint16 constexpr 'OptimizationTier'; type OptimizationTier extends uint16 constexpr 'OptimizationTier';
bitfield struct FeedbackVectorFlags extends uint32 { bitfield struct FeedbackVectorFlags extends uint32 {
optimization_marker: OptimizationMarker: 3 bit; optimization_marker: OptimizationMarker: 2 bit;
optimization_tier: OptimizationTier: 2 bit; optimization_tier: OptimizationTier: 2 bit;
global_ticks_at_last_runtime_profiler_interrupt: uint32: 24 bit; all_your_bits_are_belong_to_jgruber: uint32: 28 bit;
} }
@generateBodyDescriptor @generateBodyDescriptor
......
...@@ -32,15 +32,13 @@ namespace { ...@@ -32,15 +32,13 @@ namespace {
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function, Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode) { ConcurrencyMode mode) {
StackLimitCheck check(isolate); StackLimitCheck check(isolate);
if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) { // Concurrent optimization runs on another thread, thus no additional gap.
return isolate->StackOverflow(); const int stack_gap = mode == ConcurrencyMode::kConcurrent
} ? 0
: kStackSpaceRequiredForCompilation * KB;
if (check.JsHasOverflowed(stack_gap)) return isolate->StackOverflow();
// Compile for the next tier. Compiler::CompileOptimized(isolate, function, mode, function->NextTier());
if (!Compiler::CompileOptimized(isolate, function, mode,
function->NextTier())) {
return ReadOnlyRoots(isolate).exception();
}
// As a post-condition of CompileOptimized, the function *must* be compiled, // As a post-condition of CompileOptimized, the function *must* be compiled,
// i.e. the installed Code object must not be the CompileLazy builtin. // i.e. the installed Code object must not be the CompileLazy builtin.
...@@ -108,26 +106,6 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) { ...@@ -108,26 +106,6 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
return CompileOptimized(isolate, function, ConcurrencyMode::kNotConcurrent); return CompileOptimized(isolate, function, ConcurrencyMode::kNotConcurrent);
} }
RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
HandleScope scope(isolate);
StackLimitCheck check(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
DCHECK_EQ(function->feedback_vector().optimization_marker(),
OptimizationMarker::kLogFirstExecution);
DCHECK(FLAG_log_function_events);
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
Handle<String> name = SharedFunctionInfo::DebugName(sfi);
LOG(isolate,
FunctionEvent("first-execution", Script::cast(sfi->script()).id(), 0,
sfi->StartPosition(), sfi->EndPosition(), *name));
function->feedback_vector().ClearOptimizationMarker();
// Return the code to continue execution, we don't care at this point whether
// this is for lazy compilation or has been eagerly complied.
return function->code();
}
RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) { RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
SealHandleScope scope(isolate); SealHandleScope scope(isolate);
DCHECK_EQ(1, args.length()); DCHECK_EQ(1, args.length());
......
...@@ -111,7 +111,6 @@ namespace internal { ...@@ -111,7 +111,6 @@ namespace internal {
F(CompileOptimized_NotConcurrent, 1, 1) \ F(CompileOptimized_NotConcurrent, 1, 1) \
F(InstallBaselineCode, 1, 1) \ F(InstallBaselineCode, 1, 1) \
F(HealOptimizedCodeSlot, 1, 1) \ F(HealOptimizedCodeSlot, 1, 1) \
F(FunctionFirstExecution, 1, 1) \
F(InstantiateAsmJs, 4, 1) \ F(InstantiateAsmJs, 4, 1) \
F(NotifyDeoptimized, 0, 1) \ F(NotifyDeoptimized, 0, 1) \
F(ObserveNode, 1, 1) \ F(ObserveNode, 1, 1) \
......
...@@ -1129,10 +1129,7 @@ UNINITIALIZED_TEST(LogFunctionEvents) { ...@@ -1129,10 +1129,7 @@ UNINITIALIZED_TEST(LogFunctionEvents) {
logger.StopLogging(); logger.StopLogging();
// Ignore all the log entries that happened before warmup // TODO(cbruni): Reimplement first-execution logging if needed.
size_t start = logger.IndexOfLine(
{"function,first-execution", "warmUpEndMarkerFunction"});
CHECK(start != std::string::npos);
std::vector<std::vector<std::string>> lines = { std::vector<std::vector<std::string>> lines = {
// Create a new script // Create a new script
{"script,create"}, {"script,create"},
...@@ -1159,23 +1156,17 @@ UNINITIALIZED_TEST(LogFunctionEvents) { ...@@ -1159,23 +1156,17 @@ UNINITIALIZED_TEST(LogFunctionEvents) {
// - execute eager functions. // - execute eager functions.
{"function,parse-function,", ",lazyFunction"}, {"function,parse-function,", ",lazyFunction"},
{"function,interpreter-lazy,", ",lazyFunction"}, {"function,interpreter-lazy,", ",lazyFunction"},
{"function,first-execution,", ",lazyFunction"},
{"function,parse-function,", ",lazyInnerFunction"}, {"function,parse-function,", ",lazyInnerFunction"},
{"function,interpreter-lazy,", ",lazyInnerFunction"}, {"function,interpreter-lazy,", ",lazyInnerFunction"},
{"function,first-execution,", ",lazyInnerFunction"},
{"function,first-execution,", ",eagerFunction"},
{"function,parse-function,", ",Foo"}, {"function,parse-function,", ",Foo"},
{"function,interpreter-lazy,", ",Foo"}, {"function,interpreter-lazy,", ",Foo"},
{"function,first-execution,", ",Foo"},
{"function,parse-function,", ",Foo.foo"}, {"function,parse-function,", ",Foo.foo"},
{"function,interpreter-lazy,", ",Foo.foo"}, {"function,interpreter-lazy,", ",Foo.foo"},
{"function,first-execution,", ",Foo.foo"},
}; };
CHECK(logger.ContainsLinesInOrder(lines, start)); CHECK(logger.ContainsLinesInOrder(lines));
} }
i::FLAG_log_function_events = false; i::FLAG_log_function_events = false;
isolate->Dispose(); isolate->Dispose();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment