Commit 54604990 authored by Jakob Gruber, committed by V8 LUCI CQ

[compiler] Remove OptimizationMarker::kLogFirstExecution

The functionality is unused and we are simplifying OptimizationMarker
usage.

Drive-by: Remove unused return value of Compiler::CompileOptimized.
Drive-by: Don't add kStackSpaceRequiredForCompilation as gap to the
stack check when compiling concurrently, i.e. on another thread.

Bug: chromium:757467
Change-Id: Ibbe204b82bf937b9eb74f9eb2c3fd2d719d53ef9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3416245
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78800}
parent 99a5bb74
@@ -936,12 +936,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   // -----------------------------------
   DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));

-  // TODO(v8:8394): The logging of first execution will break if
-  // feedback vectors are not allocated. We need to find a different way of
-  // logging these events if required.
-  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
-                                OptimizationMarker::kLogFirstExecution,
-                                Runtime::kFunctionFirstExecution);
   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
@@ -949,9 +943,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                                 OptimizationMarker::kCompileOptimizedConcurrent,
                                 Runtime::kCompileOptimized_Concurrent);

-  // Marker should be one of LogFirstExecution / CompileOptimized /
-  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
-  // here.
+  // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+  // InOptimizationQueue and None shouldn't reach here.
   if (FLAG_debug_code) {
     __ stop();
   }
@@ -1060,9 +1053,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   DCHECK(!AreAliased(optimization_state, feedback_vector));
   Label maybe_has_optimized_code;
   // Check if optimized code is available
-  __ tst(
-      optimization_state,
-      Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+  __ tst(optimization_state,
+         Operand(FeedbackVector::kHasCompileOptimizedMarker));
   __ b(eq, &maybe_has_optimized_code);

   Register optimization_marker = optimization_state;
......
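The same six-line deletion and the same mask rename repeat below for every architecture backend (arm64, ia32, mips, mips64, ppc, s390, x64). As a rough sketch of the control flow these hand-written assembly builtins implement after the change — plain C++ with stand-in functions rather than the real MacroAssembler API; only the enum values come from this commit:

// Hedged sketch, not V8 code: the real MaybeOptimizeCode builtins are
// per-platform assembly.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

enum class OptimizationMarker : int32_t {
  kNone = 0b00,
  kInOptimizationQueue = 0b01,
  kCompileOptimized = 0b10,
  kCompileOptimizedConcurrent = 0b11,
};

// Stand-ins for the Runtime::kCompileOptimized_* entry points.
static void RuntimeCompileNotConcurrent() { std::puts("not-concurrent"); }
static void RuntimeCompileConcurrent() { std::puts("concurrent"); }

static void MaybeOptimizeCode(OptimizationMarker marker) {
  // With kLogFirstExecution gone, only two markers dispatch to the runtime.
  if (marker == OptimizationMarker::kCompileOptimized) {
    RuntimeCompileNotConcurrent();
  } else if (marker == OptimizationMarker::kCompileOptimizedConcurrent) {
    RuntimeCompileConcurrent();
  } else {
    // kNone / kInOptimizationQueue should never reach here; the builtins
    // trap under FLAG_debug_code (stop / int3 / Unreachable).
    std::abort();
  }
}

int main() {
  MaybeOptimizeCode(OptimizationMarker::kCompileOptimized);
  MaybeOptimizeCode(OptimizationMarker::kCompileOptimizedConcurrent);
}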
@@ -1122,12 +1122,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   ASM_CODE_COMMENT(masm);
   DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));

-  // TODO(v8:8394): The logging of first execution will break if
-  // feedback vectors are not allocated. We need to find a different way of
-  // logging these events if required.
-  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
-                                OptimizationMarker::kLogFirstExecution,
-                                Runtime::kFunctionFirstExecution);
   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
@@ -1135,9 +1129,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                                 OptimizationMarker::kCompileOptimizedConcurrent,
                                 Runtime::kCompileOptimized_Concurrent);

-  // Marker should be one of LogFirstExecution / CompileOptimized /
-  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
-  // here.
+  // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+  // InOptimizationQueue and None shouldn't reach here.
   if (FLAG_debug_code) {
     __ Unreachable();
   }
@@ -1242,10 +1235,9 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   DCHECK(!AreAliased(optimization_state, feedback_vector));
   Label maybe_has_optimized_code;
   // Check if optimized code is available
-  __ TestAndBranchIfAllClear(
-      optimization_state,
-      FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
-      &maybe_has_optimized_code);
+  __ TestAndBranchIfAllClear(optimization_state,
+                             FeedbackVector::kHasCompileOptimizedMarker,
+                             &maybe_has_optimized_code);

   Register optimization_marker = optimization_state;
   __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
......
@@ -53,17 +53,14 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
               FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask),
       &fallthrough);

-  GotoIfNot(IsSetWord32(
-                optimization_state,
-                FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker),
+  GotoIfNot(IsSetWord32(optimization_state,
+                        FeedbackVector::kHasCompileOptimizedMarker),
             &may_have_optimized_code);

   // TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
   // all these marker values there.
   TNode<Uint32T> marker =
       DecodeWord32<FeedbackVector::OptimizationMarkerBits>(optimization_state);
-  TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kLogFirstExecution,
-                                Runtime::kFunctionFirstExecution, function);
   TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent,
                                 function);
......
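The CSA builtin above keeps the two-step shape the assembly backends also use: a single mask test on the packed optimization_state word decides whether any compile marker is set at all, and only then is the 2-bit marker field decoded and compared. A plain-C++ sketch of that shape; the shift and width here are assumptions standing in for the Torque-generated OptimizationMarkerBits:

#include <cstdint>

// Assumed layout: the 2-bit marker occupies the low bits of the flags word.
constexpr uint32_t kMarkerShift = 0;  // assumption for illustration
constexpr uint32_t kMarkerMask = 0b11u << kMarkerShift;
constexpr uint32_t kHasCompileOptimizedMarker = 0b10u << kMarkerShift;

constexpr uint32_t DecodeMarker(uint32_t optimization_state) {
  return (optimization_state & kMarkerMask) >> kMarkerShift;
}

constexpr bool WantsCompile(uint32_t optimization_state) {
  // Fast check first: is any compile-marker bit set?
  if ((optimization_state & kHasCompileOptimizedMarker) == 0) return false;
  // Slow path: decode the concrete marker and dispatch on it.
  const uint32_t marker = DecodeMarker(optimization_state);
  return marker == 0b10 || marker == 0b11;  // (non-)concurrent compile
}

static_assert(!WantsCompile(0b00), "kNone falls through");
static_assert(!WantsCompile(0b01), "kInOptimizationQueue falls through");
static_assert(WantsCompile(0b10), "kCompileOptimized dispatches");
static_assert(WantsCompile(0b11), "kCompileOptimizedConcurrent dispatches");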
@@ -894,12 +894,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
   ASM_CODE_COMMENT(masm);
   DCHECK(!AreAliased(edx, edi, optimization_marker));

-  // TODO(v8:8394): The logging of first execution will break if
-  // feedback vectors are not allocated. We need to find a different way of
-  // logging these events if required.
-  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
-                                OptimizationMarker::kLogFirstExecution,
-                                Runtime::kFunctionFirstExecution);
   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
@@ -907,9 +901,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
                                 OptimizationMarker::kCompileOptimizedConcurrent,
                                 Runtime::kCompileOptimized_Concurrent);

-  // Marker should be one of LogFirstExecution / CompileOptimized /
-  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
-  // here.
+  // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+  // InOptimizationQueue and None shouldn't reach here.
   if (FLAG_debug_code) {
     __ int3();
   }
@@ -1030,9 +1023,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   ASM_CODE_COMMENT(masm);
   Label maybe_has_optimized_code;
   // Check if optimized code is available
-  __ test(
-      optimization_state,
-      Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+  __ test(optimization_state,
+          Immediate(FeedbackVector::kHasCompileOptimizedMarker));
   __ j(zero, &maybe_has_optimized_code);

   Register optimization_marker = optimization_state;
......
@@ -912,12 +912,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   ASM_CODE_COMMENT(masm);
   DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));

-  // TODO(v8:8394): The logging of first execution will break if
-  // feedback vectors are not allocated. We need to find a different way of
-  // logging these events if required.
-  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
-                                OptimizationMarker::kLogFirstExecution,
-                                Runtime::kFunctionFirstExecution);
   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
@@ -925,7 +919,7 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                                 OptimizationMarker::kCompileOptimizedConcurrent,
                                 Runtime::kCompileOptimized_Concurrent);

-  // Marker should be one of LogFirstExecution / CompileOptimized /
+  // Marker should be one of CompileOptimized /
   // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
   // here.
   if (FLAG_debug_code) {
@@ -1036,9 +1030,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   {
     UseScratchRegisterScope temps(masm);
     Register scratch = temps.Acquire();
-    __ And(
-        scratch, optimization_state,
-        Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+    __ And(scratch, optimization_state,
+           Operand(FeedbackVector::kHasCompileOptimizedMarker));
     __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
   }
......
@@ -922,12 +922,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   // -----------------------------------
   DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));

-  // TODO(v8:8394): The logging of first execution will break if
-  // feedback vectors are not allocated. We need to find a different way of
-  // logging these events if required.
-  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
-                                OptimizationMarker::kLogFirstExecution,
-                                Runtime::kFunctionFirstExecution);
   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
@@ -935,9 +929,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                                 OptimizationMarker::kCompileOptimizedConcurrent,
                                 Runtime::kCompileOptimized_Concurrent);

-  // Marker should be one of LogFirstExecution / CompileOptimized /
-  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
-  // here.
+  // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+  // InOptimizationQueue and None shouldn't reach here.
   if (FLAG_debug_code) {
     __ stop();
   }
@@ -1045,9 +1038,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   {
     UseScratchRegisterScope temps(masm);
     Register scratch = temps.Acquire();
-    __ And(
-        scratch, optimization_state,
-        Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+    __ And(scratch, optimization_state,
+           Operand(FeedbackVector::kHasCompileOptimizedMarker));
     __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
   }
......
@@ -924,12 +924,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   // -----------------------------------
   DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker));

-  // TODO(v8:8394): The logging of first execution will break if
-  // feedback vectors are not allocated. We need to find a different way of
-  // logging these events if required.
-  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
-                                OptimizationMarker::kLogFirstExecution,
-                                Runtime::kFunctionFirstExecution);
   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
@@ -937,9 +931,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                                 OptimizationMarker::kCompileOptimizedConcurrent,
                                 Runtime::kCompileOptimized_Concurrent);

-  // Marker should be one of LogFirstExecution / CompileOptimized /
-  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
-  // here.
+  // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+  // InOptimizationQueue and None shouldn't reach here.
   if (FLAG_debug_code) {
     __ stop();
   }
@@ -1031,8 +1024,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   DCHECK(!AreAliased(optimization_state, feedback_vector));
   Label maybe_has_optimized_code;
   // Check if optimized code is available
-  __ TestBitMask(optimization_state,
-                 FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+  __ TestBitMask(optimization_state, FeedbackVector::kHasCompileOptimizedMarker,
                  r0);
   __ beq(&maybe_has_optimized_code, cr0);
......
@@ -1211,12 +1211,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   // -----------------------------------
   DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker));

-  // TODO(v8:8394): The logging of first execution will break if
-  // feedback vectors are not allocated. We need to find a different way of
-  // logging these events if required.
-  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
-                                OptimizationMarker::kLogFirstExecution,
-                                Runtime::kFunctionFirstExecution);
   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
@@ -1224,9 +1218,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                                 OptimizationMarker::kCompileOptimizedConcurrent,
                                 Runtime::kCompileOptimized_Concurrent);

-  // Marker should be one of LogFirstExecution / CompileOptimized /
-  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
-  // here.
+  // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+  // InOptimizationQueue and None shouldn't reach here.
   if (FLAG_debug_code) {
     __ stop();
   }
@@ -1318,8 +1311,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   DCHECK(!AreAliased(optimization_state, feedback_vector));
   Label maybe_has_optimized_code;
   // Check if optimized code is available
-  __ TestBitMask(optimization_state,
-                 FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+  __ TestBitMask(optimization_state, FeedbackVector::kHasCompileOptimizedMarker,
                  r0);
   __ beq(&maybe_has_optimized_code);
......
@@ -952,12 +952,6 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
   ASM_CODE_COMMENT(masm);
   DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));

-  // TODO(v8:8394): The logging of first execution will break if
-  // feedback vectors are not allocated. We need to find a different way of
-  // logging these events if required.
-  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
-                                OptimizationMarker::kLogFirstExecution,
-                                Runtime::kFunctionFirstExecution);
   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
@@ -965,9 +959,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                                 OptimizationMarker::kCompileOptimizedConcurrent,
                                 Runtime::kCompileOptimized_Concurrent);

-  // Marker should be one of LogFirstExecution / CompileOptimized /
-  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
-  // here.
+  // Marker should be one of CompileOptimized / CompileOptimizedConcurrent.
+  // InOptimizationQueue and None shouldn't reach here.
   if (FLAG_debug_code) {
     __ int3();
   }
@@ -1130,9 +1123,8 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   ASM_CODE_COMMENT(masm);
   DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
   Label maybe_has_optimized_code;
-  __ testl(
-      optimization_state,
-      Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+  __ testl(optimization_state,
+           Immediate(FeedbackVector::kHasCompileOptimizedMarker));
   __ j(zero, &maybe_has_optimized_code);

   Register optimization_marker = optimization_state;
......
@@ -2081,7 +2081,7 @@ bool Compiler::FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
 }

 // static
-bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
+void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
                                 ConcurrencyMode mode, CodeKind code_kind) {
   DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
   DCHECK(AllowCompilation::IsAllowed(isolate));
@@ -2117,7 +2117,6 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
            function->ChecksOptimizationMarker());
   DCHECK_IMPLIES(function->IsInOptimizationQueue(),
                  mode == ConcurrencyMode::kConcurrent);
-  return true;
 }

 // static
......
@@ -84,7 +84,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
   static bool CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
                               ClearExceptionFlag flag,
                               IsCompiledScope* is_compiled_scope);
-  static bool CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
+  static void CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
                                ConcurrencyMode mode, CodeKind code_kind);
   static MaybeHandle<SharedFunctionInfo> CompileToplevel(
       ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
......
@@ -1652,22 +1652,21 @@ using FileAndLine = std::pair<const char*, int>;
 enum class OptimizationMarker : int32_t {
   // These values are set so that it is easy to check if there is a marker where
   // some processing needs to be done.
-  kNone = 0b000,
-  kInOptimizationQueue = 0b001,
-  kCompileOptimized = 0b010,
-  kCompileOptimizedConcurrent = 0b011,
-  kLogFirstExecution = 0b100,
-  kLastOptimizationMarker = kLogFirstExecution
+  kNone = 0b00,
+  kInOptimizationQueue = 0b01,
+  kCompileOptimized = 0b10,
+  kCompileOptimizedConcurrent = 0b11,
+  kLastOptimizationMarker = kCompileOptimizedConcurrent,
 };
 // For kNone or kInOptimizationQueue we don't need any special processing.
 // To check both cases using a single mask, we expect the kNone to be 0 and
 // kInOptimizationQueue to be 1 so that we can mask off the lsb for checking.
-STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b000 &&
+STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b00 &&
               static_cast<int>(OptimizationMarker::kInOptimizationQueue) ==
-                  0b001);
+                  0b01);
 STATIC_ASSERT(static_cast<int>(OptimizationMarker::kLastOptimizationMarker) <=
-              0b111);
-static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b110;
+              0b11);
+static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b10;

 inline bool IsInOptimizationQueueMarker(OptimizationMarker marker) {
   return marker == OptimizationMarker::kInOptimizationQueue;
@@ -1681,8 +1680,6 @@ inline bool IsCompileOptimizedMarker(OptimizationMarker marker) {
 inline std::ostream& operator<<(std::ostream& os,
                                 const OptimizationMarker& marker) {
   switch (marker) {
-    case OptimizationMarker::kLogFirstExecution:
-      return os << "OptimizationMarker::kLogFirstExecution";
     case OptimizationMarker::kNone:
       return os << "OptimizationMarker::kNone";
     case OptimizationMarker::kCompileOptimized:
......
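The mask comment above is the heart of the encoding: kNone (0b00) and kInOptimizationQueue (0b01) differ only in the low bit, so ANDing a marker with 0b10 yields zero exactly for those two "nothing to do" values. A compile-time check of that property, using only the constants from this hunk:

#include <cstdint>

enum class OptimizationMarker : int32_t {  // values from the hunk above
  kNone = 0b00,
  kInOptimizationQueue = 0b01,
  kCompileOptimized = 0b10,
  kCompileOptimizedConcurrent = 0b11,
};
constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b10;

constexpr bool HasCompileMarker(OptimizationMarker m) {
  // Masking off the lsb: non-zero iff some processing is needed.
  return (static_cast<uint32_t>(m) & kNoneOrInOptimizationQueueMask) != 0;
}

static_assert(!HasCompileMarker(OptimizationMarker::kNone), "");
static_assert(!HasCompileMarker(OptimizationMarker::kInOptimizationQueue), "");
static_assert(HasCompileMarker(OptimizationMarker::kCompileOptimized), "");
static_assert(HasCompileMarker(OptimizationMarker::kCompileOptimizedConcurrent), "");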
@@ -158,8 +158,7 @@ bool FeedbackVector::has_optimized_code() const {
 }

 bool FeedbackVector::has_optimization_marker() const {
-  return optimization_marker() != OptimizationMarker::kLogFirstExecution &&
-         optimization_marker() != OptimizationMarker::kNone;
+  return optimization_marker() != OptimizationMarker::kNone;
 }

 // Conversion from an integer index to either a slot or an ic slot.
......
@@ -261,9 +261,7 @@ Handle<FeedbackVector> FeedbackVector::New(
   DCHECK_EQ(vector->length(), slot_count);
   DCHECK_EQ(vector->shared_function_info(), *shared);
-  DCHECK_EQ(vector->optimization_marker(),
-            FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
-                                     : OptimizationMarker::kNone);
+  DCHECK_EQ(vector->optimization_marker(), OptimizationMarker::kNone);
   DCHECK_EQ(vector->optimization_tier(), OptimizationTier::kNone);
   DCHECK_EQ(vector->invocation_count(), 0);
   DCHECK_EQ(vector->profiler_ticks(), 0);
@@ -443,9 +441,7 @@ void FeedbackVector::ClearOptimizationTier(FeedbackCell feedback_cell) {
 void FeedbackVector::InitializeOptimizationState() {
   int32_t state = 0;
-  state = OptimizationMarkerBits::update(
-      state, FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
-                                      : OptimizationMarker::kNone);
+  state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone);
   state = OptimizationTierBits::update(state, OptimizationTier::kNone);
   set_flags(state);
 }
......
@@ -203,9 +203,9 @@ class FeedbackVector
  public:
   NEVER_READ_ONLY_SPACE
   DEFINE_TORQUE_GENERATED_FEEDBACK_VECTOR_FLAGS()
-  STATIC_ASSERT(OptimizationMarker::kLastOptimizationMarker <
+  STATIC_ASSERT(OptimizationMarker::kLastOptimizationMarker <=
                 OptimizationMarkerBits::kMax);
-  STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <
+  STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <=
                 OptimizationTierBits::kMax);

   static const bool kFeedbackVectorMaybeOptimizedCodeIsStoreRelease = true;
@@ -213,14 +213,13 @@ class FeedbackVector
                 HeapObject>::maybe_optimized_code;
   DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)

-  static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker =
+  static constexpr uint32_t kHasCompileOptimizedMarker =
       kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
   static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask =
       kNoneOrMidTierMask << OptimizationTierBits::kShift |
-      kHasCompileOptimizedOrLogFirstExecutionMarker;
+      kHasCompileOptimizedMarker;
   static constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
-      OptimizationTierBits::kMask |
-      kHasCompileOptimizedOrLogFirstExecutionMarker;
+      OptimizationTierBits::kMask | kHasCompileOptimizedMarker;

   inline bool is_empty() const;
......
@@ -6,9 +6,9 @@ type OptimizationMarker extends uint16 constexpr 'OptimizationMarker';
 type OptimizationTier extends uint16 constexpr 'OptimizationTier';

 bitfield struct FeedbackVectorFlags extends uint32 {
-  optimization_marker: OptimizationMarker: 3 bit;
+  optimization_marker: OptimizationMarker: 2 bit;
   optimization_tier: OptimizationTier: 2 bit;
-  global_ticks_at_last_runtime_profiler_interrupt: uint32: 24 bit;
+  all_your_bits_are_belong_to_jgruber: uint32: 28 bit;
 }

 @generateBodyDescriptor
......
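Two details of the feedback-vector hunks above are worth making concrete. First, the flags word is now fully allocated: 2 marker bits + 2 tier bits + 28 spare bits = 32. Second, kHasCompileOptimizedMarker is just kNoneOrInOptimizationQueueMask shifted into the marker field, and the wider masks OR in the tier bits. A sketch under the assumption that the fields are laid out in declaration order starting at bit 0 (the real shifts come from the Torque-generated bitfield accessors):

#include <cstdint>

// Assumed field layout, in declaration order from bit 0:
constexpr uint32_t kMarkerShift = 0, kMarkerBits = 2;  // optimization_marker
constexpr uint32_t kTierShift = 2, kTierBits = 2;      // optimization_tier
static_assert(kMarkerBits + kTierBits + 28 == 32, "flags word fully used");

constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b10;  // from globals.h
constexpr uint32_t kTierMask = ((1u << kTierBits) - 1) << kTierShift;

// Mirrors the renamed constant in feedback-vector.h:
constexpr uint32_t kHasCompileOptimizedMarker =
    kNoneOrInOptimizationQueueMask << kMarkerShift;
// Mirrors kHasOptimizedCodeOrCompileOptimizedMarkerMask:
constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
    kTierMask | kHasCompileOptimizedMarker;

static_assert(kHasCompileOptimizedMarker == 0b0010,
              "marker mask under the assumed shifts");
static_assert(kHasOptimizedCodeOrCompileOptimizedMarkerMask == 0b1110, "");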
@@ -32,15 +32,13 @@ namespace {
 Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
                         ConcurrencyMode mode) {
   StackLimitCheck check(isolate);
-  if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
-    return isolate->StackOverflow();
-  }
+  // Concurrent optimization runs on another thread, thus no additional gap.
+  const int stack_gap = mode == ConcurrencyMode::kConcurrent
+                            ? 0
+                            : kStackSpaceRequiredForCompilation * KB;
+  if (check.JsHasOverflowed(stack_gap)) return isolate->StackOverflow();

   // Compile for the next tier.
-  if (!Compiler::CompileOptimized(isolate, function, mode,
-                                  function->NextTier())) {
-    return ReadOnlyRoots(isolate).exception();
-  }
+  Compiler::CompileOptimized(isolate, function, mode, function->NextTier());

   // As a post-condition of CompileOptimized, the function *must* be compiled,
   // i.e. the installed Code object must not be the CompileLazy builtin.
@@ -108,26 +106,6 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
   return CompileOptimized(isolate, function, ConcurrencyMode::kNotConcurrent);
 }

-RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
-  HandleScope scope(isolate);
-  StackLimitCheck check(isolate);
-  DCHECK_EQ(1, args.length());
-
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  DCHECK_EQ(function->feedback_vector().optimization_marker(),
-            OptimizationMarker::kLogFirstExecution);
-  DCHECK(FLAG_log_function_events);
-  Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
-  Handle<String> name = SharedFunctionInfo::DebugName(sfi);
-  LOG(isolate,
-      FunctionEvent("first-execution", Script::cast(sfi->script()).id(), 0,
-                    sfi->StartPosition(), sfi->EndPosition(), *name));
-  function->feedback_vector().ClearOptimizationMarker();
-  // Return the code to continue execution, we don't care at this point whether
-  // this is for lazy compilation or has been eagerly complied.
-  return function->code();
-}
-
 RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
   SealHandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
......
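The gap logic above is the second drive-by from the commit message: a concurrent optimization job compiles on a background thread with its own stack, so only the non-concurrent path needs to reserve compiler headroom on the JavaScript thread before proceeding. A minimal sketch of the selection; the value 40 for kStackSpaceRequiredForCompilation is an assumption for illustration (V8 defines the real constant elsewhere):

#include <cstdint>

enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
constexpr int KB = 1024;
constexpr int kStackSpaceRequiredForCompilation = 40;  // assumed value

constexpr int StackGapFor(ConcurrencyMode mode) {
  // Concurrent optimization runs on another thread, thus no additional gap.
  return mode == ConcurrencyMode::kConcurrent
             ? 0
             : kStackSpaceRequiredForCompilation * KB;
}

static_assert(StackGapFor(ConcurrencyMode::kConcurrent) == 0, "");
static_assert(StackGapFor(ConcurrencyMode::kNotConcurrent) == 40 * KB, "");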
@@ -111,7 +111,6 @@ namespace internal {
   F(CompileOptimized_NotConcurrent, 1, 1) \
   F(InstallBaselineCode, 1, 1)            \
   F(HealOptimizedCodeSlot, 1, 1)          \
-  F(FunctionFirstExecution, 1, 1)         \
   F(InstantiateAsmJs, 4, 1)               \
   F(NotifyDeoptimized, 0, 1)              \
   F(ObserveNode, 1, 1)                    \
......
@@ -1129,10 +1129,7 @@ UNINITIALIZED_TEST(LogFunctionEvents) {
     logger.StopLogging();

-    // Ignore all the log entries that happened before warmup
-    size_t start = logger.IndexOfLine(
-        {"function,first-execution", "warmUpEndMarkerFunction"});
-    CHECK(start != std::string::npos);
+    // TODO(cbruni): Reimplement first-execution logging if needed.
     std::vector<std::vector<std::string>> lines = {
         // Create a new script
         {"script,create"},
@@ -1159,23 +1156,17 @@ UNINITIALIZED_TEST(LogFunctionEvents) {
         // - execute eager functions.
         {"function,parse-function,", ",lazyFunction"},
         {"function,interpreter-lazy,", ",lazyFunction"},
-        {"function,first-execution,", ",lazyFunction"},
         {"function,parse-function,", ",lazyInnerFunction"},
         {"function,interpreter-lazy,", ",lazyInnerFunction"},
-        {"function,first-execution,", ",lazyInnerFunction"},
-        {"function,first-execution,", ",eagerFunction"},
         {"function,parse-function,", ",Foo"},
         {"function,interpreter-lazy,", ",Foo"},
-        {"function,first-execution,", ",Foo"},
         {"function,parse-function,", ",Foo.foo"},
         {"function,interpreter-lazy,", ",Foo.foo"},
-        {"function,first-execution,", ",Foo.foo"},
     };
-    CHECK(logger.ContainsLinesInOrder(lines, start));
+    CHECK(logger.ContainsLinesInOrder(lines));
   }
   i::FLAG_log_function_events = false;
   isolate->Dispose();
......