Commit 09637ab3 authored by Leszek Swirski, committed by Commit Bot

[runtime] Don't count profiler ticks on Code objects

With the deprecation of Crankshaft, it's no longer necessary for
FullCodeGen to keep track of its runtime profiler ticks on the code
object, so we can unify the behaviour of FCG and Ignition and have
both increment the counter on the SharedFunctionInfo (SFI) instead.

Bug: v8:6408
Change-Id: Idcdd673aa39af06fe15a0fc14dfda2afafb5e417
Reviewed-on: https://chromium-review.googlesource.com/528117
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45892}
parent 522bc812
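
In short, the change collapses the old two-location bookkeeping (ticks on the Code object for full-codegen code, ticks on the SharedFunctionInfo for Ignition code) into a single counter on the SharedFunctionInfo. A minimal before/after sketch of the reset pattern, condensed from the compiler.cc hunks below:

    // Before: the tick counter lived in two places, depending on tier.
    if (shared->HasBaselineCode()) {
      shared->code()->set_profiler_ticks(0);  // full-codegen: counter on Code
    } else if (shared->HasBytecodeArray()) {
      shared->set_profiler_ticks(0);          // Ignition: counter on SFI
    }

    // After: one counter on the SharedFunctionInfo, whatever the tier.
    shared->set_profiler_ticks(0);
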
......
@@ -891,11 +891,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
   // Reset profiler ticks, function is no longer considered hot.
   DCHECK(shared->is_compiled());
-  if (shared->HasBaselineCode()) {
-    shared->code()->set_profiler_ticks(0);
-  } else if (shared->HasBytecodeArray()) {
-    shared->set_profiler_ticks(0);
-  }
+  shared->set_profiler_ticks(0);
   VMState<COMPILER> state(isolate);
   DCHECK(!isolate->has_pending_exception());
......
@@ -1006,11 +1002,7 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
   Handle<SharedFunctionInfo> shared = info->shared_info();
   // Reset profiler ticks, function is no longer considered hot.
-  if (shared->HasBaselineCode()) {
-    shared->code()->set_profiler_ticks(0);
-  } else if (shared->HasBytecodeArray()) {
-    shared->set_profiler_ticks(0);
-  }
+  shared->set_profiler_ticks(0);
   shared->set_has_concurrent_optimization_job(false);
......
......
@@ -121,7 +121,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info, uintptr_t stack_limit) {
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
   code->set_has_reloc_info_for_serialization(info->will_serialize());
   code->set_allow_osr_at_loop_nesting_level(0);
-  code->set_profiler_ticks(0);
   code->set_back_edge_table_offset(table_offset);
   Handle<ByteArray> source_positions =
       cgen.source_position_table_builder_.ToSourcePositionTable(
......
......
@@ -426,23 +426,15 @@ static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
 // static
 void IC::OnFeedbackChanged(Isolate* isolate, JSFunction* host_function) {
-  Code* host = host_function->shared()->code();
-  if (host->kind() == Code::FUNCTION) {
-    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
-    info->change_own_type_change_checksum();
-    host->set_profiler_ticks(0);
-  } else if (host_function->IsInterpreted()) {
-    if (FLAG_trace_opt_verbose) {
-      if (host_function->shared()->profiler_ticks() != 0) {
-        PrintF("[resetting ticks for ");
-        host_function->PrintName();
-        PrintF(" due from %d due to IC change]\n",
-               host_function->shared()->profiler_ticks());
-      }
+  if (FLAG_trace_opt_verbose) {
+    if (host_function->shared()->profiler_ticks() != 0) {
+      PrintF("[resetting ticks for ");
+      host_function->PrintName();
+      PrintF(" due from %d due to IC change]\n",
+             host_function->shared()->profiler_ticks());
     }
-    host_function->shared()->set_profiler_ticks(0);
   }
+  host_function->shared()->set_profiler_ticks(0);
   isolate->runtime_profiler()->NotifyICChanged();
   // TODO(2029): When an optimized function is patched, it would
   // be nice to propagate the corresponding type information to its
......
@@ -478,10 +470,14 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
       info->change_ic_with_type_info_count(polymorphic_delta);
       info->change_ic_generic_count(generic_delta);
     }
     TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
     info->change_own_type_change_checksum();
   }
-  host->set_profiler_ticks(0);
+  // TODO(leszeks): Normally we would reset profiler ticks here -- but, we don't
+  // currently have access the the feedback vector from the IC. In practice,
+  // this is not an issue, as these ICs are only used by asm.js, which shouldn't
+  // have too many IC changes. This inconsistency should go away once these
+  // Crankshaft/hydrogen code stubs go away.
   isolate->runtime_profiler()->NotifyICChanged();
   // TODO(2029): When an optimized function is patched, it would
   // be nice to propagate the corresponding type information to its
......
......
@@ -4066,21 +4066,6 @@ void Code::set_allow_osr_at_loop_nesting_level(int level) {
 }
-int Code::profiler_ticks() {
-  DCHECK_EQ(FUNCTION, kind());
-  return ProfilerTicksField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-void Code::set_profiler_ticks(int ticks) {
-  if (kind() == FUNCTION) {
-    unsigned previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
-    unsigned updated = ProfilerTicksField::update(previous, ticks);
-    WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-  }
-}
 int Code::builtin_index() { return READ_INT_FIELD(this, kBuiltinIndexOffset); }
 void Code::set_builtin_index(int index) {
......
......
@@ -13656,23 +13656,13 @@ void Map::StartInobjectSlackTracking() {
 void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
   code()->ClearInlineCaches();
   set_ic_age(new_ic_age);
-  if (code()->kind() == Code::FUNCTION) {
-    code()->set_profiler_ticks(0);
-    if (optimization_disabled() && deopt_count() >= FLAG_max_deopt_count) {
-      // Re-enable optimizations if they were disabled due to deopt_count limit.
-      set_optimization_disabled(false);
-    }
-    set_opt_count(0);
-    set_deopt_count(0);
-  } else if (IsInterpreted()) {
-    set_profiler_ticks(0);
-    if (optimization_disabled() && deopt_count() >= FLAG_max_deopt_count) {
-      // Re-enable optimizations if they were disabled due to deopt_count limit.
-      set_optimization_disabled(false);
-    }
-    set_opt_count(0);
-    set_deopt_count(0);
-  }
+  set_profiler_ticks(0);
+  if (optimization_disabled() && deopt_count() >= FLAG_max_deopt_count) {
+    // Re-enable optimizations if they were disabled due to deopt_count limit.
+    set_optimization_disabled(false);
+  }
+  set_opt_count(0);
+  set_deopt_count(0);
 }
 void ObjectVisitor::VisitCodeTarget(Code* host, RelocInfo* rinfo) {
......
......
@@ -3758,11 +3758,6 @@ class Code: public HeapObject {
   inline void set_allow_osr_at_loop_nesting_level(int level);
   inline int allow_osr_at_loop_nesting_level();
-  // [profiler_ticks]: For FUNCTION kind, tells for how many profiler ticks
-  // the code object was seen on the stack with no IC patching going on.
-  inline int profiler_ticks();
-  inline void set_profiler_ticks(int ticks);
   // [builtin_index]: For builtins, tells which builtin index the code object
   // has. Note that builtins can have a code kind other than BUILTIN. The
   // builtin index is a non-negative integer for builtins, and -1 otherwise.
......
@@ -4049,16 +4044,6 @@ class Code: public HeapObject {
   class BodyDescriptor;
-  // Byte offsets within kKindSpecificFlags1Offset.
-  static const int kFullCodeFlags = kKindSpecificFlags1Offset;
-  class FullCodeFlagsHasDeoptimizationSupportField:
-      public BitField<bool, 0, 1> {};  // NOLINT
-  class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
-  class FullCodeFlagsHasRelocInfoForSerialization
-      : public BitField<bool, 2, 1> {};
-  // Bit 3 in this bitfield is unused.
-  class ProfilerTicksField : public BitField<int, 4, 28> {};
   // Flags layout. BitField<type, shift, size>.
   class HasUnwindingInfoField : public BitField<bool, 0, 1> {};
   class KindField : public BitField<Kind, HasUnwindingInfoField::kNext, 5> {};
......
@@ -4068,6 +4053,18 @@ class Code: public HeapObject {
       PlatformSmiTagging::kSmiValueSize - KindField::kNext> {
   };
+  // KindSpecificFlags1 layout (FUNCTION)
+  static const int kFullCodeFlags = kKindSpecificFlags1Offset;
+  static const int kFullCodeFlagsHasDeoptimizationSupportBit = 0;
+  static const int kFullCodeFlagsHasDebugBreakSlotsField = 1;
+  static const int kFullCodeFlagsHasRelocInfoForSerialization = 2;
+  class FullCodeFlagsHasDeoptimizationSupportField
+      : public BitField<bool, kFullCodeFlagsHasDeoptimizationSupportBit, 1> {};
+  class FullCodeFlagsHasDebugBreakSlotsField
+      : public BitField<bool, kFullCodeFlagsHasDebugBreakSlotsField, 1> {};
+  class FullCodeFlagsHasRelocInfoForSerialization
+      : public BitField<bool, kFullCodeFlagsHasRelocInfoForSerialization, 1> {};
+
   // KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
   static const int kStackSlotsFirstBit = 0;
   static const int kStackSlotsBitCount = 24;
......
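
For readers unfamiliar with the bitfield machinery being moved around above: ProfilerTicksField and the FullCodeFlags* classes are instances of V8's BitField template, which packs a typed value into a bit range of a 32-bit flags word. A rough, self-contained sketch of the pattern (illustrative only, not V8's exact definition):

    #include <cstdint>

    // Sketch of the BitField pattern: a value of type T stored in `size` bits
    // starting at bit `shift` of a 32-bit word.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static const int kNext = shift + size;           // first bit after the field
      static const uint32_t kMax = (1u << size) - 1u;  // largest storable value
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | (static_cast<uint32_t>(value) << shift);
      }
    };

    // The removed field: 28 bits of tick count at bits 4..31 of
    // kKindSpecificFlags1, above the three full-code flag bits (bit 3 unused).
    using ProfilerTicksField = BitField<int, 4, 28>;

This is why the old Code::set_profiler_ticks had to read-modify-write the whole flags word, and why the runtime profiler checked ticks < Code::ProfilerTicksField::kMax before incrementing.
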
......
@@ -221,14 +221,11 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
                            function->IsOptimized())) {
     // Attempt OSR if we are still running unoptimized code even though the
     // the function has long been marked or even already been optimized.
-    int ticks = shared_code->profiler_ticks();
+    int ticks = shared->profiler_ticks();
     int64_t allowance =
         kOSRCodeSizeAllowanceBase +
         static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
-    if (shared_code->CodeSize() > allowance &&
-        ticks < Code::ProfilerTicksField::kMax) {
-      shared_code->set_profiler_ticks(ticks + 1);
-    } else {
+    if (shared_code->CodeSize() <= allowance) {
       AttemptOnStackReplacement(frame);
     }
     return;
......
@@ -248,19 +245,17 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
     if (shared->deopt_count() >= FLAG_max_deopt_count) {
       // If optimization was disabled due to many deoptimizations,
       // then check if the function is hot and try to reenable optimization.
-      int ticks = shared_code->profiler_ticks();
+      int ticks = shared->profiler_ticks();
       if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
-        shared_code->set_profiler_ticks(0);
+        shared->set_profiler_ticks(0);
         shared->TryReenableOptimization();
-      } else {
-        shared_code->set_profiler_ticks(ticks + 1);
       }
     }
     return;
   }
   if (frame->is_optimized()) return;
-  int ticks = shared_code->profiler_ticks();
+  int ticks = shared->profiler_ticks();
   if (ticks >= kProfilerTicksBeforeOptimization) {
     int typeinfo, generic, total, type_percentage, generic_percentage;
......
@@ -274,7 +269,6 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
   } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
     Optimize(function, OptimizationReason::kHotWithoutMuchTypeInfo);
   } else {
-    shared_code->set_profiler_ticks(ticks + 1);
     if (FLAG_trace_opt_verbose) {
       PrintF("[not yet optimizing ");
       function->PrintName();
......
@@ -292,11 +286,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
     if (type_percentage >= FLAG_type_info_threshold &&
         generic_percentage <= FLAG_generic_ic_threshold) {
       Optimize(function, OptimizationReason::kSmallFunction);
-    } else {
-      shared_code->set_profiler_ticks(ticks + 1);
     }
-  } else {
-    shared_code->set_profiler_ticks(ticks + 1);
   }
 }
......
@@ -453,17 +443,12 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
       MaybeOptimizeFullCodegen(function, frame, frame_count);
     }
-    // Update shared function info ticks after checking for whether functions
-    // should be optimized to keep FCG (which updates ticks on code) and
-    // Ignition (which updates ticks on shared function info) in sync.
-    List<SharedFunctionInfo*> functions(4);
-    frame->GetFunctions(&functions);
-    for (int i = functions.length(); --i >= 0;) {
-      SharedFunctionInfo* shared_function_info = functions[i];
-      int ticks = shared_function_info->profiler_ticks();
-      if (ticks < Smi::kMaxValue) {
-        shared_function_info->set_profiler_ticks(ticks + 1);
-      }
+    // TODO(leszeks): Move this increment to before the maybe optimize checks,
+    // and update the tests to assume the increment has already happened.
+    SharedFunctionInfo* shared = function->shared();
+    int ticks = shared->profiler_ticks();
+    if (ticks < Smi::kMaxValue) {
+      shared->set_profiler_ticks(ticks + 1);
     }
   }
   any_ic_changed_ = false;
......
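
To make the OSR gate in the first runtime-profiler hunk concrete: the code-size allowance grows linearly with the tick count, so a large unoptimized function that keeps accumulating ticks eventually becomes eligible for on-stack replacement. A standalone illustration with invented constants (the real kOSRCodeSizeAllowanceBase and kOSRCodeSizeAllowancePerTick values are tuning parameters in runtime-profiler.cc, not the numbers used here):

    #include <cstdint>
    #include <cstdio>

    // Invented stand-ins for V8's tuning constants (assumed values).
    const int64_t kOSRCodeSizeAllowanceBase = 10000;
    const int64_t kOSRCodeSizeAllowancePerTick = 4000;

    // Mirrors the post-patch check: attempt OSR once the code size fits under
    // the tick-scaled allowance. The ticks themselves are now incremented in
    // one place, on the SharedFunctionInfo.
    bool ShouldAttemptOsr(int64_t code_size, int ticks) {
      int64_t allowance =
          kOSRCodeSizeAllowanceBase +
          static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
      return code_size <= allowance;
    }

    int main() {
      printf("%d\n", ShouldAttemptOsr(30000, 2));  // 0: allowance is only 18000
      printf("%d\n", ShouldAttemptOsr(30000, 5));  // 1: allowance reaches 30000
      return 0;
    }
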
......
@@ -47,7 +47,6 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
     Code* code = Code::cast(obj);
     if (code->kind() == Code::FUNCTION) {
       code->ClearInlineCaches();
-      code->set_profiler_ticks(0);
     }
   }
......
......
@@ -2218,15 +2218,6 @@ TEST(InstanceOfStubWriteBarrier) {
   CcTest::CollectGarbage(OLD_SPACE);
 }
-namespace {
-int GetProfilerTicks(SharedFunctionInfo* shared) {
-  return FLAG_ignition || FLAG_turbo ? shared->profiler_ticks()
-                                     : shared->code()->profiler_ticks();
-}
-}  // namespace
 TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
   if (!FLAG_incremental_marking) return;
   FLAG_stress_compaction = false;
......
@@ -2270,7 +2261,7 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
   CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
   CHECK_EQ(0, f->shared()->opt_count());
-  CHECK_EQ(0, GetProfilerTicks(f->shared()));
+  CHECK_EQ(0, f->shared()->profiler_ticks());
 }
......
@@ -2313,7 +2304,7 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
   CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
   CHECK_EQ(0, f->shared()->opt_count());
-  CHECK_EQ(0, GetProfilerTicks(f->shared()));
+  CHECK_EQ(0, f->shared()->profiler_ticks());
 }
......