Move profiler_ticks to Code object, don't walk the stack when patching ICs

Review URL: https://chromiumcodereview.appspot.com/9866030

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11162 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b84b44d8
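
The gist of the change, as a standalone sketch (simplified types and names, not V8's actual classes): the tick counter now lives as a one-byte field on the unoptimized Code object, and patching an IC resets that counter on the host code object directly instead of walking the caller frames and resetting counters on their SharedFunctionInfos.

// Standalone model of the new bookkeeping (hypothetical types, not V8's API).
#include <algorithm>
#include <cstdint>

struct CodeModel {
  uint8_t profiler_ticks = 0;       // one-byte counter, reset on IC patching
  int ic_with_type_info_count = 0;  // ICs in this code that gathered type feedback
  int ic_total_count = 0;
};

// New approach: only the code object hosting the patched IC is touched;
// no stack walk, no per-SharedFunctionInfo counter.
void OnICPatched(CodeModel* host, int type_info_delta) {
  host->ic_with_type_info_count += type_info_delta;
  host->profiler_ticks = 0;  // postpone optimization for this code only
}

void OnProfilerTick(CodeModel* host) {
  host->profiler_ticks = std::min(host->profiler_ticks + 1, 255);
}

The diff below implements this on the real Code, TypeFeedbackInfo and RuntimeProfiler types.
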
@@ -326,6 +326,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_compiled_optimizable(info->IsOptimizable());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
@@ -1953,7 +1953,7 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
if (!maybe_info->To(&info)) return maybe_info;
}
info->set_ic_total_count(0);
info->set_ic_with_typeinfo_count(0);
info->set_ic_with_type_info_count(0);
info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
return info;
@@ -2899,7 +2899,6 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
share->set_deopt_counter(FLAG_deopt_every_n_times);
share->set_profiler_ticks(0);
share->set_ic_age(0);
// Set integer fields (smi or int, depending on the architecture).
@@ -296,58 +296,44 @@ Failure* IC::ReferenceError(const char* type, Handle<String> name) {
}
static int ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state) {
bool was_uninitialized =
old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
bool is_uninitialized =
new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
return (was_uninitialized && !is_uninitialized) ? 1 :
(!was_uninitialized && is_uninitialized) ? -1 : 0;
}
void IC::PostPatching(Address address, Code* target, Code* old_target) {
if (FLAG_type_info_threshold > 0) {
if (old_target->is_inline_cache_stub() &&
target->is_inline_cache_stub()) {
State old_state = old_target->ic_state();
State new_state = target->ic_state();
bool was_uninitialized =
old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
bool is_uninitialized =
new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
int delta = 0;
if (was_uninitialized && !is_uninitialized) {
delta = 1;
} else if (!was_uninitialized && is_uninitialized) {
delta = -1;
}
if (delta != 0) {
Code* host = target->GetHeap()->isolate()->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
// Not all Code objects have TypeFeedbackInfo.
if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info =
TypeFeedbackInfo::cast(host->type_feedback_info());
info->set_ic_with_typeinfo_count(
info->ic_with_typeinfo_count() + delta);
}
}
if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
return;
}
Code* host = target->GetHeap()->isolate()->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
if (host->kind() != Code::FUNCTION) return;
if (FLAG_type_info_threshold > 0 &&
old_target->is_inline_cache_stub() &&
target->is_inline_cache_stub()) {
int delta = ComputeTypeInfoCountDelta(old_target->ic_state(),
target->ic_state());
// Not all Code objects have TypeFeedbackInfo.
if (delta != 0 && host->type_feedback_info()->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info =
TypeFeedbackInfo::cast(host->type_feedback_info());
info->set_ic_with_type_info_count(
info->ic_with_type_info_count() + delta);
}
}
if (FLAG_watch_ic_patching) {
host->set_profiler_ticks(0);
Isolate::Current()->runtime_profiler()->NotifyICChanged();
// We do not want to optimize until the ICs have settled down,
// so when they are patched, we postpone optimization for the
// current function and the functions above it on the stack that
// might want to inline this one.
StackFrameIterator it;
if (it.done()) return;
it.Advance();
static const int kStackFramesToMark = Compiler::kMaxInliningLevels - 1;
for (int i = 0; i < kStackFramesToMark; ++i) {
if (it.done()) return;
StackFrame* raw_frame = it.frame();
if (raw_frame->is_java_script()) {
JSFunction* function =
JSFunction::cast(JavaScriptFrame::cast(raw_frame)->function());
if (function->IsOptimized()) continue;
SharedFunctionInfo* shared = function->shared();
shared->set_profiler_ticks(0);
}
it.Advance();
}
}
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
// unoptimized version for the benefit of later inlining.
}
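
For clarity, a self-contained illustration (simplified stand-in for IC::State, hypothetical names) of how the factored-out delta computation behaves: the per-code type-info counter goes up when an IC leaves the uninitialized/premonomorphic states and down when it returns there.

// Simplified model; only the states relevant to the delta are included.
#include <cassert>

enum SimpleState { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };

int ComputeDelta(SimpleState old_state, SimpleState new_state) {
  bool was_uninitialized =
      old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
  bool is_uninitialized =
      new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
  return (was_uninitialized && !is_uninitialized) ? 1 :
         (!was_uninitialized && is_uninitialized) ? -1 : 0;
}

int main() {
  assert(ComputeDelta(UNINITIALIZED, MONOMORPHIC) == 1);    // IC gained type info
  assert(ComputeDelta(MONOMORPHIC, PREMONOMORPHIC) == -1);  // feedback cleared
  assert(ComputeDelta(MONOMORPHIC, MEGAMORPHIC) == 0);      // still has type info
  return 0;
}
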
@@ -3073,6 +3073,19 @@ void Code::set_allow_osr_at_loop_nesting_level(int level) {
}
int Code::profiler_ticks() {
ASSERT(kind() == FUNCTION);
return READ_BYTE_FIELD(this, kProfilerTicksOffset);
}
void Code::set_profiler_ticks(int ticks) {
ASSERT(kind() == FUNCTION);
ASSERT(ticks < 256);
WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
}
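
Because the counter is stored in a single byte of the Code header (READ_BYTE_FIELD/WRITE_BYTE_FIELD at kProfilerTicksOffset), values must stay below 256; the STATIC_ASSERT on kTicksWhenNotEnoughTypeInfo in runtime-profiler.cc below guards that. A rough stand-in for what the byte-field access amounts to (illustrative helpers, not V8's macros):

#include <cassert>
#include <cstdint>

// Illustrative only: a byte read/write at a fixed offset into an object's header.
int ReadByteField(const uint8_t* object_base, int offset) {
  return object_base[offset];
}

void WriteByteField(uint8_t* object_base, int offset, int value) {
  assert(0 <= value && value < 256);  // the tick counter must fit in one byte
  object_base[offset] = static_cast<uint8_t>(value);
}
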
unsigned Code::stack_slots() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
return READ_UINT32_FIELD(this, kStackSlotsOffset);
@@ -3492,6 +3505,7 @@ ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
kThisPropertyAssignmentsOffset)
SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -3542,8 +3556,6 @@ SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
@@ -3597,11 +3609,6 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
profiler_ticks,
kProfilerTicksOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, ic_age, kICAgeOffset)
#endif
@@ -4805,7 +4812,7 @@ Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset)
SMI_ACCESSORS(TypeFeedbackInfo, ic_with_typeinfo_count,
SMI_ACCESSORS(TypeFeedbackInfo, ic_with_type_info_count,
kIcWithTypeinfoCountOffset)
ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
kTypeFeedbackCellsOffset)
@@ -559,8 +559,8 @@ void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
PrintF(out, "\n - ic_total_count: %d, ic_with_typeinfo_count: %d",
ic_total_count(), ic_with_typeinfo_count());
PrintF(out, "\n - ic_total_count: %d, ic_with_type_info_count: %d",
ic_total_count(), ic_with_type_info_count());
PrintF(out, "\n - type_feedback_cells: ");
type_feedback_cells()->FixedArrayPrint(out);
}
@@ -7871,9 +7871,9 @@ void SharedFunctionInfo::AttachInitialMap(Map* map) {
void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
code()->ClearInlineCaches();
code()->set_profiler_ticks(0);
set_ic_age(new_ic_age);
set_opt_count(0);
set_profiler_ticks(0);
}
@@ -4250,6 +4250,11 @@ class Code: public HeapObject {
inline void set_allow_osr_at_loop_nesting_level(int level);
inline int allow_osr_at_loop_nesting_level();
// [profiler_ticks]: For FUNCTION kind, tells for how many profiler ticks
// the code object was seen on the stack with no IC patching going on.
inline int profiler_ticks();
inline void set_profiler_ticks(int ticks);
// [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
// reserved in the code prologue.
inline unsigned stack_slots();
@@ -4468,6 +4473,7 @@ class Code: public HeapObject {
static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
@@ -5326,9 +5332,6 @@ class SharedFunctionInfo: public HeapObject {
inline int deopt_counter();
inline void set_deopt_counter(int counter);
inline int profiler_ticks();
inline void set_profiler_ticks(int ticks);
// Inline cache age is used to infer whether the function survived a context
// disposal or not. In the former case we reset the opt_count.
inline int ic_age();
@@ -5510,10 +5513,13 @@ class SharedFunctionInfo: public HeapObject {
kInferredNameOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInitialMapOffset + kPointerSize;
// ic_age is a Smi field. It could be grouped with another Smi field into a
// PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
static const int kICAgeOffset = kThisPropertyAssignmentsOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
kICAgeOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
@@ -5533,11 +5539,10 @@ class SharedFunctionInfo: public HeapObject {
kThisPropertyAssignmentsCountOffset + kPointerSize;
static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
static const int kDeoptCounterOffset = kAstNodeCountOffset + kPointerSize;
static const int kProfilerTicksOffset = kDeoptCounterOffset + kPointerSize;
static const int kICAgeOffset = kProfilerTicksOffset + kPointerSize;
// Total size.
static const int kSize = kICAgeOffset + kPointerSize;
static const int kSize = kDeoptCounterOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
@@ -5549,7 +5554,7 @@ class SharedFunctionInfo: public HeapObject {
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
kICAgeOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
@@ -5576,12 +5581,8 @@ class SharedFunctionInfo: public HeapObject {
static const int kAstNodeCountOffset = kOptCountOffset + kIntSize;
static const int kDeoptCounterOffset = kAstNodeCountOffset + kIntSize;
static const int kProfilerTicksOffset = kDeoptCounterOffset + kIntSize;
static const int kICAgeOffset = kProfilerTicksOffset + kIntSize;
// Total size.
static const int kSize = kICAgeOffset + kIntSize;
static const int kSize = kDeoptCounterOffset + kIntSize;
#endif
@@ -6568,8 +6569,8 @@ class TypeFeedbackInfo: public Struct {
inline int ic_total_count();
inline void set_ic_total_count(int count);
inline int ic_with_typeinfo_count();
inline void set_ic_with_typeinfo_count(int count);
inline int ic_with_type_info_count();
inline void set_ic_with_type_info_count(int count);
DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
@@ -65,6 +65,12 @@ static const int kSizeLimit = 1500;
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
@@ -105,20 +111,20 @@ void RuntimeProfiler::GlobalSetup() {
static void GetICCounts(JSFunction* function,
int* ic_with_typeinfo_count,
int* ic_with_type_info_count,
int* ic_total_count,
int* percentage) {
*ic_total_count = 0;
*ic_with_typeinfo_count = 0;
*ic_with_type_info_count = 0;
Object* raw_info =
function->shared()->code()->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
*ic_with_typeinfo_count = info->ic_with_typeinfo_count();
*ic_with_type_info_count = info->ic_with_type_info_count();
*ic_total_count = info->ic_total_count();
}
*percentage = *ic_total_count > 0
? 100 * *ic_with_typeinfo_count / *ic_total_count
? 100 * *ic_with_type_info_count / *ic_total_count
: 100;
}
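
In other words (a hedged restatement with a hypothetical helper name), the percentage GetICCounts reports is the share of ICs in the unoptimized code that have collected type feedback, defaulting to 100 when the code has no ICs at all:

// Hypothetical helper mirroring the computation above.
int TypeInfoPercentage(int ic_with_type_info_count, int ic_total_count) {
  return ic_total_count > 0
      ? 100 * ic_with_type_info_count / ic_total_count
      : 100;  // no ICs at all counts as fully typed
}
// e.g. TypeInfoPercentage(3, 4) == 75; TypeInfoPercentage(0, 0) == 100
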
@@ -257,13 +263,14 @@ void RuntimeProfiler::OptimizeNow() {
}
}
if (function->IsMarkedForLazyRecompilation() &&
function->shared()->code()->kind() == Code::FUNCTION) {
Code* unoptimized = function->shared()->code();
int nesting = unoptimized->allow_osr_at_loop_nesting_level();
Code* shared_code = function->shared()->code();
if (shared_code->kind() != Code::FUNCTION) continue;
if (function->IsMarkedForLazyRecompilation()) {
int nesting = shared_code->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
}
// Do not record non-optimizable functions.
@@ -281,7 +288,7 @@ void RuntimeProfiler::OptimizeNow() {
}
if (FLAG_watch_ic_patching) {
int ticks = function->shared()->profiler_ticks();
int ticks = shared_code->profiler_ticks();
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, total, percentage;
@@ -290,12 +297,10 @@ void RuntimeProfiler::OptimizeNow() {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, "hot and stable");
} else if (ticks >= 100) {
// If this function does not have enough type info, but has
// seen a huge number of ticks, optimize it as it is.
} else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
Optimize(function, "not much type info but very hot");
} else {
function->shared()->set_profiler_ticks(ticks + 1);
shared_code->set_profiler_ticks(ticks + 1);
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function->PrintName();
@@ -304,7 +309,7 @@ void RuntimeProfiler::OptimizeNow() {
}
}
} else if (!any_ic_changed_ &&
function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) {
shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
Optimize(function, "small function");
@@ -317,7 +322,7 @@ void RuntimeProfiler::OptimizeNow() {
// then type info might already be stable and we can optimize now.
Optimize(function, "stable on startup");
} else {
function->shared()->set_profiler_ticks(ticks + 1);
shared_code->set_profiler_ticks(ticks + 1);
}
} else { // !FLAG_watch_ic_patching
samples[sample_count++] = function;
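
Condensed, the per-function decision OptimizeNow now makes when --watch-ic-patching is enabled (a sketch with a hypothetical free function; the small-function early-optimization and OSR paths are omitted):

enum OptimizeDecision { OPTIMIZE_HOT_AND_STABLE, OPTIMIZE_VERY_HOT, KEEP_WAITING };

// ticks comes from shared_code->profiler_ticks(), percentage from GetICCounts,
// threshold from FLAG_type_info_threshold.
OptimizeDecision DecideOptimization(int ticks, int percentage, int threshold) {
  const int kProfilerTicksBeforeOptimization = 2;
  const int kTicksWhenNotEnoughTypeInfo = 100;
  if (ticks >= kProfilerTicksBeforeOptimization) {
    if (percentage >= threshold) return OPTIMIZE_HOT_AND_STABLE;
    if (ticks >= kTicksWhenNotEnoughTypeInfo) return OPTIMIZE_VERY_HOT;
  }
  // Caller keeps counting: shared_code->set_profiler_ticks(ticks + 1).
  return KEEP_WAITING;
}
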
@@ -8043,8 +8043,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
ASSERT(args.length() == 1);
Handle<JSFunction> function = args.at<JSFunction>(0);
function->shared()->set_profiler_ticks(0);
// If the function is not compiled ignore the lazy
// recompilation. This can happen if the debugger is activated and
// the function is returned to the not compiled state.
@@ -8067,6 +8065,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
function->ReplaceCode(function->shared()->code());
return function->code();
}
function->shared()->code()->set_profiler_ticks(0);
if (JSFunction::CompileOptimized(function,
AstNode::kNoNumber,
CLEAR_EXCEPTION)) {