Commit 75a1fa91 authored by ulan@chromium.org

Reland r11425 "Re-enable optimization for hot functions that have optimization disabled due to many deopts."

Fix the performance regressions introduced in r11425 by
- counting the number of function deoptimizations instead of function optimizations,
- packing several counters into one field of the shared function info (a minimal sketch of the packing scheme follows below).

BUG=v8:2040,121196
R=jkummerow@chromium.org

Review URL: https://chromiumcodereview.appspot.com/10534063

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11766 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent d67e1d4f
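
For context on the counter packing mentioned above: the objects.h hunk below defines DeoptCountBits (bits 0-3), OptReenableTriesBits (bits 4-21) and ICAgeBits (bits 22-29), so all three counters fit in a single Smi-sized field. The following is a minimal standalone sketch of that packing scheme; it uses a simplified stand-in for V8's BitField template, so the helper here is illustrative rather than the real class.

    #include <cassert>
    #include <cstdio>

    // Simplified stand-in for V8's BitField<T, shift, size> helper (illustrative).
    template <int shift, int size>
    struct BitRange {
      static const int kMax = (1 << size) - 1;
      static int decode(int value) { return (value >> shift) & kMax; }
      static int update(int previous, int field) {
        return (previous & ~(kMax << shift)) | ((field & kMax) << shift);
      }
    };

    // Layout mirrors the objects.h hunk: 4 + 18 + 8 = 30 bits, which fits in a Smi.
    typedef BitRange<0, 4>  DeoptCountBits;        // values 0..15
    typedef BitRange<4, 18> OptReenableTriesBits;  // values 0..262143
    typedef BitRange<22, 8> ICAgeBits;             // values 0..255

    int main() {
      int counters = 0;  // corresponds to share->set_counters(0) in the heap hunk
      counters = DeoptCountBits::update(counters, 5);
      counters = OptReenableTriesBits::update(counters, 17);
      counters = ICAgeBits::update(counters, 3);
      assert(DeoptCountBits::decode(counters) == 5);
      assert(OptReenableTriesBits::decode(counters) == 17);
      assert(ICAgeBits::decode(counters) == 3);
      std::printf("packed counters: 0x%08x\n", counters);
      return 0;
    }
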
......@@ -378,6 +378,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
reinterpret_cast<intptr_t>(from),
fp_to_sp_delta - (2 * kPointerSize));
}
function->shared()->increment_deopt_count();
// Find the optimized code.
if (type == EAGER) {
ASSERT(from == NULL);
......
......@@ -3022,8 +3022,8 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
share->set_deopt_counter(FLAG_deopt_every_n_times);
share->set_ic_age(0);
share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
share->set_counters(0);
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
......
......@@ -1597,7 +1597,7 @@ class Heap {
}
void AgeInlineCaches() {
++global_ic_age_;
global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
}
private:
......
......@@ -566,19 +566,22 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
__ push(eax);
__ push(ebx);
__ mov(ebx, shared);
__ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
__ mov(eax,
FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
__ j(not_zero, &no_deopt, Label::kNear);
if (FLAG_trap_on_deopt) __ int3();
__ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
__ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
__ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
eax);
__ pop(ebx);
__ pop(eax);
__ popfd();
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
__ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
__ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
eax);
__ pop(ebx);
__ pop(eax);
__ popfd();
......
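
The ia32 hunk above only renames kDeoptCounterOffset to kStressDeoptCounterOffset; the underlying --deopt-every-n-times stress logic is unchanged: decrement the per-function counter, skip the forced deopt while it is non-zero, and reset it to FLAG_deopt_every_n_times once it reaches zero. A rough C++ model of that behavior (illustrative only, not the generated code):

    #include <cstdio>

    // Rough model of the --deopt-every-n-times stress path implemented by the
    // ia32 assembly above (illustrative only, not actual V8 code).
    struct SharedInfoModel {
      int stress_deopt_counter;
    };

    static bool ShouldForceDeopt(SharedInfoModel* shared, int deopt_every_n_times) {
      if (--shared->stress_deopt_counter != 0) return false;  // no forced deopt yet
      shared->stress_deopt_counter = deopt_every_n_times;     // reset the counter
      return true;                                            // force a deopt now
    }

    int main() {
      const int kDeoptEveryNTimes = 5;  // stand-in for FLAG_deopt_every_n_times
      SharedInfoModel shared = { kDeoptEveryNTimes };
      for (int check = 1; check <= 12; ++check) {
        if (ShouldForceDeopt(&shared, kDeoptEveryNTimes))
          std::printf("forced deopt at check #%d\n", check);  // checks 5 and 10
      }
      return 0;
    }
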
......@@ -3673,7 +3673,7 @@ ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
kThisPropertyAssignmentsOffset)
SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
......@@ -3722,8 +3722,10 @@ SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
SMI_ACCESSORS(SharedFunctionInfo,
stress_deopt_counter,
kStressDeoptCounterOffset)
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
......@@ -3775,8 +3777,10 @@ PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
kThisPropertyAssignmentsCountOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, counters, kCountersOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
stress_deopt_counter,
kStressDeoptCounterOffset)
#endif
......@@ -3977,12 +3981,64 @@ void SharedFunctionInfo::set_code_age(int code_age) {
}
int SharedFunctionInfo::ic_age() {
return ICAgeBits::decode(counters());
}
void SharedFunctionInfo::set_ic_age(int ic_age) {
set_counters(ICAgeBits::update(counters(), ic_age));
}
int SharedFunctionInfo::deopt_count() {
return DeoptCountBits::decode(counters());
}
void SharedFunctionInfo::set_deopt_count(int deopt_count) {
set_counters(DeoptCountBits::update(counters(), deopt_count));
}
void SharedFunctionInfo::increment_deopt_count() {
int value = counters();
int deopt_count = DeoptCountBits::decode(value);
deopt_count = (deopt_count + 1) & DeoptCountBits::kMax;
set_counters(DeoptCountBits::update(value, deopt_count));
}
int SharedFunctionInfo::opt_reenable_tries() {
return OptReenableTriesBits::decode(counters());
}
void SharedFunctionInfo::set_opt_reenable_tries(int tries) {
set_counters(OptReenableTriesBits::update(counters(), tries));
}
bool SharedFunctionInfo::has_deoptimization_support() {
Code* code = this->code();
return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
}
void SharedFunctionInfo::TryReenableOptimization() {
int tries = opt_reenable_tries();
set_opt_reenable_tries((tries + 1) & OptReenableTriesBits::kMax);
// We reenable optimization whenever the number of tries is a large
// enough power of 2.
if (tries >= 16 && (((tries - 1) & tries) == 0)) {
set_optimization_disabled(false);
set_opt_count(0);
set_deopt_count(0);
code()->set_optimizable(true);
}
}
bool JSFunction::IsBuiltin() {
return context()->global()->IsJSBuiltinsObject();
}
......
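
TryReenableOptimization above gives an exponential backoff: each call bumps opt_reenable_tries, and optimization is re-enabled only when the previous value is a power of two that is at least 16 (16, 32, 64, ...), wrapping at OptReenableTriesBits::kMax. A small standalone check of which try counts pass that condition (illustrative, not V8 code):

    #include <cstdio>

    // Which values of `tries` satisfy the check in TryReenableOptimization above:
    // tries >= 16 and tries is a power of two.
    static bool WouldReenable(int tries) {
      return tries >= 16 && ((tries - 1) & tries) == 0;
    }

    int main() {
      for (int tries = 0; tries <= 1024; ++tries) {
        if (WouldReenable(tries)) std::printf("re-enable at tries = %d\n", tries);
      }
      // Prints 16, 32, 64, 128, 256, 512, 1024: exponentially spaced retries.
      return 0;
    }
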
......@@ -7983,6 +7983,7 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
code()->set_optimizable(true);
}
set_opt_count(0);
set_deopt_count(0);
}
}
......
......@@ -5412,8 +5412,8 @@ class SharedFunctionInfo: public HeapObject {
// A counter used to determine when to stress the deoptimizer with a
// deopt.
inline int deopt_counter();
inline void set_deopt_counter(int counter);
inline int stress_deopt_counter();
inline void set_stress_deopt_counter(int counter);
inline int profiler_ticks();
......@@ -5541,9 +5541,26 @@ class SharedFunctionInfo: public HeapObject {
bool HasSourceCode();
Handle<Object> GetSourceCode();
// Number of times the function was optimized.
inline int opt_count();
inline void set_opt_count(int opt_count);
// Number of times the function was deoptimized.
inline void set_deopt_count(int value);
inline int deopt_count();
inline void increment_deopt_count();
// Number of times we tried to re-enable optimization after it
// was disabled due to a high number of deoptimizations.
inline void set_opt_reenable_tries(int value);
inline int opt_reenable_tries();
inline void TryReenableOptimization();
// Stores deopt_count, opt_reenable_tries and ic_age as bit-fields.
inline void set_counters(int value);
inline int counters();
// Source size of this function.
int SourceSize();
......@@ -5600,13 +5617,14 @@ class SharedFunctionInfo: public HeapObject {
kInferredNameOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInitialMapOffset + kPointerSize;
// ic_age is a Smi field. It could be grouped with another Smi field into a
// PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
static const int kICAgeOffset = kThisPropertyAssignmentsOffset + kPointerSize;
// ast_node_count is a Smi field. It could be grouped with another Smi field
// into a PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
static const int kAstNodeCountOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
kICAgeOffset + kPointerSize;
kAstNodeCountOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
......@@ -5624,12 +5642,11 @@ class SharedFunctionInfo: public HeapObject {
kCompilerHintsOffset + kPointerSize;
static const int kOptCountOffset =
kThisPropertyAssignmentsCountOffset + kPointerSize;
static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
static const int kDeoptCounterOffset = kAstNodeCountOffset + kPointerSize;
static const int kCountersOffset = kOptCountOffset + kPointerSize;
static const int kStressDeoptCounterOffset = kCountersOffset + kPointerSize;
// Total size.
static const int kSize = kDeoptCounterOffset + kPointerSize;
static const int kSize = kStressDeoptCounterOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
......@@ -5641,7 +5658,7 @@ class SharedFunctionInfo: public HeapObject {
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
kICAgeOffset + kPointerSize;
kAstNodeCountOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
......@@ -5665,11 +5682,11 @@ class SharedFunctionInfo: public HeapObject {
static const int kOptCountOffset =
kThisPropertyAssignmentsCountOffset + kIntSize;
static const int kAstNodeCountOffset = kOptCountOffset + kIntSize;
static const int kDeoptCounterOffset = kAstNodeCountOffset + kIntSize;
static const int kCountersOffset = kOptCountOffset + kIntSize;
static const int kStressDeoptCounterOffset = kCountersOffset + kIntSize;
// Total size.
static const int kSize = kDeoptCounterOffset + kIntSize;
static const int kSize = kStressDeoptCounterOffset + kIntSize;
#endif
......@@ -5722,6 +5739,10 @@ class SharedFunctionInfo: public HeapObject {
kCompilerHintsCount // Pseudo entry
};
class DeoptCountBits: public BitField<int, 0, 4> {};
class OptReenableTriesBits: public BitField<int, 4, 18> {};
class ICAgeBits: public BitField<int, 22, 8> {};
private:
#if V8_HOST_ARCH_32_BIT
// On 32 bit platforms, compiler hints is a smi.
......
......@@ -65,13 +65,20 @@ static const int kSizeLimit = 1500;
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
// If the function optimization was disabled due to a high deoptimization count,
// but the function is hot and has been seen on the stack this number of times,
// then we try to reenable optimization for this function.
static const int kProfilerTicksBeforeReenablingOptimization = 250;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt = 500;
......@@ -263,7 +270,9 @@ void RuntimeProfiler::OptimizeNow() {
}
}
Code* shared_code = function->shared()->code();
SharedFunctionInfo* shared = function->shared();
Code* shared_code = shared->code();
if (shared_code->kind() != Code::FUNCTION) continue;
if (function->IsMarkedForLazyRecompilation()) {
......@@ -273,19 +282,33 @@ void RuntimeProfiler::OptimizeNow() {
shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
}
// Do not record non-optimizable functions.
if (!function->IsOptimizable()) continue;
if (function->shared()->optimization_disabled()) continue;
// Only record top-level code on top of the execution stack and
// avoid optimizing excessively large scripts since top-level code
// will be executed only once.
const int kMaxToplevelSourceSize = 10 * 1024;
if (function->shared()->is_toplevel()
&& (frame_count > 1
|| function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
if (shared->is_toplevel() &&
(frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
continue;
}
// Do not record non-optimizable functions.
if (shared->optimization_disabled()) {
if (shared->deopt_count() >= Compiler::kDefaultMaxOptCount) {
// If optimization was disabled due to many deoptimizations,
// then check if the function is hot and try to reenable optimization.
int ticks = shared_code->profiler_ticks();
if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
shared_code->set_profiler_ticks(0);
shared->TryReenableOptimization();
} else {
shared_code->set_profiler_ticks(ticks + 1);
}
}
continue;
}
if (!function->IsOptimizable()) continue;
if (FLAG_watch_ic_patching) {
int ticks = shared_code->profiler_ticks();
......
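
Taken together with the runtime-profiler hunk above, a function whose optimization was disabled after too many deopts is reconsidered only once every kProfilerTicksBeforeReenablingOptimization profiler ticks, and each reconsideration feeds TryReenableOptimization's power-of-two backoff. The following simplified simulation (illustrative only) counts how many ticks such a function accumulates before optimization is actually re-enabled:

    #include <cstdio>

    // Simplified model of the runtime-profiler path above (illustrative only).
    static const int kProfilerTicksBeforeReenablingOptimization = 250;

    int main() {
      int profiler_ticks = 0;       // models shared_code->profiler_ticks()
      int reenable_tries = 0;       // models shared->opt_reenable_tries()
      bool optimization_disabled = true;
      long long total_ticks = 0;

      while (optimization_disabled) {
        ++total_ticks;  // the disabled function is seen hot on the stack again
        if (profiler_ticks >= kProfilerTicksBeforeReenablingOptimization) {
          profiler_ticks = 0;
          int tries = reenable_tries;  // TryReenableOptimization()
          ++reenable_tries;
          if (tries >= 16 && ((tries - 1) & tries) == 0) {
            optimization_disabled = false;  // opt_count and deopt_count reset here
          }
        } else {
          ++profiler_ticks;
        }
      }
      std::printf("re-enabled after %lld ticks and %d tries\n",
                  total_ticks, reenable_tries);
      return 0;
    }
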