Commit 3200fafa authored by klaasb, committed by Commit bot

[interpreter] Compute and use type info percentage

Previously we did not compute a total IC count when interpreting, so the
check for sufficient type info would always succeed.
Also use the optimization checks for OSR while waiting for baseline
compilation, and refactor the check into its own helper.

BUG=v8:4280
BUG=chromium:634884

Review-Url: https://codereview.chromium.org/2360913003
Cr-Commit-Position: refs/heads/master@{#39677}
parent 73518a90
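To see why the missing total mattered: the profiler turns IC counts into a type-info percentage, and a zero total degenerates to 100%, so the threshold check passed vacuously for interpreted code. A minimal sketch of that arithmetic (not part of the diff; kTypeInfoThreshold is a placeholder for the real FLAG_type_info_threshold):

#include <cstdio>

// Placeholder threshold; the real value comes from FLAG_type_info_threshold.
const int kTypeInfoThreshold = 25;

int TypePercentage(int with_type_info, int total) {
  // With total == 0 (the old Ignition behavior), report 100%,
  // so "type_percentage >= threshold" always holds.
  return total > 0 ? 100 * with_type_info / total : 100;
}

int main() {
  printf("interpreted, old: %d%%\n", TypePercentage(0, 0));   // 100%
  printf("interpreted, new: %d%%\n", TypePercentage(3, 20));  // 15%
  printf("passes threshold: %d\n",
         TypePercentage(3, 20) >= kTypeInfoThreshold ? 1 : 0);  // 0
  return 0;
}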
@@ -55,6 +55,33 @@ static const int kOSRCodeSizeAllowancePerTickIgnition =
static const int kMaxSizeEarlyOpt =
5 * FullCodeGenerator::kCodeSizeMultiplier;
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
V(HotAndStable, "hot and stable") \
V(HotEnoughForBaseline, "hot enough for baseline") \
V(HotWithoutMuchTypeInfo, "not much type info but very hot") \
V(SmallFunction, "small function")
enum class OptimizationReason : uint8_t {
#define OPTIMIZATION_REASON_CONSTANTS(Constant, message) k##Constant,
OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_CONSTANTS)
#undef OPTIMIZATION_REASON_CONSTANTS
};
char const* OptimizationReasonToString(OptimizationReason reason) {
static char const* reasons[] = {
#define OPTIMIZATION_REASON_TEXTS(Constant, message) message,
OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_TEXTS)
#undef OPTIMIZATION_REASON_TEXTS
};
size_t const index = static_cast<size_t>(reason);
DCHECK_LT(index, arraysize(reasons));
return reasons[index];
}
std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
return os << OptimizationReasonToString(reason);
}
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
: isolate_(isolate),
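The OPTIMIZATION_REASON_LIST above is the X-macro idiom: one list macro is expanded once for the enum constants and once for the string table, so the two cannot drift out of sync. A self-contained sketch of the same idiom on a made-up list (not from the commit):

#include <cstdint>
#include <cstdio>

// Same expand-the-list-twice technique as OPTIMIZATION_REASON_LIST,
// demonstrated on a hypothetical fruit list.
#define FRUIT_LIST(V)  \
  V(Apple, "apple")    \
  V(Banana, "banana")

enum class Fruit : uint8_t {
#define FRUIT_CONSTANTS(Constant, message) k##Constant,
  FRUIT_LIST(FRUIT_CONSTANTS)
#undef FRUIT_CONSTANTS
};

const char* FruitToString(Fruit fruit) {
  // The string table is generated from the same list, so the index of
  // each enum constant always matches its message.
  static const char* names[] = {
#define FRUIT_TEXTS(Constant, message) message,
      FRUIT_LIST(FRUIT_TEXTS)
#undef FRUIT_TEXTS
  };
  return names[static_cast<int>(fruit)];
}

int main() {
  printf("%s\n", FruitToString(Fruit::kBanana));  // prints "banana"
  return 0;
}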
@@ -80,11 +107,15 @@ static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
// Harvest vector-ics as well
TypeFeedbackVector* vector = function->feedback_vector();
int with = 0, gen = 0;
int with = 0, gen = 0, type_vector_ic_count = 0;
const bool is_interpreted =
function->shared()->code()->is_interpreter_trampoline_builtin();
vector->ComputeCounts(&with, &gen, is_interpreted);
vector->ComputeCounts(&with, &gen, &type_vector_ic_count, is_interpreted);
if (is_interpreted) {
DCHECK_EQ(*ic_total_count, 0);
*ic_total_count = type_vector_ic_count;
}
*ic_with_type_info_count += with;
*ic_generic_count += gen;
@@ -116,13 +147,17 @@ static void TraceRecompile(JSFunction* function, const char* reason,
}
}
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
TraceRecompile(function, reason, "optimized");
void RuntimeProfiler::Optimize(JSFunction* function,
OptimizationReason reason) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
function->AttemptConcurrentOptimization();
}
void RuntimeProfiler::Baseline(JSFunction* function, const char* reason) {
TraceRecompile(function, reason, "baseline");
void RuntimeProfiler::Baseline(JSFunction* function,
OptimizationReason reason) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
TraceRecompile(function, OptimizationReasonToString(reason), "baseline");
// TODO(4280): Fix this to check function is compiled for the interpreter
// once we have a standard way to check that. For now function will only
@@ -241,9 +276,9 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
generic_percentage <= FLAG_generic_ic_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, "hot and stable");
Optimize(function, OptimizationReason::kHotAndStable);
} else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
Optimize(function, "not much type info but very hot");
Optimize(function, OptimizationReason::kHotWithoutMuchTypeInfo);
} else {
shared_code->set_profiler_ticks(ticks + 1);
if (FLAG_trace_opt_verbose) {
@@ -262,7 +297,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
&generic_percentage);
if (type_percentage >= FLAG_type_info_threshold &&
generic_percentage <= FLAG_generic_ic_threshold) {
Optimize(function, "small function");
Optimize(function, OptimizationReason::kSmallFunction);
} else {
shared_code->set_profiler_ticks(ticks + 1);
}
@@ -275,31 +310,16 @@ void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function,
JavaScriptFrame* frame) {
if (function->IsInOptimizationQueue()) return;
SharedFunctionInfo* shared = function->shared();
int ticks = shared->profiler_ticks();
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize.
if (FLAG_always_osr) {
AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
// Fall through and do a normal baseline compile as well.
} else if (!frame->is_optimized() &&
(function->IsMarkedForBaseline() ||
function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized())) {
// Attempt OSR if we are still running interpreted code even though
// the function has long been marked or even already been optimized.
int64_t allowance =
kOSRCodeSizeAllowanceBaseIgnition +
static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
if (shared->bytecode_array()->Size() <= allowance) {
AttemptOnStackReplacement(frame);
}
} else if (MaybeOSRIgnition(function, frame)) {
return;
}
SharedFunctionInfo* shared = function->shared();
int ticks = shared->profiler_ticks();
if (shared->optimization_disabled() &&
shared->disable_optimization_reason() == kOptimizationDisabledForTest) {
// Don't baseline functions which have been marked by NeverOptimizeFunction
@@ -308,7 +328,7 @@ void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function,
}
if (ticks >= kProfilerTicksBeforeBaseline) {
Baseline(function, "hot enough for baseline");
Baseline(function, OptimizationReason::kHotEnoughForBaseline);
}
}
@@ -316,31 +336,16 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
JavaScriptFrame* frame) {
if (function->IsInOptimizationQueue()) return;
SharedFunctionInfo* shared = function->shared();
int ticks = shared->profiler_ticks();
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize.
if (FLAG_always_osr) {
AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
// Fall through and do a normal optimized compile as well.
} else if (!frame->is_optimized() &&
(function->IsMarkedForBaseline() ||
function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized())) {
// Attempt OSR if we are still running interpreted code even though
// the function has long been marked or even already been optimized.
int64_t allowance =
kOSRCodeSizeAllowanceBaseIgnition +
static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
if (shared->bytecode_array()->Size() <= allowance) {
AttemptOnStackReplacement(frame);
}
} else if (MaybeOSRIgnition(function, frame)) {
return;
}
SharedFunctionInfo* shared = function->shared();
int ticks = shared->profiler_ticks();
if (shared->optimization_disabled()) {
if (shared->deopt_count() >= FLAG_max_opt_count) {
// If optimization was disabled due to many deoptimizations,
@@ -352,8 +357,51 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
}
return;
}
if (function->IsOptimized()) return;
OptimizationReason reason = ShouldOptimizeIgnition(function, frame);
if (reason != OptimizationReason::kDoNotOptimize) {
Optimize(function, reason);
}
}
bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
JavaScriptFrame* frame) {
if (!FLAG_ignition_osr) return false;
SharedFunctionInfo* shared = function->shared();
int ticks = shared->profiler_ticks();
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize.
bool osr_before_baselined = function->IsMarkedForBaseline() &&
ShouldOptimizeIgnition(function, frame) !=
OptimizationReason::kDoNotOptimize;
if (!frame->is_optimized() &&
(osr_before_baselined || function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized())) {
// Attempt OSR if we are still running interpreted code even though
// the function has long been marked or even already been optimized.
int64_t allowance =
kOSRCodeSizeAllowanceBaseIgnition +
static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
if (shared->bytecode_array()->Size() <= allowance) {
AttemptOnStackReplacement(frame);
}
return true;
}
return false;
}
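The allowance above grows linearly with profiler ticks, so larger functions become OSR-eligible only after accumulating more ticks. A worked example with made-up constants standing in for kOSRCodeSizeAllowanceBaseIgnition and kOSRCodeSizeAllowancePerTickIgnition (the real values, scaled by the interpreter's code-size multiplier, are outside this hunk):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the real allowance constants.
const int64_t kBase = 4096;
const int64_t kPerTick = 2048;

int main() {
  const int64_t bytecode_size = 12000;  // made-up function size
  for (int ticks = 0; ticks <= 6; ticks += 2) {
    int64_t allowance = kBase + static_cast<int64_t>(ticks) * kPerTick;
    // OSR is attempted only once the bytecode fits within the allowance.
    printf("ticks=%d allowance=%lld osr=%s\n", ticks,
           static_cast<long long>(allowance),
           bytecode_size <= allowance ? "yes" : "no");
  }
  return 0;
}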
OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
JSFunction* function, JavaScriptFrame* frame) {
SharedFunctionInfo* shared = function->shared();
int ticks = shared->profiler_ticks();
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
@@ -362,9 +410,9 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
generic_percentage <= FLAG_generic_ic_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, "hot and stable");
return OptimizationReason::kHotAndStable;
} else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
Optimize(function, "not much type info but very hot");
return OptimizationReason::kHotWithoutMuchTypeInfo;
} else {
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
@@ -372,10 +420,12 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
type_percentage);
}
return OptimizationReason::kDoNotOptimize;
}
}
// TODO(rmcilroy): Consider whether we should optimize small functions when
// they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
return OptimizationReason::kDoNotOptimize;
}
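Splitting the decision (ShouldOptimizeIgnition returns a reason) from the action (Optimize) is what lets MaybeOSRIgnition consult the same hotness heuristic for functions still waiting on baseline compilation. A reduced sketch of that shape, with made-up thresholds and plain ints in place of JSFunction/JavaScriptFrame:

#include <cstdio>

enum class Reason { kDoNotOptimize, kHotAndStable };

// Pure decision: no side effects, so multiple callers can consult it.
Reason ShouldOptimize(int ticks, int type_percentage) {
  if (ticks >= 100 && type_percentage >= 25) return Reason::kHotAndStable;
  return Reason::kDoNotOptimize;
}

// Action path: optimize only when the decision says so.
void MaybeOptimize(int ticks, int type_percentage) {
  if (ShouldOptimize(ticks, type_percentage) != Reason::kDoNotOptimize)
    printf("optimize\n");
}

// OSR path: reuse the same decision before OSR-ing ahead of baseline.
bool MaybeOSR(bool marked_for_baseline, int ticks, int type_percentage) {
  return marked_for_baseline &&
         ShouldOptimize(ticks, type_percentage) != Reason::kDoNotOptimize;
}

int main() {
  MaybeOptimize(120, 80);                   // prints "optimize"
  printf("%d\n", MaybeOSR(true, 120, 80));  // prints 1
  return 0;
}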
void RuntimeProfiler::MarkCandidatesForOptimization() {
@@ -423,6 +473,5 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
any_ic_changed_ = false;
}
} // namespace internal
} // namespace v8
@@ -13,6 +13,7 @@ namespace internal {
class Isolate;
class JavaScriptFrame;
class JSFunction;
enum class OptimizationReason : uint8_t;
class RuntimeProfiler {
public:
@@ -30,8 +31,13 @@ class RuntimeProfiler {
int frame_count);
void MaybeBaselineIgnition(JSFunction* function, JavaScriptFrame* frame);
void MaybeOptimizeIgnition(JSFunction* function, JavaScriptFrame* frame);
void Optimize(JSFunction* function, const char* reason);
void Baseline(JSFunction* function, const char* reason);
// Potentially attempts OSR from Ignition and returns true if no further
// optimization attempts should be made.
bool MaybeOSRIgnition(JSFunction* function, JavaScriptFrame* frame);
OptimizationReason ShouldOptimizeIgnition(JSFunction* function,
JavaScriptFrame* frame);
void Optimize(JSFunction* function, OptimizationReason reason);
void Baseline(JSFunction* function, OptimizationReason reason);
Isolate* isolate_;
bool any_ic_changed_;
@@ -148,6 +148,7 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
}
void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
int* vector_ic_count,
bool code_is_interpreted) {
Object* uninitialized_sentinel =
TypeFeedbackVector::RawUninitializedSentinel(GetIsolate());
@@ -155,14 +156,19 @@ void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
*TypeFeedbackVector::MegamorphicSentinel(GetIsolate());
int with = 0;
int gen = 0;
int total = 0;
TypeFeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackVectorSlot slot = iter.Next();
FeedbackVectorSlotKind kind = iter.kind();
Object* obj = Get(slot);
if (obj != uninitialized_sentinel &&
kind != FeedbackVectorSlotKind::GENERAL) {
if (kind == FeedbackVectorSlotKind::GENERAL) {
continue;
}
total++;
if (obj != uninitialized_sentinel) {
if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
// If we are not running interpreted code, we need to ignore
@@ -202,6 +208,7 @@ void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
*with_type_info = with;
*generic = gen;
*vector_ic_count = total;
}
Handle<Symbol> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
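The behavioral core of this hunk: every non-GENERAL slot now increments the total, initialized or not, so untouched ICs lower the type-info percentage instead of being invisible. A reduced model of the counting loop, with a hypothetical Slot type standing in for the feedback vector slots:

#include <cstdio>
#include <vector>

enum class SlotKind { kGeneral, kLoadIC, kCompareIC };
struct Slot {
  SlotKind kind;
  bool initialized;
};

// Reduced model of TypeFeedbackVector::ComputeCounts: GENERAL slots
// are skipped entirely; all other slots count toward the total, and
// only initialized ones count as having type info.
void ComputeCounts(const std::vector<Slot>& slots, int* with, int* total) {
  *with = 0;
  *total = 0;
  for (const Slot& slot : slots) {
    if (slot.kind == SlotKind::kGeneral) continue;
    (*total)++;
    if (slot.initialized) (*with)++;
  }
}

int main() {
  std::vector<Slot> slots = {{SlotKind::kLoadIC, true},
                             {SlotKind::kCompareIC, false},
                             {SlotKind::kGeneral, true}};
  int with = 0, total = 0;
  ComputeCounts(slots, &with, &total);
  printf("with=%d total=%d\n", with, total);  // with=1 total=2
  return 0;
}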
@@ -248,7 +248,7 @@ class TypeFeedbackVector : public FixedArray {
static const int kReservedIndexCount = 2;
inline void ComputeCounts(int* with_type_info, int* generic,
bool code_is_interpreted);
int* vector_ic_count, bool code_is_interpreted);
inline bool is_empty() const;