Commit 22bbb73d authored by Jakob Gruber, committed by V8 LUCI CQ

[tiering] Centralize the optimization decision

OptimizationDecision holds the optimization {reason, code kind,
concurrency mode}. We centralize it to avoid recalculating in
Optimize what ShouldOptimize has already determined.

Bug: v8:7700
Change-Id: Ifcd902e86f844ce045bcefd4ae72ac17b42acb3a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3500300
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79326}
parent 5cdac4b8
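As a minimal standalone sketch of the pattern this change introduces (simplified stand-in names, not the actual V8 types or signatures): the tier-up heuristic computes the whole decision once and returns it as a small value object, and the consumer only reads its fields instead of recomputing anything.

// Illustration only, not V8 code; all names below are simplified stand-ins.
#include <cstdint>
#include <iostream>

enum class Reason : uint8_t { kDoNotOptimize, kHotAndStable, kSmallFunction };
enum class Kind : uint8_t { kTurbofan };
enum class Concurrency : uint8_t { kNotConcurrent, kConcurrent };

struct Decision {
  static constexpr Decision HotAndStable() {
    return {Reason::kHotAndStable, Kind::kTurbofan, Concurrency::kConcurrent};
  }
  static constexpr Decision DoNotOptimize() {
    return {Reason::kDoNotOptimize, Kind::kTurbofan, Concurrency::kConcurrent};
  }
  constexpr bool should_optimize() const {
    return reason != Reason::kDoNotOptimize;
  }
  Reason reason;
  Kind kind;
  Concurrency concurrency;
};

// The heuristic decides everything exactly once...
Decision ShouldTierUp(int ticks) {
  return ticks >= 3 ? Decision::HotAndStable() : Decision::DoNotOptimize();
}

// ...and the consumer only reads the fields it was handed.
void TierUp(Decision d) {
  std::cout << "optimizing, concurrent="
            << (d.concurrency == Concurrency::kConcurrent) << "\n";
}

int main() {
  Decision d = ShouldTierUp(/*ticks=*/5);
  if (d.should_optimize()) TierUp(d);
}

In the diff below, ShouldOptimize plays the role of the heuristic and Optimize the role of the consumer, with OptimizationDecision carrying {reason, code kind, concurrency mode}.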
@@ -1702,7 +1702,7 @@ inline std::ostream& operator<<(std::ostream& os,
 enum class BlockingBehavior { kBlock, kDontBlock };
 
-enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
+enum class ConcurrencyMode : uint8_t { kNotConcurrent, kConcurrent };
 
 inline const char* ToString(ConcurrencyMode mode) {
   switch (mode) {
...
@@ -86,12 +86,47 @@ void TraceRecompile(JSFunction function, OptimizationReason reason,
 }  // namespace
 
-void TieringManager::Optimize(JSFunction function, OptimizationReason reason,
-                              CodeKind code_kind) {
-  DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
-  TraceRecompile(function, reason, code_kind, isolate_);
-  function.MarkForOptimization(isolate_, CodeKind::TURBOFAN,
-                               ConcurrencyMode::kConcurrent);
+class OptimizationDecision {
+ public:
+  static constexpr OptimizationDecision TurbofanHotAndStable() {
+    return {OptimizationReason::kHotAndStable, CodeKind::TURBOFAN,
+            ConcurrencyMode::kConcurrent};
+  }
+  static constexpr OptimizationDecision TurbofanSmallFunction() {
+    return {OptimizationReason::kSmallFunction, CodeKind::TURBOFAN,
+            ConcurrencyMode::kConcurrent};
+  }
+  static constexpr OptimizationDecision DoNotOptimize() {
+    return {OptimizationReason::kDoNotOptimize,
+            // These values don't matter but we have to pass something.
+            CodeKind::TURBOFAN, ConcurrencyMode::kConcurrent};
+  }
+
+  constexpr bool should_optimize() const {
+    return optimization_reason != OptimizationReason::kDoNotOptimize;
+  }
+
+  OptimizationReason optimization_reason;
+  CodeKind code_kind;
+  ConcurrencyMode concurrency_mode;
+
+ private:
+  OptimizationDecision() = default;
+  constexpr OptimizationDecision(OptimizationReason optimization_reason,
+                                 CodeKind code_kind,
+                                 ConcurrencyMode concurrency_mode)
+      : optimization_reason(optimization_reason),
+        code_kind(code_kind),
+        concurrency_mode(concurrency_mode) {}
+};
+// Since we pass by value:
+STATIC_ASSERT(sizeof(OptimizationDecision) <= kInt32Size);
+
+void TieringManager::Optimize(JSFunction function, CodeKind code_kind,
+                              OptimizationDecision d) {
+  DCHECK(d.should_optimize());
+  TraceRecompile(function, d.optimization_reason, code_kind, isolate_);
+  function.MarkForOptimization(isolate_, d.code_kind, d.concurrency_mode);
 }
 
 void TieringManager::AttemptOnStackReplacement(UnoptimizedFrame* frame,
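The STATIC_ASSERT with the "Since we pass by value" comment is what lets the new Optimize take the decision by value: once all three member enums have a one-byte underlying type, the whole object fits in a 32-bit register and is trivially copyable. A standalone sketch of that size argument, with simplified stand-in types rather than the V8 ones:

// Illustration only, not V8 code; stand-in enums with uint8_t underlying type.
#include <cstdint>
#include <type_traits>

enum class Reason : uint8_t { kDoNotOptimize, kHotAndStable };
enum class Kind : uint8_t { kTurbofan };
enum class Concurrency : uint8_t { kNotConcurrent, kConcurrent };

struct Decision {
  Reason reason;
  Kind kind;
  Concurrency concurrency;
};

// Three one-byte fields: the object is no bigger than an int32_t and needs no
// copy-constructor work, so passing it by value is as cheap as passing an int.
static_assert(sizeof(Decision) <= sizeof(int32_t), "fits in a register");
static_assert(std::is_trivially_copyable<Decision>::value,
              "copied bit-for-bit when passed by value");

int main() {}

If any of the three enums kept the default int-sized underlying type, the object would outgrow kInt32Size and the assert in the diff would fire, which is presumably why this commit also narrows ConcurrencyMode and CodeKind to uint8_t.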
@@ -168,12 +203,8 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function,
     }
   }
 
-  OptimizationReason reason = ShouldOptimize(
-      function, function.shared().GetBytecodeArray(isolate_), frame);
-
-  if (reason != OptimizationReason::kDoNotOptimize) {
-    Optimize(function, reason, code_kind);
-  }
+  OptimizationDecision d = ShouldOptimize(function, code_kind, frame);
+  if (d.should_optimize()) Optimize(function, code_kind, d);
 }
 
 bool TieringManager::MaybeOSR(JSFunction function, UnoptimizedFrame* frame) {
@@ -200,16 +231,21 @@ bool ShouldOptimizeAsSmallFunction(int bytecode_size, bool any_ic_changed) {
 }  // namespace
 
-OptimizationReason TieringManager::ShouldOptimize(JSFunction function,
-                                                  BytecodeArray bytecode,
-                                                  JavaScriptFrame* frame) {
-  if (function.ActiveTierIsTurbofan()) {
-    return OptimizationReason::kDoNotOptimize;
+OptimizationDecision TieringManager::ShouldOptimize(JSFunction function,
+                                                    CodeKind code_kind,
+                                                    JavaScriptFrame* frame) {
+  DCHECK_EQ(code_kind, function.GetActiveTier().value());
+
+  if (code_kind == CodeKind::TURBOFAN) {
+    // Already in the top tier.
+    return OptimizationDecision::DoNotOptimize();
   }
 
   // If function's SFI has OSR cache, once enter loop range of OSR cache, set
   // OSR loop nesting level for matching condition of OSR (loop_depth <
   // osr_level), soon later OSR will be triggered when executing bytecode
   // JumpLoop which is entry of the OSR cache, then hit the OSR cache.
+  BytecodeArray bytecode = function.shared().GetBytecodeArray(isolate_);
   if (V8_UNLIKELY(function.shared().osr_code_cache_state() > kNotCached) &&
       frame->is_unoptimized()) {
     int current_offset =
@@ -227,7 +263,7 @@ OptimizationReason TieringManager::ShouldOptimize(JSFunction function,
           current_offset >= jump_target_offset) {
         bytecode.set_osr_loop_nesting_level(iterator.GetImmediateOperand(1) +
                                             1);
-        return OptimizationReason::kHotAndStable;
+        return OptimizationDecision::TurbofanHotAndStable();
       }
     }
   }
@@ -236,12 +272,12 @@ OptimizationReason TieringManager::ShouldOptimize(JSFunction function,
       FLAG_ticks_before_optimization +
       (bytecode.length() / FLAG_bytecode_size_allowance_per_tick);
   if (ticks >= ticks_for_optimization) {
-    return OptimizationReason::kHotAndStable;
+    return OptimizationDecision::TurbofanHotAndStable();
   } else if (ShouldOptimizeAsSmallFunction(bytecode.length(),
                                            any_ic_changed_)) {
     // If no IC was patched since the last tick and this function is very
     // small, optimistically optimize it now.
-    return OptimizationReason::kSmallFunction;
+    return OptimizationDecision::TurbofanSmallFunction();
   } else if (FLAG_trace_opt_verbose) {
     PrintF("[not yet optimizing ");
     function.PrintName();
@@ -253,7 +289,7 @@ OptimizationReason TieringManager::ShouldOptimize(JSFunction function,
            bytecode.length(), FLAG_max_bytecode_size_for_early_opt);
     }
   }
-  return OptimizationReason::kDoNotOptimize;
+  return OptimizationDecision::DoNotOptimize();
 }
 
 TieringManager::OnInterruptTickScope::OnInterruptTickScope(
...
@@ -17,7 +17,8 @@ class Isolate;
 class UnoptimizedFrame;
 class JavaScriptFrame;
 class JSFunction;
-enum class CodeKind;
+class OptimizationDecision;
+enum class CodeKind : uint8_t;
 enum class OptimizationReason : uint8_t;
 
 class TieringManager {
@@ -45,11 +46,10 @@ class TieringManager {
   // Potentially attempts OSR from and returns whether no other
   // optimization attempts should be made.
   bool MaybeOSR(JSFunction function, UnoptimizedFrame* frame);
-  OptimizationReason ShouldOptimize(JSFunction function,
-                                    BytecodeArray bytecode_array,
-                                    JavaScriptFrame* frame);
-  void Optimize(JSFunction function, OptimizationReason reason,
-                CodeKind code_kind);
+  OptimizationDecision ShouldOptimize(JSFunction function, CodeKind code_kind,
+                                      JavaScriptFrame* frame);
+  void Optimize(JSFunction function, CodeKind code_kind,
+                OptimizationDecision decision);
   void Baseline(JSFunction function, OptimizationReason reason);
 
   class V8_NODISCARD OnInterruptTickScope final {
...
@@ -31,7 +31,7 @@ namespace internal {
   V(MAGLEV)   \
   V(TURBOFAN)
 
-enum class CodeKind {
+enum class CodeKind : uint8_t {
 #define DEFINE_CODE_KIND_ENUM(name) name,
   CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
 #undef DEFINE_CODE_KIND_ENUM
@@ -42,6 +42,8 @@ STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOFAN);
 #define V(...) +1
 static constexpr int kCodeKindCount = CODE_KIND_LIST(V);
 #undef V
+// Unlikely, but just to be safe:
+STATIC_ASSERT(kCodeKindCount <= std::numeric_limits<uint8_t>::max());
 
 const char* CodeKindToString(CodeKind kind);
...
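The new STATIC_ASSERT at the end of this hunk is the usual companion to giving an X-macro-generated enum a fixed underlying type: the macro-derived count is checked against the range of that type, so adding code kinds can never silently overflow uint8_t. A standalone sketch of the pattern, with hypothetical names in place of the V8 ones:

// Illustration only, not V8 code; hypothetical list and enum names.
#include <cstdint>
#include <limits>

#define MY_KIND_LIST(V) \
  V(BYTECODE)           \
  V(BASELINE)           \
  V(MAGLEV)             \
  V(TURBOFAN)

enum class MyKind : uint8_t {
#define DEFINE_ENUM(name) name,
  MY_KIND_LIST(DEFINE_ENUM)
#undef DEFINE_ENUM
};

// Count the list entries by expanding each V(...) to "+1".
#define V(...) +1
static constexpr int kMyKindCount = MY_KIND_LIST(V);
#undef V

// If the list ever outgrows uint8_t, this fails at compile time instead of
// silently truncating enumerator values.
static_assert(kMyKindCount <= std::numeric_limits<uint8_t>::max(),
              "MyKind must fit its uint8_t underlying type");

int main() {}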