Commit a980adfc authored by Jakob Gruber, committed by Commit Bot

[nci] Delay NCI compilation to second request

Benchmarks showed a large number of useless NCI compilation
tasks, i.e. code objects were generated and cached but never used.

Ideally, we'd only spawn an NCI task when the generated code will
be used in the future. To approximate this behavior, we now delay
task creation to the *second* time a function is optimized; the
thought being that a function that has been optimized twice is likely
to be optimized (= become hot) again in the future.
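
To illustrate the gating: it amounts to a sticky one-bit filter per
function. The sketch below uses hypothetical names and a plain struct
(not V8 API); the actual logic, shown in the runtime CompileOptimized
hunk below, keeps the bit on the SharedFunctionInfo and also respects
the turbo_nci flags.

  // Sketch of the second-request gate (hypothetical names, not V8 API).
  #include <cstdio>

  struct FunctionState {
    bool has_optimized_at_least_once = false;  // sticky per-function bit
  };

  // Decide, per optimization request, whether to also spawn an NCI task.
  bool ShouldSpawnNciTask(FunctionState& fn) {
    if (fn.has_optimized_at_least_once) return true;  // 2nd+ request: compile
    fn.has_optimized_at_least_once = true;            // 1st request: just mark
    return false;
  }

  int main() {
    FunctionState fn;
    std::printf("%d\n", ShouldSpawnNciTask(fn));  // 0: first optimization
    std::printf("%d\n", ShouldSpawnNciTask(fn));  // 1: second optimization
    return 0;
  }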

Bug: v8:8888
Change-Id: Ia37ae6a4c3861a611086964c20c313dda1974f14
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2414032
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70103}
parent 00b540f2
@@ -912,9 +912,7 @@ void InsertCodeIntoCompilationCache(Isolate* isolate,
                                     OptimizedCompilationInfo* info) {
   if (!CodeKindIsNativeContextIndependentJSFunction(info->code_kind())) return;
 
-  // TODO(jgruber,v8:8888): This should turn into a DCHECK once we
-  // spawn dedicated NCI compile tasks.
-  if (!info->osr_offset().IsNone()) return;
+  DCHECK(info->osr_offset().IsNone());
 
   Handle<Code> code = info->code();
   DCHECK(!info->function_context_specializing());
@@ -1073,6 +1071,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
     PendingOptimizationTable::FunctionWasOptimized(isolate, function);
   }
 
+  // Check the optimized code cache (stored on the SharedFunctionInfo).
   if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
     Handle<Code> cached_code;
     if (GetCodeFromOptimizedCodeCache(function, osr_offset)
@@ -1086,13 +1085,18 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
   DCHECK(shared->is_compiled());
   function->feedback_vector().set_profiler_ticks(0);
 
-  if (CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
-      osr_offset == BailoutId::None()) {
-    // Don't generate NCI code when we've already done so in the past.
+  // Check the compilation cache (stored on the Isolate, shared between native
+  // contexts).
+  if (CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
+    DCHECK(osr_offset.IsNone());
+    DCHECK(FLAG_turbo_nci_as_midtier || shared->has_optimized_at_least_once());
+
     Handle<Code> cached_code;
     if (GetCodeFromCompilationCache(isolate, shared).ToHandle(&cached_code)) {
-      if (FLAG_trace_turbo_nci)
+      CHECK_EQ(cached_code->kind(), CodeKind::NATIVE_CONTEXT_INDEPENDENT);
+      if (FLAG_trace_turbo_nci) {
         CompilationCacheCode::TraceHit(shared, cached_code);
+      }
       return cached_code;
     }
   }
...
@@ -44,10 +44,6 @@ class WorkerThreadRuntimeCallStats;
 using UnoptimizedCompilationJobList =
     std::forward_list<std::unique_ptr<UnoptimizedCompilationJob>>;
 
-inline bool ShouldSpawnExtraNativeContextIndependentCompilationJob() {
-  return FLAG_turbo_nci && !FLAG_turbo_nci_as_midtier;
-}
-
 // The V8 compiler API.
 //
 // This is the central hub for dispatching to the various compilers within V8.
...
@@ -6156,6 +6156,18 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutCode(
     Isolate* isolate, Handle<CompilationCacheTable> cache,
     Handle<SharedFunctionInfo> key, Handle<Code> value) {
   CodeKey k(key);
+
+  {
+    InternalIndex entry = cache->FindEntry(isolate, &k);
+    if (entry.is_found()) {
+      // Update.
+      cache->set(EntryToIndex(entry), *key);
+      cache->set(EntryToIndex(entry) + 1, *value);
+      return cache;
+    }
+  }
+
+  // Insert.
   cache = EnsureCapacity(isolate, cache);
   InternalIndex entry = cache->FindInsertionEntry(isolate, k.Hash());
   cache->set(EntryToIndex(entry), *key);
...
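
The PutCode hunk above turns an unconditional insert into update-or-insert:
repeated caching for the same SharedFunctionInfo now overwrites the existing
entry's adjacent key/value slots (hence EntryToIndex(entry) and + 1) instead
of growing the table. A minimal sketch of the same pattern with a standard
container, for illustration only (std::unordered_map is not V8's flat
hash-table API):

  // Update-or-insert sketch using std::unordered_map (not V8's table).
  #include <cassert>
  #include <string>
  #include <unordered_map>

  using Cache = std::unordered_map<std::string, int>;

  void PutCode(Cache& cache, const std::string& key, int value) {
    auto it = cache.find(key);   // ~FindEntry
    if (it != cache.end()) {
      it->second = value;        // Update in place; table size is unchanged.
      return;
    }
    cache.emplace(key, value);   // ~EnsureCapacity + FindInsertionEntry + set
  }

  int main() {
    Cache cache;
    PutCode(cache, "sfi", 1);
    PutCode(cache, "sfi", 2);    // Overwrites instead of adding a duplicate.
    assert(cache.size() == 1 && cache.at("sfi") == 2);
    return 0;
  }

In the V8 version, EnsureCapacity may allocate a replacement table, which is
why PutCode reassigns and returns the (possibly new) cache handle.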
@@ -186,6 +186,9 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
                     has_static_private_methods_or_accessors,
                     SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
 
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, has_optimized_at_least_once,
+                    SharedFunctionInfo::HasOptimizedAtLeastOnceBit)
+
 BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, may_have_cached_code,
                     SharedFunctionInfo::MayHaveCachedCodeBit)
...
@@ -408,6 +408,10 @@ class SharedFunctionInfo : public HeapObject {
   DECL_BOOLEAN_ACCESSORS(class_scope_has_private_brand)
   DECL_BOOLEAN_ACCESSORS(has_static_private_methods_or_accessors)
 
+  // True if this SFI has been (non-OSR) optimized in the past. This is used to
+  // guide native-context-independent codegen.
+  DECL_BOOLEAN_ACCESSORS(has_optimized_at_least_once)
+
   // True if a Code object associated with this SFI has been inserted into the
   // compilation cache. Note that the cache entry may be removed by aging,
   // hence the 'may'.
...
@@ -44,6 +44,7 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
 bitfield struct SharedFunctionInfoFlags2 extends uint8 {
   class_scope_has_private_brand: bool: 1 bit;
   has_static_private_methods_or_accessors: bool: 1 bit;
+  has_optimized_at_least_once: bool: 1 bit;
   may_have_cached_code: bool: 1 bit;
 }
...
@@ -60,6 +60,10 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
 
 namespace {
 
+inline bool MaybeSpawnNativeContextIndependentCompilationJob() {
+  return FLAG_turbo_nci && !FLAG_turbo_nci_as_midtier;
+}
+
 Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
                         ConcurrencyMode mode) {
   StackLimitCheck check(isolate);
@@ -67,17 +71,28 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
     return isolate->StackOverflow();
   }
 
-  if (function->HasOptimizationMarker()) function->ClearOptimizationMarker();
+  // Compile for the next tier.
   if (!Compiler::CompileOptimized(function, mode, function->NextTier())) {
     return ReadOnlyRoots(isolate).exception();
   }
 
-  if (ShouldSpawnExtraNativeContextIndependentCompilationJob()) {
-    if (!Compiler::CompileOptimized(function, mode,
-                                    CodeKind::NATIVE_CONTEXT_INDEPENDENT)) {
-      return ReadOnlyRoots(isolate).exception();
+  // Possibly compile for NCI caching.
+  if (MaybeSpawnNativeContextIndependentCompilationJob()) {
+    // The first optimization request does not trigger NCI compilation,
+    // since we try to avoid compiling Code that remains unused in the future.
+    // Repeated optimization (possibly in different native contexts) is taken
+    // as a signal that this SFI will continue to be used in the future, thus
+    // we trigger NCI compilation.
+    if (function->shared().has_optimized_at_least_once()) {
+      if (!Compiler::CompileOptimized(function, mode,
+                                      CodeKind::NATIVE_CONTEXT_INDEPENDENT)) {
+        return ReadOnlyRoots(isolate).exception();
+      }
+    } else {
+      function->shared().set_has_optimized_at_least_once(true);
     }
   }
 
   DCHECK(function->is_compiled());
   return function->code();
 }
...