Commit 102304d1 authored by Jakob Gruber, committed by Commit Bot

[nci] Remove --turbo-nci-as-midtier and other flags

The removed flags are:

1. --turbo-nci-as-midtier
2. --turbo-nci-cache-ageing
3. --turbo-nci-delayed-codegen

Flag 1. was used by a testing mode that is no longer used. Flags 2. and
3. were used to experiment with codegen and caching heuristics, which are
no longer needed now that this work is suspended.

Bug: v8:8888
Change-Id: Ib4a89f09340c2d94ee7688928c8235276c1f1032
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2661461
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Mythri Alle <mythria@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72476}
parent df98901c
...@@ -78,10 +78,8 @@ void CompilationCacheScript::Age() { ...@@ -78,10 +78,8 @@ void CompilationCacheScript::Age() {
void CompilationCacheEval::Age() { AgeCustom(this); } void CompilationCacheEval::Age() { AgeCustom(this); }
void CompilationCacheRegExp::Age() { AgeByGeneration(this); } void CompilationCacheRegExp::Age() { AgeByGeneration(this); }
void CompilationCacheCode::Age() { void CompilationCacheCode::Age() {
if (FLAG_turbo_nci_cache_ageing) { if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing(); AgeByGeneration(this);
AgeByGeneration(this);
}
} }
void CompilationSubCache::Iterate(RootVisitor* v) { void CompilationSubCache::Iterate(RootVisitor* v) {
......
...@@ -61,35 +61,6 @@ namespace internal { ...@@ -61,35 +61,6 @@ namespace internal {
namespace { namespace {
bool IsForNativeContextIndependentCachingOnly(CodeKind kind) {
// NCI code is only cached (and not installed on the JSFunction upon
// successful compilation), unless the testing-only
// FLAG_turbo_nci_as_midtier is enabled.
return CodeKindIsNativeContextIndependentJSFunction(kind) &&
!FLAG_turbo_nci_as_midtier;
}
// This predicate is currently needed only because the nci-as-midtier testing
// configuration is special. A quick summary of compilation configurations:
//
// - Turbofan (and currently Turboprop) uses both the optimization marker and
// the optimized code cache (underneath, the marker and the cache share the same
// slot on the feedback vector).
// - Native context independent (NCI) code uses neither the marker nor the
// cache.
// - The NCI-as-midtier testing configuration uses the marker, but not the
// cache.
//
// This predicate supports that last case. In the near future, this last case is
// expected to change s.t. code kinds use the marker iff they use the optimized
// code cache (details still TBD). In that case, the existing
// CodeKindIsStoredInOptimizedCodeCache is sufficient and this extra predicate
// can be removed.
// TODO(jgruber,rmcilroy,v8:8888): Remove this predicate once that has happened.
bool UsesOptimizationMarker(CodeKind kind) {
return !IsForNativeContextIndependentCachingOnly(kind);
}
class CompilerTracer : public AllStatic { class CompilerTracer : public AllStatic {
public: public:
static void PrintTracePrefix(const CodeTracer::Scope& scope, static void PrintTracePrefix(const CodeTracer::Scope& scope,
...@@ -867,7 +838,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache( ...@@ -867,7 +838,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
} }
void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) { void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
DCHECK(UsesOptimizationMarker(compilation_info->code_kind())); DCHECK(!CodeKindIsNativeContextIndependentJSFunction(
compilation_info->code_kind()));
Handle<JSFunction> function = compilation_info->closure(); Handle<JSFunction> function = compilation_info->closure();
if (compilation_info->osr_offset().IsNone()) { if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector = Handle<FeedbackVector> vector =
...@@ -879,12 +851,7 @@ void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) { ...@@ -879,12 +851,7 @@ void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
void InsertCodeIntoOptimizedCodeCache( void InsertCodeIntoOptimizedCodeCache(
OptimizedCompilationInfo* compilation_info) { OptimizedCompilationInfo* compilation_info) {
const CodeKind kind = compilation_info->code_kind(); const CodeKind kind = compilation_info->code_kind();
if (!CodeKindIsStoredInOptimizedCodeCache(kind)) { if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
if (UsesOptimizationMarker(kind)) {
ClearOptimizedCodeCache(compilation_info);
}
return;
}
if (compilation_info->function_context_specializing()) { if (compilation_info->function_context_specializing()) {
// Function context specialization folds-in the function context, so no // Function context specialization folds-in the function context, so no
...@@ -1076,10 +1043,10 @@ MaybeHandle<Code> GetOptimizedCode( ...@@ -1076,10 +1043,10 @@ MaybeHandle<Code> GetOptimizedCode(
// Make sure we clear the optimization marker on the function so that we // Make sure we clear the optimization marker on the function so that we
// don't try to re-optimize. // don't try to re-optimize.
// If compiling for NCI caching only (which does not use the optimization // If compiling for NCI (which does not use the optimization marker), don't
// marker), don't touch the marker to avoid interfering with Turbofan // touch the marker to avoid interfering with Turbofan compilation.
// compilation. if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
if (UsesOptimizationMarker(code_kind) && function->HasOptimizationMarker()) { function->HasOptimizationMarker()) {
function->ClearOptimizationMarker(); function->ClearOptimizationMarker();
} }
...@@ -1123,8 +1090,6 @@ MaybeHandle<Code> GetOptimizedCode( ...@@ -1123,8 +1090,6 @@ MaybeHandle<Code> GetOptimizedCode(
// contexts). // contexts).
if (CodeKindIsNativeContextIndependentJSFunction(code_kind)) { if (CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
DCHECK(osr_offset.IsNone()); DCHECK(osr_offset.IsNone());
DCHECK(FLAG_turbo_nci_as_midtier || !FLAG_turbo_nci_delayed_codegen ||
shared->has_optimized_at_least_once());
Handle<Code> cached_code; Handle<Code> cached_code;
if (GetCodeFromCompilationCache(isolate, shared).ToHandle(&cached_code)) { if (GetCodeFromCompilationCache(isolate, shared).ToHandle(&cached_code)) {
...@@ -1937,16 +1902,16 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function, ...@@ -1937,16 +1902,16 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
code = BUILTIN_CODE(isolate, InterpreterEntryTrampoline); code = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
} }
if (!IsForNativeContextIndependentCachingOnly(code_kind)) { if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
function->set_code(*code); function->set_code(*code);
} }
// Check postconditions on success. // Check postconditions on success.
DCHECK(!isolate->has_pending_exception()); DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared().is_compiled()); DCHECK(function->shared().is_compiled());
DCHECK(IsForNativeContextIndependentCachingOnly(code_kind) || DCHECK(CodeKindIsNativeContextIndependentJSFunction(code_kind) ||
function->is_compiled()); function->is_compiled());
if (UsesOptimizationMarker(code_kind)) { if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
DCHECK_IMPLIES(function->HasOptimizationMarker(), DCHECK_IMPLIES(function->HasOptimizationMarker(),
function->IsInOptimizationQueue()); function->IsInOptimizationQueue());
DCHECK_IMPLIES(function->HasOptimizationMarker(), DCHECK_IMPLIES(function->HasOptimizationMarker(),
...@@ -3065,7 +3030,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job, ...@@ -3065,7 +3030,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
CodeKind code_kind = compilation_info->code_kind(); CodeKind code_kind = compilation_info->code_kind();
const bool should_install_code_on_function = const bool should_install_code_on_function =
!IsForNativeContextIndependentCachingOnly(code_kind); !CodeKindIsNativeContextIndependentJSFunction(code_kind);
if (should_install_code_on_function) { if (should_install_code_on_function) {
// Reset profiler ticks, function is no longer considered hot. // Reset profiler ticks, function is no longer considered hot.
compilation_info->closure()->feedback_vector().set_profiler_ticks(0); compilation_info->closure()->feedback_vector().set_profiler_ticks(0);
...@@ -3100,7 +3065,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job, ...@@ -3100,7 +3065,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
CompilerTracer::TraceAbortedJob(isolate, compilation_info); CompilerTracer::TraceAbortedJob(isolate, compilation_info);
compilation_info->closure()->set_code(shared->GetCode()); compilation_info->closure()->set_code(shared->GetCode());
// Clear the InOptimizationQueue marker, if it exists. // Clear the InOptimizationQueue marker, if it exists.
if (UsesOptimizationMarker(code_kind) && if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
compilation_info->closure()->IsInOptimizationQueue()) { compilation_info->closure()->IsInOptimizationQueue()) {
compilation_info->closure()->ClearOptimizationMarker(); compilation_info->closure()->ClearOptimizationMarker();
} }
......
...@@ -751,20 +751,10 @@ DEFINE_BOOL(turbo_nci, false, ...@@ -751,20 +751,10 @@ DEFINE_BOOL(turbo_nci, false,
// TODO(v8:8888): Temporary until NCI caching is implemented or // TODO(v8:8888): Temporary until NCI caching is implemented or
// feedback collection is made unconditional. // feedback collection is made unconditional.
DEFINE_IMPLICATION(turbo_nci, turbo_collect_feedback_in_generic_lowering) DEFINE_IMPLICATION(turbo_nci, turbo_collect_feedback_in_generic_lowering)
DEFINE_BOOL(turbo_nci_as_midtier, false,
"insert NCI as a midtier compiler for testing purposes.")
DEFINE_BOOL(print_nci_code, false, "print native context independent code.") DEFINE_BOOL(print_nci_code, false, "print native context independent code.")
DEFINE_BOOL(trace_turbo_nci, false, "trace native context independent code.") DEFINE_BOOL(trace_turbo_nci, false, "trace native context independent code.")
DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, true, DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, true,
"enable experimental feedback collection in generic lowering.") "enable experimental feedback collection in generic lowering.")
// TODO(jgruber,v8:8888): Remove this flag once we've settled on a codegen
// strategy.
DEFINE_BOOL(turbo_nci_delayed_codegen, true,
"delay NCI codegen to reduce useless compilation work.")
// TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing
// strategy.
DEFINE_BOOL(turbo_nci_cache_ageing, false,
"enable ageing of the NCI code cache.")
// TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing // TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing
// strategy. // strategy.
DEFINE_BOOL(isolate_script_cache_ageing, true, DEFINE_BOOL(isolate_script_cache_ageing, true,
......
...@@ -109,8 +109,7 @@ inline OptimizationTier GetTierForCodeKind(CodeKind kind) { ...@@ -109,8 +109,7 @@ inline OptimizationTier GetTierForCodeKind(CodeKind kind) {
: OptimizationTier::kMidTier; : OptimizationTier::kMidTier;
} }
if (kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) { if (kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
return FLAG_turbo_nci_as_midtier ? OptimizationTier::kMidTier return OptimizationTier::kTopTier;
: OptimizationTier::kTopTier;
} }
return OptimizationTier::kNone; return OptimizationTier::kNone;
} }
......
...@@ -150,9 +150,7 @@ bool JSFunction::ActiveTierIsMidtierTurboprop() const { ...@@ -150,9 +150,7 @@ bool JSFunction::ActiveTierIsMidtierTurboprop() const {
} }
CodeKind JSFunction::NextTier() const { CodeKind JSFunction::NextTier() const {
if (V8_UNLIKELY(FLAG_turbo_nci_as_midtier && ActiveTierIsIgnition())) { if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
return CodeKind::NATIVE_CONTEXT_INDEPENDENT;
} else if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
return CodeKind::TURBOFAN; return CodeKind::TURBOFAN;
} else if (V8_UNLIKELY(FLAG_turboprop)) { } else if (V8_UNLIKELY(FLAG_turboprop)) {
DCHECK(ActiveTierIsIgnition()); DCHECK(ActiveTierIsIgnition());
......
...@@ -227,9 +227,6 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, ...@@ -227,9 +227,6 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
has_static_private_methods_or_accessors, has_static_private_methods_or_accessors,
SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit) SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, has_optimized_at_least_once,
SharedFunctionInfo::HasOptimizedAtLeastOnceBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, may_have_cached_code, BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, may_have_cached_code,
SharedFunctionInfo::MayHaveCachedCodeBit) SharedFunctionInfo::MayHaveCachedCodeBit)
......
...@@ -390,10 +390,6 @@ class SharedFunctionInfo ...@@ -390,10 +390,6 @@ class SharedFunctionInfo
DECL_BOOLEAN_ACCESSORS(class_scope_has_private_brand) DECL_BOOLEAN_ACCESSORS(class_scope_has_private_brand)
DECL_BOOLEAN_ACCESSORS(has_static_private_methods_or_accessors) DECL_BOOLEAN_ACCESSORS(has_static_private_methods_or_accessors)
// True if this SFI has been (non-OSR) optimized in the past. This is used to
// guide native-context-independent codegen.
DECL_BOOLEAN_ACCESSORS(has_optimized_at_least_once)
// True if a Code object associated with this SFI has been inserted into the // True if a Code object associated with this SFI has been inserted into the
// compilation cache. Note that the cache entry may be removed by aging, // compilation cache. Note that the cache entry may be removed by aging,
// hence the 'may'. // hence the 'may'.
......
...@@ -42,7 +42,6 @@ bitfield struct SharedFunctionInfoFlags extends uint32 { ...@@ -42,7 +42,6 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
bitfield struct SharedFunctionInfoFlags2 extends uint8 { bitfield struct SharedFunctionInfoFlags2 extends uint8 {
class_scope_has_private_brand: bool: 1 bit; class_scope_has_private_brand: bool: 1 bit;
has_static_private_methods_or_accessors: bool: 1 bit; has_static_private_methods_or_accessors: bool: 1 bit;
has_optimized_at_least_once: bool: 1 bit;
may_have_cached_code: bool: 1 bit; may_have_cached_code: bool: 1 bit;
} }
......
...@@ -26,26 +26,9 @@ namespace { ...@@ -26,26 +26,9 @@ namespace {
// Returns false iff an exception was thrown. // Returns false iff an exception was thrown.
bool MaybeSpawnNativeContextIndependentCompilationJob( bool MaybeSpawnNativeContextIndependentCompilationJob(
Handle<JSFunction> function, ConcurrencyMode mode) { Handle<JSFunction> function, ConcurrencyMode mode) {
if (!FLAG_turbo_nci || FLAG_turbo_nci_as_midtier) { if (!FLAG_turbo_nci) return true; // Nothing to do.
return true; // Nothing to do. return Compiler::CompileOptimized(function, mode,
} CodeKind::NATIVE_CONTEXT_INDEPENDENT);
// If delayed codegen is enabled, the first optimization request does not
// trigger NCI compilation, since we try to avoid compiling Code that
// remains unused in the future. Repeated optimization (possibly in
// different native contexts) is taken as a signal that this SFI will
// continue to be used in the future, thus we trigger NCI compilation.
if (!FLAG_turbo_nci_delayed_codegen ||
function->shared().has_optimized_at_least_once()) {
if (!Compiler::CompileOptimized(function, mode,
CodeKind::NATIVE_CONTEXT_INDEPENDENT)) {
return false;
}
} else {
function->shared().set_has_optimized_at_least_once(true);
}
return true;
} }
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function, Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
......
...@@ -949,8 +949,7 @@ TEST(DecideToPretenureDuringCompilation) { ...@@ -949,8 +949,7 @@ TEST(DecideToPretenureDuringCompilation) {
// compilation. // compilation.
if (!i::FLAG_opt || i::FLAG_always_opt || i::FLAG_minor_mc || if (!i::FLAG_opt || i::FLAG_always_opt || i::FLAG_minor_mc ||
i::FLAG_stress_incremental_marking || i::FLAG_optimize_for_size || i::FLAG_stress_incremental_marking || i::FLAG_optimize_for_size ||
i::FLAG_turbo_nci || i::FLAG_turbo_nci_as_midtier || i::FLAG_turbo_nci || i::FLAG_stress_concurrent_allocation) {
i::FLAG_stress_concurrent_allocation) {
return; return;
} }
......
...@@ -3978,7 +3978,6 @@ TEST(FastApiCPUProfiler) { ...@@ -3978,7 +3978,6 @@ TEST(FastApiCPUProfiler) {
// None of the following configurations include JSCallReducer. // None of the following configurations include JSCallReducer.
if (i::FLAG_jitless) return; if (i::FLAG_jitless) return;
if (i::FLAG_turboprop) return; if (i::FLAG_turboprop) return;
if (i::FLAG_turbo_nci_as_midtier) return;
FLAG_SCOPE_EXTERNAL(opt); FLAG_SCOPE_EXTERNAL(opt);
FLAG_SCOPE_EXTERNAL(turbo_fast_api_calls); FLAG_SCOPE_EXTERNAL(turbo_fast_api_calls);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment