Commit 714b95f0 authored by machenbach's avatar machenbach Committed by Commit bot

Revert of [Intepreter] Always use BytecodeGraphBuilder when...

Revert of [Intepreter] Always use BytecodeGraphBuilder when --turbo-from-bytecode (patchset #3 id:80001 of https://codereview.chromium.org/2156753002/ )

Reason for revert:
Breaks tsan:
https://build.chromium.org/p/client.v8/builders/V8%20Linux64%20TSAN/builds/10758

Original issue's description:
> [Intepreter] Always use BytecodeGraphBuilder when --turbo-from-bytecode
>
> Always use the BytecodeGraphBuilder when the --turbo-from-bytecode flag
> is enabled, assuming the function should be compiled for Ignition.
> Adds a new MaybeOptimizeIgnition function to runtime-profiler
> which is called if the function should be optimized from bytecode
> rather than going via full-codegen.
>
> BUG=v8:4280
>
> Committed: https://crrev.com/9ca7db914be88e6792a88eab4a1988ee031d70c4
> Cr-Commit-Position: refs/heads/master@{#37921}

TBR=mstarzinger@chromium.org,rmcilroy@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=v8:4280

Review-Url: https://codereview.chromium.org/2165223002
Cr-Commit-Position: refs/heads/master@{#37925}
parent 31388627
...@@ -441,7 +441,7 @@ void EnsureFeedbackMetadata(CompilationInfo* info) { ...@@ -441,7 +441,7 @@ void EnsureFeedbackMetadata(CompilationInfo* info) {
info->literal()->feedback_vector_spec())); info->literal()->feedback_vector_spec()));
} }
bool ShouldUseIgnition(CompilationInfo* info) { bool UseIgnition(CompilationInfo* info) {
DCHECK(info->has_shared_info()); DCHECK(info->has_shared_info());
// When requesting debug code as a replacement for existing code, we provide // When requesting debug code as a replacement for existing code, we provide
...@@ -489,7 +489,7 @@ bool GenerateUnoptimizedCode(CompilationInfo* info) { ...@@ -489,7 +489,7 @@ bool GenerateUnoptimizedCode(CompilationInfo* info) {
return true; return true;
} }
} }
if (FLAG_ignition && ShouldUseIgnition(info)) { if (FLAG_ignition && UseIgnition(info)) {
success = interpreter::Interpreter::MakeBytecode(info); success = interpreter::Interpreter::MakeBytecode(info);
} else { } else {
success = FullCodeGenerator::MakeCode(info); success = FullCodeGenerator::MakeCode(info);
...@@ -801,11 +801,8 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function, ...@@ -801,11 +801,8 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode"); TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
// TurboFan can optimize directly from existing bytecode. // TurboFan can optimize directly from existing bytecode.
if (FLAG_turbo_from_bytecode && use_turbofan && ShouldUseIgnition(info)) { if (FLAG_turbo_from_bytecode && use_turbofan &&
if (!Compiler::EnsureBytecode(info)) { info->shared_info()->HasBytecodeArray()) {
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return MaybeHandle<Code>();
}
info->MarkAsOptimizeFromBytecode(); info->MarkAsOptimizeFromBytecode();
} }
...@@ -1347,16 +1344,6 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) { ...@@ -1347,16 +1344,6 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
return infos; return infos;
} }
bool Compiler::EnsureBytecode(CompilationInfo* info) {
DCHECK(ShouldUseIgnition(info));
if (!info->shared_info()->HasBytecodeArray()) {
DCHECK(!info->shared_info()->is_compiled());
if (GetUnoptimizedCode(info).is_null()) return false;
}
DCHECK(info->shared_info()->HasBytecodeArray());
return true;
}
// TODO(turbofan): In the future, unoptimized code with deopt support could // TODO(turbofan): In the future, unoptimized code with deopt support could
// be generated lazily once deopt is triggered. // be generated lazily once deopt is triggered.
bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) { bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
...@@ -1418,20 +1405,6 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) { ...@@ -1418,20 +1405,6 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
return true; return true;
} }
// static
Compiler::CompilationTier Compiler::NextCompilationTier(JSFunction* function) {
Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
if (shared->code()->is_interpreter_trampoline_builtin()) {
if (FLAG_turbo_from_bytecode && UseTurboFan(shared)) {
return OPTIMIZED;
} else {
return BASELINE;
}
} else {
return OPTIMIZED;
}
}
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval( MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info, Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode, Handle<Context> context, LanguageMode language_mode,
......
...@@ -37,7 +37,6 @@ class Compiler : public AllStatic { ...@@ -37,7 +37,6 @@ class Compiler : public AllStatic {
public: public:
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION }; enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT }; enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
enum CompilationTier { INTERPRETED, BASELINE, OPTIMIZED };
// =========================================================================== // ===========================================================================
// The following family of methods ensures a given function is compiled. The // The following family of methods ensures a given function is compiled. The
...@@ -66,12 +65,6 @@ class Compiler : public AllStatic { ...@@ -66,12 +65,6 @@ class Compiler : public AllStatic {
static bool Analyze(ParseInfo* info); static bool Analyze(ParseInfo* info);
// Adds deoptimization support, requires ParseAndAnalyze. // Adds deoptimization support, requires ParseAndAnalyze.
static bool EnsureDeoptimizationSupport(CompilationInfo* info); static bool EnsureDeoptimizationSupport(CompilationInfo* info);
// Ensures that bytecode is generated, calls ParseAndAnalyze internally.
static bool EnsureBytecode(CompilationInfo* info);
// The next compilation tier which the function should be compiled to for
// optimization. This is used as a hint by the runtime profiler.
static CompilationTier NextCompilationTier(JSFunction* function);
// =========================================================================== // ===========================================================================
// The following family of methods instantiates new functions for scripts or // The following family of methods instantiates new functions for scripts or
......
...@@ -110,6 +110,11 @@ static void TraceRecompile(JSFunction* function, const char* reason, ...@@ -110,6 +110,11 @@ static void TraceRecompile(JSFunction* function, const char* reason,
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) { void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
TraceRecompile(function, reason, "optimized"); TraceRecompile(function, reason, "optimized");
// TODO(4280): Fix this to check that the function is compiled for baseline
// once we have a standard way to check that. For now, we rely on the fact
// that baseline code doesn't have a bytecode array.
DCHECK(!function->shared()->HasBytecodeArray());
function->AttemptConcurrentOptimization(); function->AttemptConcurrentOptimization();
} }
...@@ -248,7 +253,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function, ...@@ -248,7 +253,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
} }
} }
void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function) { void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function) {
if (function->IsInOptimizationQueue()) return; if (function->IsInOptimizationQueue()) return;
SharedFunctionInfo* shared = function->shared(); SharedFunctionInfo* shared = function->shared();
...@@ -256,6 +261,8 @@ void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function) { ...@@ -256,6 +261,8 @@ void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function) {
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize. // than kMaxToplevelSourceSize.
// TODO(rmcilroy): Consider whether we should optimize small functions when
// they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
if (function->IsMarkedForBaseline() || function->IsMarkedForOptimization() || if (function->IsMarkedForBaseline() || function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() || function->IsMarkedForConcurrentOptimization() ||
...@@ -276,58 +283,6 @@ void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function) { ...@@ -276,58 +283,6 @@ void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function) {
} }
} }
void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function) {
if (function->IsInOptimizationQueue()) return;
SharedFunctionInfo* shared = function->shared();
int ticks = shared->profiler_ticks();
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize.
if (function->IsMarkedForBaseline() || function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized()) {
// TODO(rmcilroy): Support OSR in these cases.
return;
}
if (shared->optimization_disabled()) {
if (shared->deopt_count() >= FLAG_max_opt_count) {
// If optimization was disabled due to many deoptimizations,
// then check if the function is hot and try to reenable optimization.
if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
shared->set_profiler_ticks(0);
shared->TryReenableOptimization();
}
}
return;
}
if (function->IsOptimized()) return;
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
&generic_percentage);
if (type_percentage >= FLAG_type_info_threshold &&
generic_percentage <= FLAG_generic_ic_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, "hot and stable");
} else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
Optimize(function, "not much type info but very hot");
} else {
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function->PrintName();
PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
type_percentage);
}
}
}
// TODO(rmcilroy): Consider whether we should optimize small functions when
// they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
}
void RuntimeProfiler::MarkCandidatesForOptimization() { void RuntimeProfiler::MarkCandidatesForOptimization() {
HandleScope scope(isolate_); HandleScope scope(isolate_);
...@@ -356,18 +311,10 @@ void RuntimeProfiler::MarkCandidatesForOptimization() { ...@@ -356,18 +311,10 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
} }
} }
Compiler::CompilationTier next_tier =
Compiler::NextCompilationTier(function);
if (frame->is_interpreted()) { if (frame->is_interpreted()) {
if (next_tier == Compiler::BASELINE) { DCHECK(!frame->is_optimized());
DCHECK(!frame->is_optimized()); MaybeOptimizeIgnition(function);
MaybeBaselineIgnition(function);
} else {
DCHECK_EQ(next_tier, Compiler::OPTIMIZED);
MaybeOptimizeIgnition(function);
}
} else { } else {
DCHECK_EQ(next_tier, Compiler::OPTIMIZED);
MaybeOptimizeFullCodegen(function, frame_count, frame->is_optimized()); MaybeOptimizeFullCodegen(function, frame_count, frame->is_optimized());
} }
} }
......
...@@ -26,7 +26,6 @@ class RuntimeProfiler { ...@@ -26,7 +26,6 @@ class RuntimeProfiler {
private: private:
void MaybeOptimizeFullCodegen(JSFunction* function, int frame_count, void MaybeOptimizeFullCodegen(JSFunction* function, int frame_count,
bool frame_optimized); bool frame_optimized);
void MaybeBaselineIgnition(JSFunction* function);
void MaybeOptimizeIgnition(JSFunction* function); void MaybeOptimizeIgnition(JSFunction* function);
void Optimize(JSFunction* function, const char* reason); void Optimize(JSFunction* function, const char* reason);
void Baseline(JSFunction* function, const char* reason); void Baseline(JSFunction* function, const char* reason);
......
...@@ -401,37 +401,6 @@ ...@@ -401,37 +401,6 @@
############################################################################## ##############################################################################
['ignition or ignition_turbofan', { ['ignition or ignition_turbofan', {
# TODO(rmcilroy,4837): Inlining is currently disabled for the BytecodeGraphBuilder
# (see InliningPhase::Run). Also, we don't set a LoadContextSlot for a function as
# immutable in the BytecodeGraphBuilder, therefore no inlining happens.
'test-run-inlining/InlineLoopGuardedTwice': [FAIL],
'test-run-inlining/InlineSurplusArgumentsDeopt': [FAIL],
'test-run-inlining/InlineTwice': [FAIL],
'test-run-inlining/InlineSurplusArgumentsObject': [FAIL],
'test-run-inlining/InlineTwiceDependentDiamond': [FAIL],
'test-run-inlining/InlineWithArguments': [FAIL],
'test-run-inlining/InlineLoopUnguardedTwice': [FAIL],
'test-run-inlining/InlineOmitArgumentsObject': [FAIL],
'test-run-inlining/InlineLoopUnguardedOnce': [FAIL],
'test-run-inlining/InlineOmitArgumentsDeopt': [FAIL],
'test-run-inlining/InlineTwiceDependentDiamondDifferent': [FAIL],
'test-run-inlining/SimpleInliningContext': [FAIL],
'test-run-inlining/InlineMutuallyRecursive': [FAIL],
'test-run-inlining/InlineLoopGuardedEmpty': [FAIL],
'test-run-inlining/InlineLoopGuardedOnce': [FAIL],
'test-run-inlining/InlineOmitArguments': [FAIL],
'test-run-inlining/SimpleInlining': [FAIL],
'test-run-inlining/InlineLoopUnguardedEmpty': [FAIL],
'test-run-inlining/InlineNestedBuiltin': [FAIL],
'test-run-inlining/InlineSurplusArguments': [FAIL],
'test-run-inlining/InlineBuiltin': [FAIL],
'test-run-inlining/InlineTwiceDependent': [FAIL],
'test-run-inlining/SimpleInliningContextDeopt': [FAIL],
# TODO(rmcilroy,4766): Requires BytecodeGraphBuilder to track source position
# on nodes (behind --turbo_source_positions flag).
'test-cpu-profiler/TickLinesOptimized': [FAIL],
# TODO(rmcilroy,4680): Related to lack of code flushing. Check failed: !function->shared()->is_compiled() || function->IsOptimized(). # TODO(rmcilroy,4680): Related to lack of code flushing. Check failed: !function->shared()->is_compiled() || function->IsOptimized().
'test-heap/TestCodeFlushingPreAged': [FAIL], 'test-heap/TestCodeFlushingPreAged': [FAIL],
'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL], 'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
......
...@@ -208,9 +208,7 @@ class FunctionTester : public InitializedHandleScope { ...@@ -208,9 +208,7 @@ class FunctionTester : public InitializedHandleScope {
CompilationInfo info(&parse_info, function); CompilationInfo info(&parse_info, function);
info.MarkAsDeoptimizationEnabled(); info.MarkAsDeoptimizationEnabled();
if (!FLAG_turbo_from_bytecode) { CHECK(Parser::ParseStatic(info.parse_info()));
CHECK(Parser::ParseStatic(info.parse_info()));
}
info.SetOptimizing(); info.SetOptimizing();
if (flags_ & CompilationInfo::kFunctionContextSpecializing) { if (flags_ & CompilationInfo::kFunctionContextSpecializing) {
info.MarkAsFunctionContextSpecializing(); info.MarkAsFunctionContextSpecializing();
...@@ -218,8 +216,7 @@ class FunctionTester : public InitializedHandleScope { ...@@ -218,8 +216,7 @@ class FunctionTester : public InitializedHandleScope {
if (flags_ & CompilationInfo::kInliningEnabled) { if (flags_ & CompilationInfo::kInliningEnabled) {
info.MarkAsInliningEnabled(); info.MarkAsInliningEnabled();
} }
if (FLAG_turbo_from_bytecode) { if (FLAG_turbo_from_bytecode && function->shared()->HasBytecodeArray()) {
CHECK(Compiler::EnsureBytecode(&info));
info.MarkAsOptimizeFromBytecode(); info.MarkAsOptimizeFromBytecode();
} else { } else {
CHECK(Compiler::Analyze(info.parse_info())); CHECK(Compiler::Analyze(info.parse_info()));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment