Commit 5f3ed078 authored by Jakob Gruber, committed by V8 LUCI CQ

[compiler] Remove ContinuationForConcurrentOptimization

.. to simplify logic within compiler.cc. GetOrCompileOptimized now only
returns Code object if the requested optimized Code object is available.

This change also required updating CompileLazy to install the
appropriate Code object before potentially calling CompileOptimized_*
runtime functions in order to satisfy the is_compiled precondition.

Bug: v8:12161
Change-Id: I991dbcc0ba8f3d635aa1e1f06e4cffd89e08a47b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3562978
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79762}
parent 3f5a3df6
...@@ -129,6 +129,10 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) { ...@@ -129,6 +129,10 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// If feedback cell isn't initialized, compile function // If feedback cell isn't initialized, compile function
GotoIf(IsUndefined(feedback_cell_value), &compile_function); GotoIf(IsUndefined(feedback_cell_value), &compile_function);
CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
isolate(), CompileLazy))));
StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
Label maybe_use_sfi_code(this); Label maybe_use_sfi_code(this);
// If there is no feedback, don't check for optimized code. // If there is no feedback, don't check for optimized code.
GotoIf(HasInstanceType(feedback_cell_value, CLOSURE_FEEDBACK_CELL_ARRAY_TYPE), GotoIf(HasInstanceType(feedback_cell_value, CLOSURE_FEEDBACK_CELL_ARRAY_TYPE),
...@@ -145,13 +149,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) { ...@@ -145,13 +149,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// optimized Code object (we'd have tail-called it above). A usual case would // optimized Code object (we'd have tail-called it above). A usual case would
// be the InterpreterEntryTrampoline to start executing existing bytecode. // be the InterpreterEntryTrampoline to start executing existing bytecode.
BIND(&maybe_use_sfi_code); BIND(&maybe_use_sfi_code);
CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE( Label tailcall_code(this), baseline(this);
isolate(), CompileLazy))));
StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
Label tailcall_code(this);
Label baseline(this);
TVARIABLE(CodeT, code); TVARIABLE(CodeT, code);
// Check if we have baseline code. // Check if we have baseline code.
...@@ -170,8 +168,8 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) { ...@@ -170,8 +168,8 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
function)); function));
}); });
Goto(&tailcall_code); Goto(&tailcall_code);
BIND(&tailcall_code); BIND(&tailcall_code);
// Jump to the selected code entry.
GenerateTailCallToJSCode(code.value(), function); GenerateTailCallToJSCode(code.value(), function);
BIND(&compile_function); BIND(&compile_function);
......
...@@ -1066,27 +1066,6 @@ bool CompileTurbofan_Concurrent(Isolate* isolate, ...@@ -1066,27 +1066,6 @@ bool CompileTurbofan_Concurrent(Isolate* isolate,
return true; return true;
} }
// Returns the code object at which execution continues after a concurrent
// optimization job has been started (but not finished), or the empty handle
// if no such continuation code should be installed (the OSR case).
// NOTE(review): this helper is removed by this commit; callers now receive
// the empty handle directly and install fallback code themselves.
MaybeHandle<CodeT> ContinuationForConcurrentOptimization(
Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset) {
if (IsOSR(osr_offset)) {
// OSR tierup differs from plain tierup in that we don't simply continue
// execution at the returned code. Instead, we must signal unavailability
// of OSR'd code by returning the empty handle.
return {};
}
DCHECK(!IsOSR(osr_offset));
// Non-OSR case: if baseline code exists on the SharedFunctionInfo, install
// it on the function and continue execution there while the concurrent
// optimization job runs.
if (function->shared().HasBaselineCode()) {
CodeT baseline_code = function->shared().baseline_code(kAcquireLoad);
function->set_code(baseline_code);
return handle(baseline_code, isolate);
}
// Otherwise the function must currently be executing through the
// interpreter, so continue at the InterpreterEntryTrampoline to run the
// existing bytecode.
DCHECK(function->ActiveTierIsIgnition());
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
enum class CompileResultBehavior { enum class CompileResultBehavior {
// Default behavior, i.e. install the result, insert into caches, etc. // Default behavior, i.e. install the result, insert into caches, etc.
kDefault, kDefault,
...@@ -1136,10 +1115,7 @@ MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate, ...@@ -1136,10 +1115,7 @@ MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
// Prepare the job and launch concurrent compilation, or compile now. // Prepare the job and launch concurrent compilation, or compile now.
if (IsConcurrent(mode)) { if (IsConcurrent(mode)) {
if (CompileTurbofan_Concurrent(isolate, std::move(job))) { if (CompileTurbofan_Concurrent(isolate, std::move(job))) return {};
return ContinuationForConcurrentOptimization(isolate, function,
osr_offset);
}
} else { } else {
DCHECK(IsSynchronous(mode)); DCHECK(IsSynchronous(mode));
if (CompileTurbofan_NotConcurrent(isolate, job.get())) { if (CompileTurbofan_NotConcurrent(isolate, job.get())) {
...@@ -1192,8 +1168,7 @@ MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function, ...@@ -1192,8 +1168,7 @@ MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
// Remember that the function is currently being processed. // Remember that the function is currently being processed.
SetTieringState(*function, osr_offset, TieringState::kInProgress); SetTieringState(*function, osr_offset, TieringState::kInProgress);
// The code that triggered optimization continues execution here. return {};
return ContinuationForConcurrentOptimization(isolate, function, osr_offset);
#else // V8_ENABLE_MAGLEV #else // V8_ENABLE_MAGLEV
UNREACHABLE(); UNREACHABLE();
#endif // V8_ENABLE_MAGLEV #endif // V8_ENABLE_MAGLEV
...@@ -2256,26 +2231,15 @@ void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function, ...@@ -2256,26 +2231,15 @@ void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
} }
Handle<CodeT> code; Handle<CodeT> code;
if (!GetOrCompileOptimized(isolate, function, mode, code_kind) if (GetOrCompileOptimized(isolate, function, mode, code_kind)
.ToHandle(&code)) { .ToHandle(&code)) {
// Optimization failed, get the existing code. We could have optimized code function->set_code(*code, kReleaseStore);
// from a lower tier here. Unoptimized code must exist already if we are
// optimizing.
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared().is_compiled());
DCHECK(function->shared().HasBytecodeArray());
code = ContinuationForConcurrentOptimization(isolate, function,
BytecodeOffset::None())
.ToHandleChecked();
} }
function->set_code(*code, kReleaseStore);
#ifdef DEBUG #ifdef DEBUG
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception()); DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared().is_compiled());
DCHECK(function->is_compiled()); DCHECK(function->is_compiled());
DCHECK(function->shared().HasBytecodeArray());
const TieringState tiering_state = function->tiering_state(); const TieringState tiering_state = function->tiering_state();
DCHECK(IsNone(tiering_state) || IsInProgress(tiering_state)); DCHECK(IsNone(tiering_state) || IsInProgress(tiering_state));
DCHECK_IMPLIES(IsInProgress(tiering_state), function->ChecksTieringState()); DCHECK_IMPLIES(IsInProgress(tiering_state), function->ChecksTieringState());
......
...@@ -31,6 +31,11 @@ namespace { ...@@ -31,6 +31,11 @@ namespace {
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function, Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
CodeKind target_kind, ConcurrencyMode mode) { CodeKind target_kind, ConcurrencyMode mode) {
// As a pre- and post-condition of CompileOptimized, the function *must* be
// compiled, i.e. the installed Code object must not be CompileLazy.
IsCompiledScope is_compiled_scope(function->shared(), isolate);
DCHECK(is_compiled_scope.is_compiled());
StackLimitCheck check(isolate); StackLimitCheck check(isolate);
// Concurrent optimization runs on another thread, thus no additional gap. // Concurrent optimization runs on another thread, thus no additional gap.
const int gap = const int gap =
...@@ -39,8 +44,6 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function, ...@@ -39,8 +44,6 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
Compiler::CompileOptimized(isolate, function, mode, target_kind); Compiler::CompileOptimized(isolate, function, mode, target_kind);
// As a post-condition of CompileOptimized, the function *must* be compiled,
// i.e. the installed Code object must not be the CompileLazy builtin.
DCHECK(function->is_compiled()); DCHECK(function->is_compiled());
return function->code(); return function->code();
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment