Commit 8b4272c2 authored by Jakob Linke, committed by V8 LUCI CQ

[compiler] Merge all CompileFoo_Bar runtime functions

There's no need to decode the tiering state in generated code; merge
these runtime functions and decode the state in the new generic
CompileOptimized runtime function instead.

CompileMaglev_Synchronized
CompileMaglev_Concurrent
CompileTurbofan_Synchronized
CompileTurbofan_Concurrent

->

CompileOptimized

Bug: v8:7700
Change-Id: I36f3964bb5bb72b35f65f454c3ad3db0656c14bd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3825877
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Auto-Submit: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82383}
parent 99beb35b
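
For context, here is a minimal standalone C++ sketch of the idea (not V8 source; the enum members, the helper name, and the printf output are illustrative only). Before this CL, generated code decoded the TieringState bits and picked one of four specialized runtime functions; after it, generated code tail-calls a single CompileOptimized entry point and the decode happens in the runtime function, as modeled below.

// Standalone model of the runtime-side decode introduced by this CL.
// Compile with any C++11 compiler; names mirror the diff but are not V8 code.
#include <cstdio>
#include <cstdlib>

enum class TieringState {
  kNone,
  kInProgress,
  kRequestMaglev_Synchronous,
  kRequestMaglev_Concurrent,
  kRequestTurbofan_Synchronous,
  kRequestTurbofan_Concurrent,
};
enum class CodeKind { MAGLEV, TURBOFAN };
enum class ConcurrencyMode { kSynchronous, kConcurrent };

// Stand-in for Runtime_CompileOptimized: map the pending tiering request to a
// target code kind and concurrency mode in one place.
void CompileOptimized(TieringState state) {
  CodeKind target_kind;
  ConcurrencyMode mode;
  switch (state) {
    case TieringState::kRequestMaglev_Synchronous:
      target_kind = CodeKind::MAGLEV;
      mode = ConcurrencyMode::kSynchronous;
      break;
    case TieringState::kRequestMaglev_Concurrent:
      target_kind = CodeKind::MAGLEV;
      mode = ConcurrencyMode::kConcurrent;
      break;
    case TieringState::kRequestTurbofan_Synchronous:
      target_kind = CodeKind::TURBOFAN;
      mode = ConcurrencyMode::kSynchronous;
      break;
    case TieringState::kRequestTurbofan_Concurrent:
      target_kind = CodeKind::TURBOFAN;
      mode = ConcurrencyMode::kConcurrent;
      break;
    case TieringState::kNone:
    case TieringState::kInProgress:
      // Generated code only reaches the runtime when a request is pending.
      std::abort();
  }
  std::printf("compile kind=%d mode=%d\n", static_cast<int>(target_kind),
              static_cast<int>(mode));
}

int main() {
  // Generated code no longer needs to know which request is pending:
  CompileOptimized(TieringState::kRequestTurbofan_Concurrent);
  return 0;
}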
......@@ -29,17 +29,6 @@ void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
GenerateTailCallToJSCode(code, function);
}
void LazyBuiltinsAssembler::TailCallRuntimeIfStateEquals(
TNode<Uint32T> state, TieringState expected_state,
Runtime::FunctionId function_id, TNode<JSFunction> function) {
Label no_match(this);
GotoIfNot(
Word32Equal(state, Uint32Constant(static_cast<uint32_t>(expected_state))),
&no_match);
GenerateTailCallToReturnedCode(function_id, function);
BIND(&no_match);
}
void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
TNode<JSFunction> function, TNode<FeedbackVector> feedback_vector) {
Label fallthrough(this), may_have_optimized_code(this);
......@@ -58,21 +47,8 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
FeedbackVector::kTieringStateIsAnyRequestMask),
&may_have_optimized_code);
// TODO(ishell): introduce Runtime::kHandleTieringState and check
// all these state values there.
TNode<Uint32T> state =
DecodeWord32<FeedbackVector::TieringStateBits>(optimization_state);
TailCallRuntimeIfStateEquals(state,
TieringState::kRequestTurbofan_Synchronous,
Runtime::kCompileTurbofan_Synchronous, function);
TailCallRuntimeIfStateEquals(state, TieringState::kRequestTurbofan_Concurrent,
Runtime::kCompileTurbofan_Concurrent, function);
TailCallRuntimeIfStateEquals(state, TieringState::kRequestMaglev_Synchronous,
Runtime::kCompileMaglev_Synchronous, function);
TailCallRuntimeIfStateEquals(state, TieringState::kRequestMaglev_Concurrent,
Runtime::kCompileMaglev_Concurrent, function);
Unreachable();
GenerateTailCallToReturnedCode(Runtime::kCompileOptimized, function);
BIND(&may_have_optimized_code);
{
Label heal_optimized_code_slot(this);
......
......@@ -1936,39 +1936,6 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
namespace {
// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
TieringState expected_state,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
__ cmp_raw_immediate(actual_state, static_cast<int>(expected_state));
__ b(ne, &no_match);
__ GenerateTailCallToReturnedCode(function_id);
__ bind(&no_match);
}
void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
// ----------- S t a t e -------------
// -- r0 : actual argument count
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- tiering_state : a int32 containing a non-zero optimization
// marker.
// -----------------------------------
DCHECK(!AreAliased(r1, r3, tiering_state));
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Synchronous,
Runtime::kCompileTurbofan_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Concurrent,
Runtime::kCompileTurbofan_Concurrent);
__ stop();
}
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch) {
......@@ -2086,13 +2053,11 @@ void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
b(eq, &maybe_has_optimized_code);
Register tiering_state = optimization_state;
DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
MaybeOptimizeCode(this, tiering_state);
GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
ldr(tiering_state,
ldr(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, r6);
......
......@@ -1329,39 +1329,6 @@ void MacroAssembler::PopCalleeSavedRegisters() {
namespace {
// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
TieringState expected_state,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
__ CompareAndBranch(actual_state, Operand(static_cast<int>(expected_state)),
ne, &no_match);
__ GenerateTailCallToReturnedCode(function_id);
__ bind(&no_match);
}
void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
// ----------- S t a t e -------------
// -- x0 : actual argument count
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- tiering_state : int32 containing non-zero tiering state.
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(x1, x3, tiering_state));
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Synchronous,
Runtime::kCompileTurbofan_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Concurrent,
Runtime::kCompileTurbofan_Concurrent);
__ Unreachable();
}
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch) {
......@@ -1476,14 +1443,12 @@ void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
// Check if optimized code is available.
TestAndBranchIfAllClear(optimization_state,
FeedbackVector::kTieringStateIsAnyRequestMask,
&maybe_has_optimized_code);
Register tiering_state = optimization_state;
DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
MaybeOptimizeCode(this, tiering_state);
GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
bind(&maybe_has_optimized_code);
Register optimized_code_entry = x7;
......
......@@ -720,38 +720,6 @@ Immediate MacroAssembler::ClearedValue() const {
namespace {
// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
TieringState expected_state,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
__ cmp(actual_state, static_cast<int>(expected_state));
__ j(not_equal, &no_match, Label::kNear);
__ GenerateTailCallToReturnedCode(function_id);
__ bind(&no_match);
}
void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
// ----------- S t a t e -------------
// -- eax : actual argument count
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -- tiering_state : a Smi containing a non-zero tiering state.
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(edx, edi, tiering_state));
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Synchronous,
Runtime::kCompileTurbofan_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Concurrent,
Runtime::kCompileTurbofan_Concurrent);
__ int3();
}
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry) {
// ----------- S t a t e -------------
......@@ -885,13 +853,11 @@ void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Immediate(FeedbackVector::kTieringStateIsAnyRequestMask));
j(zero, &maybe_has_optimized_code);
Register tiering_state = optimization_state;
DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
MaybeOptimizeCode(this, tiering_state);
GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
bind(&maybe_has_optimized_code);
Register optimized_code_entry = tiering_state;
Register feedback_vector = tiering_state;
Register optimized_code_entry = optimization_state;
Register feedback_vector = optimization_state;
movd(feedback_vector, saved_feedback_vector); // Restore feedback vector.
mov(optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
......
......@@ -791,45 +791,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
namespace {
// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
TieringState expected_state,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
__ Cmp(actual_state, static_cast<int>(expected_state));
__ j(not_equal, &no_match);
__ GenerateTailCallToReturnedCode(function_id);
__ bind(&no_match);
}
void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
// ----------- S t a t e -------------
// -- rax : actual argument count
// -- rdx : new target (preserved for callee if needed, and caller)
// -- rdi : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- tiering_state : a Smi containing a non-zero tiering state.
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(rdx, rdi, tiering_state));
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestMaglev_Synchronous,
Runtime::kCompileMaglev_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestMaglev_Concurrent,
Runtime::kCompileMaglev_Concurrent);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Synchronous,
Runtime::kCompileTurbofan_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Concurrent,
Runtime::kCompileTurbofan_Concurrent);
__ int3();
}
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry, Register closure,
Register scratch1, Register scratch2,
......@@ -953,13 +914,12 @@ void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
Label maybe_has_optimized_code;
// Check if optimized code is available.
testl(optimization_state,
Immediate(FeedbackVector::kTieringStateIsAnyRequestMask));
j(zero, &maybe_has_optimized_code);
Register tiering_state = optimization_state;
DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
MaybeOptimizeCode(this, tiering_state);
GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
......
......@@ -442,6 +442,8 @@ class MaglevCodeGeneratingNodeProcessor {
__ BailoutIfDeoptimized(rbx);
// Tiering support.
// TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
// per Maglev code object on x64).
{
// Scratch registers. Don't clobber regs related to the calling
// convention (e.g. kJavaScriptCallArgCountRegister).
......
......@@ -19,29 +19,6 @@
namespace v8 {
namespace internal {
namespace {
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
CodeKind target_kind, ConcurrencyMode mode) {
// As a pre- and post-condition of CompileOptimized, the function *must* be
// compiled, i.e. the installed Code object must not be CompileLazy.
IsCompiledScope is_compiled_scope(function->shared(), isolate);
DCHECK(is_compiled_scope.is_compiled());
StackLimitCheck check(isolate);
// Concurrent optimization runs on another thread, thus no additional gap.
const int gap =
IsConcurrent(mode) ? 0 : kStackSpaceRequiredForCompilation * KB;
if (check.JsHasOverflowed(gap)) return isolate->StackOverflow();
Compiler::CompileOptimized(isolate, function, mode, target_kind);
DCHECK(function->is_compiled());
return function->code();
}
} // namespace
RUNTIME_FUNCTION(Runtime_CompileLazy) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
......@@ -84,36 +61,51 @@ RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
return baseline_code;
}
RUNTIME_FUNCTION(Runtime_CompileMaglev_Concurrent) {
RUNTIME_FUNCTION(Runtime_CompileOptimized) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSFunction> function = args.at<JSFunction>(0);
return CompileOptimized(isolate, function, CodeKind::MAGLEV,
ConcurrencyMode::kConcurrent);
}
RUNTIME_FUNCTION(Runtime_CompileMaglev_Synchronous) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSFunction> function = args.at<JSFunction>(0);
return CompileOptimized(isolate, function, CodeKind::MAGLEV,
ConcurrencyMode::kSynchronous);
}
CodeKind target_kind;
ConcurrencyMode mode;
DCHECK(function->has_feedback_vector());
switch (function->tiering_state()) {
case TieringState::kRequestMaglev_Synchronous:
target_kind = CodeKind::MAGLEV;
mode = ConcurrencyMode::kSynchronous;
break;
case TieringState::kRequestMaglev_Concurrent:
target_kind = CodeKind::MAGLEV;
mode = ConcurrencyMode::kConcurrent;
break;
case TieringState::kRequestTurbofan_Synchronous:
target_kind = CodeKind::TURBOFAN;
mode = ConcurrencyMode::kSynchronous;
break;
case TieringState::kRequestTurbofan_Concurrent:
target_kind = CodeKind::TURBOFAN;
mode = ConcurrencyMode::kConcurrent;
break;
case TieringState::kNone:
case TieringState::kInProgress:
UNREACHABLE();
}
RUNTIME_FUNCTION(Runtime_CompileTurbofan_Concurrent) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSFunction> function = args.at<JSFunction>(0);
return CompileOptimized(isolate, function, CodeKind::TURBOFAN,
ConcurrencyMode::kConcurrent);
}
// As a pre- and post-condition of CompileOptimized, the function *must* be
// compiled, i.e. the installed Code object must not be CompileLazy.
IsCompiledScope is_compiled_scope(function->shared(), isolate);
DCHECK(is_compiled_scope.is_compiled());
RUNTIME_FUNCTION(Runtime_CompileTurbofan_Synchronous) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSFunction> function = args.at<JSFunction>(0);
return CompileOptimized(isolate, function, CodeKind::TURBOFAN,
ConcurrencyMode::kSynchronous);
StackLimitCheck check(isolate);
// Concurrent optimization runs on another thread, thus no additional gap.
const int gap =
IsConcurrent(mode) ? 0 : kStackSpaceRequiredForCompilation * KB;
if (check.JsHasOverflowed(gap)) return isolate->StackOverflow();
Compiler::CompileOptimized(isolate, function, mode, target_kind);
DCHECK(function->is_compiled());
return function->code();
}
RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
......
......@@ -109,10 +109,7 @@ namespace internal {
F(TraceOptimizedOSREntry, 0, 1) \
F(CompileLazy, 1, 1) \
F(CompileBaseline, 1, 1) \
F(CompileMaglev_Concurrent, 1, 1) \
F(CompileMaglev_Synchronous, 1, 1) \
F(CompileTurbofan_Concurrent, 1, 1) \
F(CompileTurbofan_Synchronous, 1, 1) \
F(CompileOptimized, 1, 1) \
F(InstallBaselineCode, 1, 1) \
F(HealOptimizedCodeSlot, 1, 1) \
F(InstantiateAsmJs, 4, 1) \
......