Commit ca33c73e authored by Jakob Linke, committed by V8 LUCI CQ

[masm] Move tiering logic to macro-assembler

.. since these functions will also be used by Maglev codegen.

Bug: v8:7700
Change-Id: I6fdf830976369aa0dc70ca54be2165a1186eab06
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3816666
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Auto-Submit: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82293}
parent 74d4f133
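The MacroAssembler entry points added below are the hooks a code generator's prologue uses to enter the tiering machinery. As a rough illustration of the intended call pattern (a sketch only; the register choice and surrounding codegen context are assumptions, while the two helper signatures are the ones added to the headers in this diff):

```cpp
// Inside a code generator that holds a MacroAssembler (masm) and has already
// loaded the function's feedback vector into `feedback_vector`.
Label has_optimized_code_or_state;
Register optimization_state = r4;  // illustrative scratch register

__ LoadTieringStateAndJumpIfNeedsProcessing(
    optimization_state, feedback_vector, &has_optimized_code_or_state);

// Fast path: fall through and emit the unoptimized function body.
// ...

__ bind(&has_optimized_code_or_state);
// Slow path: either tail-call the requested tiering runtime function or
// jump straight to the cached optimized code.
__ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(optimization_state,
                                                feedback_vector);
```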
@@ -37,6 +37,8 @@
#include "src/codegen/arm/macro-assembler-arm.h"
#endif
#define __ ACCESS_MASM(masm)
namespace v8 {
namespace internal {
@@ -1931,6 +1933,170 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
bind(&done);
}
namespace {
// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
TieringState expected_state,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
__ cmp_raw_immediate(actual_state, static_cast<int>(expected_state));
__ b(ne, &no_match);
__ GenerateTailCallToReturnedCode(function_id);
__ bind(&no_match);
}
void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
// ----------- S t a t e -------------
// -- r0 : actual argument count
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- tiering_state : an int32 containing a non-zero tiering state.
// -----------------------------------
DCHECK(!AreAliased(r1, r3, tiering_state));
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Synchronous,
Runtime::kCompileTurbofan_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Concurrent,
Runtime::kCompileTurbofan_Concurrent);
__ stop();
}
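Conceptually, MaybeOptimizeCode emits a chain of compare-and-tail-call checks over the tiering state. A standalone C++ model of that dispatch (enum values and handler names here are simplified stand-ins, not V8 definitions) could read:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Stand-in for the TieringState request values checked above.
enum class TieringState : uint8_t {
  kNone = 0,
  kRequestTurbofan_Synchronous,
  kRequestTurbofan_Concurrent,
};

// Stand-ins for the tail-called runtime entries.
void CompileTurbofan_Synchronous() { std::puts("compile TF (sync)"); }
void CompileTurbofan_Concurrent() { std::puts("compile TF (concurrent)"); }

// Each recognized state tail-calls its runtime entry; any other non-zero
// state is unreachable (the __ stop() / Unreachable() / int3() at the end).
void MaybeOptimizeCode(TieringState state) {
  if (state == TieringState::kRequestTurbofan_Synchronous)
    return CompileTurbofan_Synchronous();
  if (state == TieringState::kRequestTurbofan_Concurrent)
    return CompileTurbofan_Concurrent();
  std::abort();
}
```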
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch) {
// ----------- S t a t e -------------
// -- r0 : actual argument count
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -----------------------------------
DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
Register closure = r1;
Label heal_optimized_code_slot;
// If the optimized code is cleared, go to runtime to update the optimization
// marker field.
__ LoadWeakValue(optimized_code_entry, optimized_code_entry,
&heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
{
UseScratchRegisterScope temps(masm);
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry,
temps.Acquire());
__ b(ne, &heal_optimized_code_slot);
}
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
// Optimized code slot contains deoptimized code or code is cleared and
// optimized code marker isn't updated. Evict the code, update the marker
// and re-enter the closure's code.
__ bind(&heal_optimized_code_slot);
__ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
}
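The control flow above boils down to a two-way decision on the weak code slot. A minimal standalone model (the struct and enum are illustrative, not V8 types):

```cpp
// What TailCallOptimizedCodeSlot decides, stripped of register management.
struct OptimizedCodeSlot {
  bool cleared;                    // weak reference was collected
  bool marked_for_deoptimization;  // code exists but was deoptimized
};

enum class Action {
  kHealOptimizedCodeSlot,  // runtime evicts the stale entry, re-enters closure
  kInstallAndTailCall      // write the code into the closure and jump to it
};

Action Dispatch(const OptimizedCodeSlot& slot) {
  if (slot.cleared || slot.marked_for_deoptimization)
    return Action::kHealOptimizedCodeSlot;
  return Action::kInstallAndTailCall;
}
```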
} // namespace
#ifdef V8_ENABLE_DEBUG_CODE
void MacroAssembler::AssertFeedbackVector(Register object) {
if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
CompareObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
Assert(eq, AbortReason::kExpectedFeedbackVector);
}
}
#endif // V8_ENABLE_DEBUG_CODE
void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
Register optimized_code, Register closure) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimized_code, closure));
// Store code entry in the closure.
str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
SmiCheck::kOmit);
}
void MacroAssembler::GenerateTailCallToReturnedCode(
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
// -- r0 : actual argument count
// -- r1 : target function (preserved for callee)
// -- r3 : new target (preserved for callee)
// -----------------------------------
{
FrameAndConstantPoolScope scope(this, StackFrame::INTERNAL);
// Push a copy of the target function, the new target and the actual
// argument count.
// Push function as parameter to the runtime call.
SmiTag(kJavaScriptCallArgCountRegister);
Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
CallRuntime(function_id, 1);
mov(r2, r0);
// Restore target function, new target and actual argument count.
Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
kJavaScriptCallArgCountRegister);
SmiUntag(kJavaScriptCallArgCountRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
JumpCodeObject(r2);
}
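The argument count is Smi-tagged before being pushed so that every slot in the internal frame holds a valid tagged value if the runtime call triggers a GC, and untagged again after the pop. On a 32-bit port like ARM, Smi tagging is a left shift by one with a zero tag bit; a tiny sketch of the encoding (assuming the standard 31-bit payload):

```cpp
#include <cstdint>

// 32-bit Smi encoding: low bit 0 marks a small integer, payload in bits 1..31.
constexpr int32_t SmiTag(int32_t value) { return value << 1; }
constexpr int32_t SmiUntag(int32_t smi) { return smi >> 1; }

static_assert(SmiUntag(SmiTag(42)) == 42, "tag/untag round-trips");
static_assert((SmiTag(42) & 1) == 0, "tag bit is zero");
```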
// Read off the optimization state in the feedback vector and check if there
// is optimized code or a tiering state that needs to be processed.
void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimization_state, feedback_vector));
ldrh(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
tst(optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
b(ne, has_optimized_code_or_state);
}
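The check is a single masked test of the 16-bit flags word loaded from the feedback vector: if either the "has optimized code" bit or any tiering-request bit is set, the slow path is taken. In plain C++ terms (the mask values below are hypothetical; the real constants are defined on FeedbackVector):

```cpp
#include <cstdint>

// Hypothetical bit layout standing in for FeedbackVector's flag constants.
constexpr uint16_t kTieringStateIsAnyRequestMask = 0x0007;
constexpr uint16_t kHasOptimizedCodeBit = 0x0008;
constexpr uint16_t kHasOptimizedCodeOrTieringStateIsAnyRequestMask =
    kTieringStateIsAnyRequestMask | kHasOptimizedCodeBit;

// Mirrors the tst / b(ne, ...) pair: one masked test decides whether any
// tiering work is pending for this function.
bool NeedsProcessing(uint16_t flags) {
  return (flags & kHasOptimizedCodeOrTieringStateIsAnyRequestMask) != 0;
}
```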
void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Register optimization_state, Register feedback_vector) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available.
tst(optimization_state,
Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
b(eq, &maybe_has_optimized_code);
Register tiering_state = optimization_state;
DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
MaybeOptimizeCode(this, tiering_state);
bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
ldr(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, r6);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
ASM_CODE_COMMENT(this);
@@ -2776,4 +2942,6 @@ void TurboAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst,
} // namespace internal
} // namespace v8
#undef __
#endif // V8_TARGET_ARCH_ARM
@@ -761,6 +761,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
return MemOperand(sp, 0);
}
// Tiering support.
void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
void LoadTieringStateAndJumpIfNeedsProcessing(
Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_state);
void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
Register feedback_vector);
// ---------------------------------------------------------------------------
// Runtime calls
......
@@ -35,6 +35,8 @@
#include "src/codegen/arm64/macro-assembler-arm64.h"
#endif
#define __ ACCESS_MASM(masm)
namespace v8 {
namespace internal {
@@ -1325,6 +1327,173 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(d14, d15, tos);
}
namespace {
// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
TieringState expected_state,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
__ CompareAndBranch(actual_state, Operand(static_cast<int>(expected_state)),
ne, &no_match);
__ GenerateTailCallToReturnedCode(function_id);
__ bind(&no_match);
}
void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
// ----------- S t a t e -------------
// -- x0 : actual argument count
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- tiering_state : int32 containing non-zero tiering state.
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(x1, x3, tiering_state));
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Synchronous,
Runtime::kCompileTurbofan_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Concurrent,
Runtime::kCompileTurbofan_Concurrent);
__ Unreachable();
}
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
Register scratch) {
// ----------- S t a t e -------------
// -- x0 : actual argument count
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
Register closure = x1;
Label heal_optimized_code_slot;
// If the optimized code is cleared, go to runtime to update the optimization
// marker field.
__ LoadWeakValue(optimized_code_entry, optimized_code_entry,
&heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
__ AssertCodeT(optimized_code_entry);
__ JumpIfCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch,
&heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Move(x2, optimized_code_entry);
__ JumpCodeTObject(x2);
// Optimized code slot contains deoptimized code or code is cleared and
// optimized code marker isn't updated. Evict the code, update the marker
// and re-enter the closure's code.
__ bind(&heal_optimized_code_slot);
__ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
}
} // namespace
#ifdef V8_ENABLE_DEBUG_CODE
void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
if (FLAG_debug_code) {
CompareObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
Assert(eq, AbortReason::kExpectedFeedbackVector);
}
}
#endif // V8_ENABLE_DEBUG_CODE
void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
Register optimized_code, Register closure) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimized_code, closure));
// Store code entry in the closure.
AssertCodeT(optimized_code);
StoreTaggedField(optimized_code,
FieldMemOperand(closure, JSFunction::kCodeOffset));
RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
SmiCheck::kOmit);
}
void MacroAssembler::GenerateTailCallToReturnedCode(
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(this);
// ----------- S t a t e -------------
// -- x0 : actual argument count
// -- x1 : target function (preserved for callee)
// -- x3 : new target (preserved for callee)
// -----------------------------------
{
FrameScope scope(this, StackFrame::INTERNAL);
// Push a copy of the target function, the new target and the actual
// argument count.
SmiTag(kJavaScriptCallArgCountRegister);
Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
kJavaScriptCallArgCountRegister, padreg);
// Push another copy as a parameter to the runtime call.
PushArgument(kJavaScriptCallTargetRegister);
CallRuntime(function_id, 1);
Mov(x2, x0);
// Restore target function, new target and actual argument count.
Pop(padreg, kJavaScriptCallArgCountRegister,
kJavaScriptCallNewTargetRegister, kJavaScriptCallTargetRegister);
SmiUntag(kJavaScriptCallArgCountRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
JumpCodeTObject(x2);
}
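Unlike the ARM version, the arm64 sequence pushes padreg alongside the three JavaScript call registers: the stack pointer must stay 16-byte aligned and each pushed register occupies 8 bytes, so pushes come in pairs. A small standalone check of that invariant (slot size and count are stated here as assumptions matching the usual arm64 ABI):

```cpp
#include <cstddef>

constexpr size_t kSlotSize = 8;     // one X register per stack slot
constexpr size_t kPushedSlots = 4;  // target, new_target, argc, padreg
static_assert((kPushedSlots * kSlotSize) % 16 == 0,
              "arm64 pushes must preserve 16-byte sp alignment");
```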
// Read off the optimization state in the feedback vector and check if there
// is optimized code or a tiering state that needs to be processed.
void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimization_state, feedback_vector));
Ldrh(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
TestAndBranchIfAnySet(
optimization_state,
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask,
has_optimized_code_or_state);
}
void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Register optimization_state, Register feedback_vector) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimization_state, feedback_vector));
Label maybe_has_optimized_code;
// Check if optimized code is available
TestAndBranchIfAllClear(optimization_state,
FeedbackVector::kTieringStateIsAnyRequestMask,
&maybe_has_optimized_code);
Register tiering_state = optimization_state;
DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
MaybeOptimizeCode(this, tiering_state);
bind(&maybe_has_optimized_code);
Register optimized_code_entry = x7;
LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, x4);
}
#ifdef V8_ENABLE_DEBUG_CODE
void TurboAssembler::AssertSpAligned() {
if (!FLAG_debug_code) return;
@@ -3799,4 +3968,6 @@ void TurboAssembler::I64x2AllTrue(Register dst, VRegister src) {
} // namespace internal
} // namespace v8
#undef __
#endif // V8_TARGET_ARCH_ARM64
@@ -1824,6 +1824,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// authenticates the link register after popping it.
void PopCalleeSavedRegisters();
// Tiering support.
void AssertFeedbackVector(Register object,
Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
void LoadTieringStateAndJumpIfNeedsProcessing(
Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_state);
void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
Register feedback_vector);
// Helpers ------------------------------------------------------------------
template <typename Field>
......
@@ -62,6 +62,8 @@
#include "src/codegen/ia32/macro-assembler-ia32.h"
#endif
#define __ ACCESS_MASM(masm)
namespace v8 {
namespace internal {
@@ -722,6 +724,186 @@ Immediate MacroAssembler::ClearedValue() const {
static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
}
namespace {
// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
TieringState expected_state,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
__ cmp(actual_state, static_cast<int>(expected_state));
__ j(not_equal, &no_match, Label::kNear);
__ GenerateTailCallToReturnedCode(function_id);
__ bind(&no_match);
}
void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
// ----------- S t a t e -------------
// -- eax : actual argument count
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -- tiering_state : a Smi containing a non-zero tiering state.
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(edx, edi, tiering_state));
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Synchronous,
Runtime::kCompileTurbofan_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Concurrent,
Runtime::kCompileTurbofan_Concurrent);
__ int3();
}
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry) {
// ----------- S t a t e -------------
// -- eax : actual argument count
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(edx, edi, optimized_code_entry));
Register closure = edi;
__ Push(eax);
__ Push(edx);
Label heal_optimized_code_slot;
// If the optimized code is cleared, go to runtime to update the optimization
// marker field.
__ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, eax);
__ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
__ Push(optimized_code_entry);
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure, edx,
ecx);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ Pop(optimized_code_entry);
__ LoadCodeObjectEntry(ecx, optimized_code_entry);
__ Pop(edx);
__ Pop(eax);
__ jmp(ecx);
// Optimized code slot contains deoptimized code or code is cleared and
// optimized code marker isn't updated. Evict the code, update the marker
// and re-enter the closure's code.
__ bind(&heal_optimized_code_slot);
__ Pop(edx);
__ Pop(eax);
__ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
}
} // namespace
#ifdef V8_ENABLE_DEBUG_CODE
void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
if (FLAG_debug_code) {
CmpObjectType(object, FEEDBACK_VECTOR_TYPE, scratch);
Assert(equal, AbortReason::kExpectedFeedbackVector);
}
}
#endif // V8_ENABLE_DEBUG_CODE
void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
Register optimized_code, Register closure, Register value,
Register slot_address) {
ASM_CODE_COMMENT(this);
// Store the optimized code in the closure.
mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
mov(value, optimized_code); // Write barrier clobbers slot_address below.
RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
SaveFPRegsMode::kIgnore, SmiCheck::kOmit);
}
void MacroAssembler::GenerateTailCallToReturnedCode(
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
// -- eax : actual argument count
// -- edx : new target (preserved for callee)
// -- edi : target function (preserved for callee)
// -----------------------------------
ASM_CODE_COMMENT(this);
{
FrameScope scope(this, StackFrame::INTERNAL);
// Push a copy of the target function, the new target and the actual
// argument count.
push(kJavaScriptCallTargetRegister);
push(kJavaScriptCallNewTargetRegister);
SmiTag(kJavaScriptCallArgCountRegister);
push(kJavaScriptCallArgCountRegister);
// Function is also the parameter to the runtime call.
push(kJavaScriptCallTargetRegister);
CallRuntime(function_id, 1);
mov(ecx, eax);
// Restore target function, new target and actual argument count.
pop(kJavaScriptCallArgCountRegister);
SmiUntag(kJavaScriptCallArgCountRegister);
pop(kJavaScriptCallNewTargetRegister);
pop(kJavaScriptCallTargetRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
JumpCodeObject(ecx);
}
// Read off the optimization state in the feedback vector and check if there
// is optimized code or a tiering state that needs to be processed.
// Registers optimization_state and feedback_vector must be aliased.
void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
Register optimization_state, XMMRegister saved_feedback_vector,
Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(this);
Register feedback_vector = optimization_state;
// Store feedback_vector. We may need it if we need to load the optimize code
// slot entry.
movd(saved_feedback_vector, feedback_vector);
mov_w(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
// Check if there is optimized code or a tiering state that needs to be
// processed.
test_w(optimization_state,
Immediate(
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
j(not_zero, has_optimized_code_or_state);
}
void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Register optimization_state, XMMRegister saved_feedback_vector) {
ASM_CODE_COMMENT(this);
Label maybe_has_optimized_code;
// Check if optimized code is available.
test(optimization_state,
Immediate(FeedbackVector::kTieringStateIsAnyRequestMask));
j(zero, &maybe_has_optimized_code);
Register tiering_state = optimization_state;
DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
MaybeOptimizeCode(this, tiering_state);
bind(&maybe_has_optimized_code);
Register optimized_code_entry = tiering_state;
Register feedback_vector = tiering_state;
movd(feedback_vector, saved_feedback_vector); // Restore feedback vector.
mov(optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry);
}
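The ia32 port differs from the other architectures in that it cannot afford to keep the feedback vector in a general-purpose register across the flags check, so the caller passes an XMM register and the vector is parked there with movd, then restored just before the optimized-code slot is read. A hedged sketch of the resulting call pattern against the declarations added to macro-assembler-ia32.h (register choices are illustrative only):

```cpp
// `optimization_state` must hold the feedback vector on entry; the helper
// spills it to `saved_feedback_vector` before overwriting it with the flags.
Label has_optimized_code_or_state;
Register optimization_state = ecx;         // illustrative; aliases the vector
XMMRegister saved_feedback_vector = xmm1;  // illustrative spill slot

__ LoadTieringStateAndJumpIfNeedsProcessing(
    optimization_state, saved_feedback_vector, &has_optimized_code_or_state);

// ... fast path ...

__ bind(&has_optimized_code_or_state);
__ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(optimization_state,
                                                saved_feedback_vector);
```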
#ifdef V8_ENABLE_DEBUG_CODE
void MacroAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
@@ -2043,4 +2225,6 @@ void TurboAssembler::DebugBreak() { int3(); }
} // namespace internal
} // namespace v8
#undef __
#endif // V8_TARGET_ARCH_IA32
@@ -554,6 +554,19 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
Immediate ClearedValue() const;
// Tiering support.
void AssertFeedbackVector(Register object,
Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure, Register scratch1,
Register slot_address);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
void LoadTieringStateAndJumpIfNeedsProcessing(
Register optimization_state, XMMRegister saved_feedback_vector,
Label* has_optimized_code_or_state);
void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Register optimization_state, XMMRegister saved_feedback_vector);
// Abort execution if argument is not a smi, enabled via --debug-code.
void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
......
@@ -37,6 +37,8 @@
#include "src/codegen/x64/macro-assembler-x64.h"
#endif
#define __ ACCESS_MASM(masm)
namespace v8 {
namespace internal {
@@ -786,6 +788,188 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
Jump(code, RelocInfo::CODE_TARGET);
}
namespace {
// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
TieringState expected_state,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
__ Cmp(actual_state, static_cast<int>(expected_state));
__ j(not_equal, &no_match);
__ GenerateTailCallToReturnedCode(function_id);
__ bind(&no_match);
}
void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
// ----------- S t a t e -------------
// -- rax : actual argument count
// -- rdx : new target (preserved for callee if needed, and caller)
// -- rdi : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -- tiering_state : a Smi containing a non-zero tiering state.
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(rdx, rdi, tiering_state));
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestMaglev_Synchronous,
Runtime::kCompileMaglev_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestMaglev_Concurrent,
Runtime::kCompileMaglev_Concurrent);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Synchronous,
Runtime::kCompileTurbofan_Synchronous);
TailCallRuntimeIfStateEquals(masm, tiering_state,
TieringState::kRequestTurbofan_Concurrent,
Runtime::kCompileTurbofan_Concurrent);
__ int3();
}
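Note that the x64 port already dispatches the Maglev request states in addition to the Turbofan ones, which is the motivation for hoisting these helpers into the macro-assembler. Extending the earlier standalone model (again with illustrative stand-ins, not V8 definitions):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>

enum class TieringState : uint8_t {
  kNone = 0,
  kRequestMaglev_Synchronous,
  kRequestMaglev_Concurrent,
  kRequestTurbofan_Synchronous,
  kRequestTurbofan_Concurrent,
};

void CompileMaglev_Synchronous() { std::puts("compile Maglev (sync)"); }
void CompileMaglev_Concurrent() { std::puts("compile Maglev (concurrent)"); }
void CompileTurbofan_Synchronous() { std::puts("compile TF (sync)"); }
void CompileTurbofan_Concurrent() { std::puts("compile TF (concurrent)"); }

// Maglev requests are checked first, then Turbofan; anything else traps.
void MaybeOptimizeCode(TieringState state) {
  if (state == TieringState::kRequestMaglev_Synchronous)
    return CompileMaglev_Synchronous();
  if (state == TieringState::kRequestMaglev_Concurrent)
    return CompileMaglev_Concurrent();
  if (state == TieringState::kRequestTurbofan_Synchronous)
    return CompileTurbofan_Synchronous();
  if (state == TieringState::kRequestTurbofan_Concurrent)
    return CompileTurbofan_Concurrent();
  std::abort();
}
```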
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry, Register closure,
Register scratch1, Register scratch2,
JumpMode jump_mode) {
// ----------- S t a t e -------------
// rax : actual argument count
// rdx : new target (preserved for callee if needed, and caller)
// rsi : current context, used for the runtime call
// rdi : target function (preserved for callee if needed, and caller)
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK_EQ(closure, kJSFunctionRegister);
DCHECK(!AreAliased(rax, rdx, closure, rsi, optimized_code_entry, scratch1,
scratch2));
Label heal_optimized_code_slot;
// If the optimized code is cleared, go to runtime to update the optimization
// marker field.
__ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
__ AssertCodeT(optimized_code_entry);
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch1);
__ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure,
scratch1, scratch2);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ Move(rcx, optimized_code_entry);
__ JumpCodeTObject(rcx, jump_mode);
// Optimized code slot contains deoptimized code or code is cleared and
// optimized code marker isn't updated. Evict the code, update the marker
// and re-enter the closure's code.
__ bind(&heal_optimized_code_slot);
__ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot, jump_mode);
}
} // namespace
#ifdef V8_ENABLE_DEBUG_CODE
void MacroAssembler::AssertFeedbackVector(Register object) {
if (FLAG_debug_code) {
CmpObjectType(object, FEEDBACK_VECTOR_TYPE, kScratchRegister);
Assert(equal, AbortReason::kExpectedFeedbackVector);
}
}
#endif // V8_ENABLE_DEBUG_CODE
void MacroAssembler::GenerateTailCallToReturnedCode(
Runtime::FunctionId function_id, JumpMode jump_mode) {
// ----------- S t a t e -------------
// -- rax : actual argument count
// -- rdx : new target (preserved for callee)
// -- rdi : target function (preserved for callee)
// -----------------------------------
ASM_CODE_COMMENT(this);
{
FrameScope scope(this, StackFrame::INTERNAL);
// Push a copy of the target function, the new target and the actual
// argument count.
Push(kJavaScriptCallTargetRegister);
Push(kJavaScriptCallNewTargetRegister);
SmiTag(kJavaScriptCallArgCountRegister);
Push(kJavaScriptCallArgCountRegister);
// Function is also the parameter to the runtime call.
Push(kJavaScriptCallTargetRegister);
CallRuntime(function_id, 1);
movq(rcx, rax);
// Restore target function, new target and actual argument count.
Pop(kJavaScriptCallArgCountRegister);
SmiUntag(kJavaScriptCallArgCountRegister);
Pop(kJavaScriptCallNewTargetRegister);
Pop(kJavaScriptCallTargetRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
JumpCodeTObject(rcx, jump_mode);
}
void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
Register optimized_code, Register closure, Register scratch1,
Register slot_address) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
DCHECK_EQ(closure, kJSFunctionRegister);
// Store the optimized code in the closure.
AssertCodeT(optimized_code);
StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
optimized_code);
// Write barrier clobbers scratch1 below.
Register value = scratch1;
movq(value, optimized_code);
RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
SaveFPRegsMode::kIgnore, SmiCheck::kOmit);
}
// Read off the optimization state in the feedback vector and check if there
// is optimized code or a tiering state that needs to be processed.
void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_state) {
ASM_CODE_COMMENT(this);
movzxwl(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
testw(optimization_state,
Immediate(
FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
j(not_zero, has_optimized_code_or_state);
}
void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Register optimization_state, Register feedback_vector, Register closure,
JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
Label maybe_has_optimized_code;
testl(optimization_state,
Immediate(FeedbackVector::kTieringStateIsAnyRequestMask));
j(zero, &maybe_has_optimized_code);
Register tiering_state = optimization_state;
DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
MaybeOptimizeCode(this, tiering_state);
bind(&maybe_has_optimized_code);
Register optimized_code_entry = optimization_state;
LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, closure, r9,
WriteBarrierDescriptor::SlotAddressRegister(),
jump_mode);
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
int bytes = 0;
@@ -3287,4 +3471,6 @@ void TurboAssembler::DebugBreak() { int3(); }
} // namespace internal
} // namespace v8
#undef __
#endif // V8_TARGET_ARCH_X64
@@ -828,6 +828,20 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
Immediate ClearedValue() const;
// Tiering support.
void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure, Register scratch1,
Register slot_address);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
JumpMode jump_mode = JumpMode::kJump);
void LoadTieringStateAndJumpIfNeedsProcessing(
Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_state);
void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
Register optimization_state, Register feedback_vector, Register closure,
JumpMode jump_mode = JumpMode::kJump);
// Abort execution if argument is not a CodeT, enabled via --debug-code.
void AssertCodeT(Register object) NOOP_UNLESS_DEBUG_CODE;
......