Commit 4c7ec1fc authored by Liu Yu, committed by V8 LUCI CQ

[loong64][mips64][masm] Move tiering logic to macro-assembler

Port commit ca33c73e

Bug: v8:7700
Change-Id: I4da1e75be2418ddf6651614c7902a6393b19479a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3828411
Auto-Submit: Liu Yu <liuyu@loongson.cn>
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#82427}
parent 50ec8a11
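For context, a rough sketch of how a builtin generator could call the two helpers this change moves into the macro-assembler; the function name, register choices, and the surrounding prologue below are assumptions for illustration, not code from this CL.

void GenerateTieringCheck(MacroAssembler* masm, Register feedback_vector,
                          Register optimization_state) {
  Label has_optimized_code_or_state;
  // Load the feedback vector flags and take the slow path if any tiering
  // work is pending.
  __ LoadTieringStateAndJumpIfNeedsProcessing(
      optimization_state, feedback_vector, &has_optimized_code_or_state);
  // ... fast path: fall through into the regular function prologue ...
  __ bind(&has_optimized_code_or_state);
  // Either tail-call the optimized code slot or request compilation.
  __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(optimization_state,
                                                  feedback_vector);
}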
@@ -35,6 +35,8 @@
#include "src/codegen/loong64/macro-assembler-loong64.h"
#endif
#define __ ACCESS_MASM(masm)
namespace v8 {
namespace internal {
@@ -4157,7 +4159,175 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
  Jump(code_object);
}

namespace {

// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
                                  TieringState expected_state,
                                  Runtime::FunctionId function_id) {
  ASM_CODE_COMMENT(masm);
  Label no_match;
  __ Branch(&no_match, ne, actual_state,
            Operand(static_cast<int>(expected_state)));
  __ GenerateTailCallToReturnedCode(function_id);
  __ bind(&no_match);
}

void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
  // ----------- S t a t e -------------
  //  -- a0 : actual argument count
  //  -- a3 : new target (preserved for callee if needed, and caller)
  //  -- a1 : target function (preserved for callee if needed, and caller)
  //  -- feedback vector (preserved for caller if needed)
  //  -- tiering_state : a Smi containing a non-zero tiering state.
  // -----------------------------------
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(a1, a3, tiering_state));
  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestTurbofan_Synchronous,
                               Runtime::kCompileTurbofan_Synchronous);
  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestTurbofan_Concurrent,
                               Runtime::kCompileTurbofan_Concurrent);

  __ stop();
}

void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                               Register optimized_code_entry) {
  // ----------- S t a t e -------------
  //  -- a0 : actual argument count
  //  -- a3 : new target (preserved for callee if needed, and caller)
  //  -- a1 : target function (preserved for callee if needed, and caller)
  // -----------------------------------
  DCHECK(!AreAliased(optimized_code_entry, a1, a3));

  Register closure = a1;
  Label heal_optimized_code_slot;

  // If the optimized code is cleared, go to runtime to update the optimization
  // marker field.
  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
                   &heal_optimized_code_slot);

  // Check if the optimized code is marked for deopt. If it is, call the
  // runtime to clear it.
  __ TestCodeTIsMarkedForDeoptimizationAndJump(optimized_code_entry, a6, ne,
                                               &heal_optimized_code_slot);

  // Optimized code is good, get it into the closure and link the closure into
  // the optimized functions list, then tail call the optimized code.
  // The feedback vector is no longer used, so re-use it as a scratch
  // register.
  __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);

  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
  __ LoadCodeObjectEntry(a2, optimized_code_entry);
  __ Jump(a2);

  // Optimized code slot contains deoptimized code or code is cleared and
  // optimized code marker isn't updated. Evict the code, update the marker
  // and re-enter the closure's code.
  __ bind(&heal_optimized_code_slot);
  __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
}

} // namespace
#ifdef V8_ENABLE_DEBUG_CODE
void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
  if (FLAG_debug_code) {
    GetObjectType(object, scratch, scratch);
    Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
           Operand(FEEDBACK_VECTOR_TYPE));
  }
}
#endif // V8_ENABLE_DEBUG_CODE

void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
    Register optimized_code, Register closure) {
  ASM_CODE_COMMENT(this);
  St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
  RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
                   kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
                   SmiCheck::kOmit);
}

void MacroAssembler::GenerateTailCallToReturnedCode(
    Runtime::FunctionId function_id) {
  ASM_CODE_COMMENT(this);
  // ----------- S t a t e -------------
  //  -- a0 : actual argument count
  //  -- a1 : target function (preserved for callee)
  //  -- a3 : new target (preserved for callee)
  // -----------------------------------
  {
    FrameScope scope(this, StackFrame::INTERNAL);
    // Push a copy of the target function, the new target and the actual
    // argument count.
    // Push function as parameter to the runtime call.
    SmiTag(kJavaScriptCallArgCountRegister);
    Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
         kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);

    CallRuntime(function_id, 1);
    LoadCodeObjectEntry(a2, a0);

    // Restore target function, new target and actual argument count.
    Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
        kJavaScriptCallArgCountRegister);
    SmiUntag(kJavaScriptCallArgCountRegister);
  }

  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
  Jump(a2);
}

// Read off the optimization state in the feedback vector and check if there
// is optimized code or a tiering state that needs to be processed.
void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
    Register optimization_state, Register feedback_vector,
    Label* has_optimized_code_or_state) {
  ASM_CODE_COMMENT(this);
  Register scratch = t2;
  DCHECK(!AreAliased(t2, optimization_state, feedback_vector));
  Ld_hu(optimization_state,
        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
  And(scratch, optimization_state,
      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
  Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
}

void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    Register optimization_state, Register feedback_vector) {
  ASM_CODE_COMMENT(this);
  DCHECK(!AreAliased(optimization_state, feedback_vector));
  Label maybe_has_optimized_code;
  // Check if optimized code marker is available
  {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    And(scratch, optimization_state,
        Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
    Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
  }

  Register tiering_state = optimization_state;
  DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
  MaybeOptimizeCode(this, tiering_state);

  bind(&maybe_has_optimized_code);
  Register optimized_code_entry = optimization_state;
  Ld_d(optimized_code_entry,
       FieldMemOperand(feedback_vector,
                       FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(this, optimized_code_entry);
}
} // namespace internal
} // namespace v8
#undef __
#endif // V8_TARGET_ARCH_LOONG64
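The LoadTieringStateAndJumpIfNeedsProcessing helper above boils down to a masked bit test on the feedback vector's 16-bit flags word. A minimal standalone sketch of that predicate, with a made-up mask value standing in for FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask:

#include <cstdint>

// Placeholder mask for illustration; the real value comes from FeedbackVector.
constexpr uint16_t kHasOptimizedCodeOrTieringStateIsAnyRequestMask = 0x00ff;

// The And + Branch(ne, ..., zero_reg) pair above implements this check:
// take the slow path when any of the masked bits are set.
bool NeedsProcessing(uint16_t flags) {
  return (flags & kHasOptimizedCodeOrTieringStateIsAnyRequestMask) != 0;
}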
@@ -1043,6 +1043,19 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // ---------------------------------------------------------------------------
  // Tiering support.
  void AssertFeedbackVector(Register object,
                            Register scratch) NOOP_UNLESS_DEBUG_CODE;
  void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
                                           Register closure);
  void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
  void LoadTieringStateAndJumpIfNeedsProcessing(
      Register optimization_state, Register feedback_vector,
      Label* has_optimized_code_or_state);
  void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
                                                    Register feedback_vector);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    Bstrpick_d(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
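DecodeField on loong64 compiles to a single BSTRPICK.D, which takes the most- and least-significant bit positions of the field. A quick illustration of the extraction it performs, using hypothetical field parameters (kShift = 3, kSize = 4):

#include <cstdint>

// Hypothetical field layout, for illustration only.
constexpr int kShift = 3;
constexpr int kSize = 4;

// Bstrpick_d(dst, src, msb, lsb) with msb = kShift + kSize - 1 and
// lsb = kShift copies bits lsb..msb of src into the low bits of dst:
uint64_t DecodeFieldLike(uint64_t src) {
  return (src >> kShift) & ((uint64_t{1} << kSize) - 1);
}

The mips64 header further down performs the same extraction with Ext(dst, src, Field::kShift, Field::kSize), which takes a start position and a width instead of two bit indices.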
@@ -35,6 +35,8 @@
#include "src/codegen/mips64/macro-assembler-mips64.h"
#endif
#define __ ACCESS_MASM(masm)
namespace v8 {
namespace internal {
@@ -6171,7 +6173,178 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
  Jump(code_object);
}

namespace {

// Tail-call |function_id| if |actual_state| == |expected_state|
void TailCallRuntimeIfStateEquals(MacroAssembler* masm, Register actual_state,
                                  TieringState expected_state,
                                  Runtime::FunctionId function_id) {
  ASM_CODE_COMMENT(masm);
  Label no_match;
  __ Branch(&no_match, ne, actual_state,
            Operand(static_cast<int>(expected_state)));
  __ GenerateTailCallToReturnedCode(function_id);
  __ bind(&no_match);
}

void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
  // ----------- S t a t e -------------
  //  -- a0 : actual argument count
  //  -- a3 : new target (preserved for callee if needed, and caller)
  //  -- a1 : target function (preserved for callee if needed, and caller)
  //  -- feedback vector (preserved for caller if needed)
  //  -- tiering_state : an int32 containing a non-zero optimization
  //                     marker.
  // -----------------------------------
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(a1, a3, tiering_state));
  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestTurbofan_Synchronous,
                               Runtime::kCompileTurbofan_Synchronous);
  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestTurbofan_Concurrent,
                               Runtime::kCompileTurbofan_Concurrent);

  __ stop();
}

void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                               Register optimized_code_entry, Register scratch1,
                               Register scratch2) {
  // ----------- S t a t e -------------
  //  -- a0 : actual argument count
  //  -- a3 : new target (preserved for callee if needed, and caller)
  //  -- a1 : target function (preserved for callee if needed, and caller)
  // -----------------------------------
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));

  Register closure = a1;
  Label heal_optimized_code_slot;

  // If the optimized code is cleared, go to runtime to update the optimization
  // marker field.
  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
                   &heal_optimized_code_slot);

  // Check if the optimized code is marked for deopt. If it is, call the
  // runtime to clear it.
  __ TestCodeTIsMarkedForDeoptimizationAndJump(optimized_code_entry, scratch1,
                                               ne, &heal_optimized_code_slot);

  // Optimized code is good, get it into the closure and link the closure into
  // the optimized functions list, then tail call the optimized code.
  // The feedback vector is no longer used, so re-use it as a scratch
  // register.
  __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure,
                                         scratch1, scratch2);

  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
  __ Daddu(a2, optimized_code_entry,
           Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(a2);

  // Optimized code slot contains deoptimized code or code is cleared and
  // optimized code marker isn't updated. Evict the code, update the marker
  // and re-enter the closure's code.
  __ bind(&heal_optimized_code_slot);
  __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
}

} // namespace
#ifdef V8_ENABLE_DEBUG_CODE
void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
  if (FLAG_debug_code) {
    GetObjectType(object, scratch, scratch);
    Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
           Operand(FEEDBACK_VECTOR_TYPE));
  }
}
#endif // V8_ENABLE_DEBUG_CODE

void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
    Register optimized_code, Register closure, Register scratch1,
    Register scratch2) {
  ASM_CODE_COMMENT(this);
  DCHECK(!AreAliased(optimized_code, closure, scratch1, scratch2));
  // Store code entry in the closure.
  Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
  mov(scratch1, optimized_code);  // Write barrier clobbers scratch1 below.
  RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
                   kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
                   SmiCheck::kOmit);
}

void MacroAssembler::GenerateTailCallToReturnedCode(
    Runtime::FunctionId function_id) {
  // ----------- S t a t e -------------
  //  -- a0 : actual argument count
  //  -- a1 : target function (preserved for callee)
  //  -- a3 : new target (preserved for callee)
  // -----------------------------------
  ASM_CODE_COMMENT(this);
  {
    FrameScope scope(this, StackFrame::INTERNAL);
    // Push a copy of the target function, the new target and the actual
    // argument count.
    // Push function as parameter to the runtime call.
    SmiTag(kJavaScriptCallArgCountRegister);
    Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
         kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);

    CallRuntime(function_id, 1);
    // Restore target function, new target and actual argument count.
    Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
        kJavaScriptCallArgCountRegister);
    SmiUntag(kJavaScriptCallArgCountRegister);
  }

  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
  Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
  Jump(a2);
}

void MacroAssembler::LoadTieringStateAndJumpIfNeedsProcessing(
    Register optimization_state, Register feedback_vector,
    Label* has_optimized_code_or_state) {
  ASM_CODE_COMMENT(this);
  Register scratch = t2;
  Lhu(optimization_state,
      FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
  And(scratch, optimization_state,
      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
  Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
}

void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    Register optimization_state, Register feedback_vector) {
  ASM_CODE_COMMENT(this);
  Label maybe_has_optimized_code;
  // Check if optimized code marker is available
  {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    And(scratch, optimization_state,
        Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
    Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
  }

  Register tiering_state = optimization_state;
  DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
  MaybeOptimizeCode(this, tiering_state);

  bind(&maybe_has_optimized_code);
  Register optimized_code_entry = optimization_state;
  Ld(tiering_state, FieldMemOperand(feedback_vector,
                                    FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(this, optimized_code_entry, t3, a5);
}
} // namespace internal
} // namespace v8
#undef __
#endif // V8_TARGET_ARCH_MIPS64
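One difference worth noting between the two ports above: loong64 resolves the entry point with LoadCodeObjectEntry, while mips64 computes it inline as optimized_code_entry + Code::kHeaderSize - kHeapObjectTag. A tiny sketch of that address arithmetic; the tag value of 1 matches V8's heap-object tagging, but the header size used here is a placeholder, not the real constant.

#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;    // V8 tags heap-object pointers with 1.
constexpr intptr_t kCodeHeaderSize = 64;  // Placeholder, not the real value.

// What the mips64 Daddu above computes: strip the tag and skip the Code
// object's header to reach the first instruction.
intptr_t CodeEntry(intptr_t tagged_code_object) {
  return tagged_code_object + (kCodeHeaderSize - kHeapObjectTag);
}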
@@ -1231,6 +1231,20 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // ---------------------------------------------------------------------------
  // Tiering support.
  void AssertFeedbackVector(Register object,
                            Register scratch) NOOP_UNLESS_DEBUG_CODE;
  void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
                                           Register closure, Register scratch1,
                                           Register scratch2);
  void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
  void LoadTieringStateAndJumpIfNeedsProcessing(
      Register optimization_state, Register feedback_vector,
      Label* has_optimized_code_or_state);
  void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register optimization_state,
                                                    Register feedback_vector);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);