Commit 3b5ba5a8 authored by Milad Farazmand, committed by V8 LUCI CQ

Revert "PPC/S390: Reland "[osr] Use the new OSR cache""

This reverts commit c575e8ae.

Reason for revert: The original CL was reverted in https://crrev.com/c/3615219.

Original change's description:
> PPC/S390: Reland "[osr] Use the new OSR cache"
>
> Port 91453880
>
> Original Commit Message:
>
>   This is a reland of commit 91da3883
>
>   Original change's description:
>   > Fixed: Use an X register for JumpIfCodeTIsMarkedForDeoptimization
>   > on arm64.
>   > Bug: v8:12161
>
> Change-Id: I6e63bd5995340bac32654ef12c52d25b496140e3
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3607997
> Reviewed-by: Junliang Yan <junyan@redhat.com>
> Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
> Cr-Commit-Position: refs/heads/main@{#80194}

Change-Id: I977e59238e1f03c21307c1499cde8b567d1e3e2c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3620538
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Reviewed-by: Milad Farazmand <mfarazma@redhat.com>
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#80305}
parent 5c92b06e
@@ -532,32 +532,6 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
   __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
 }
 
-void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
-                                                Register feedback_vector,
-                                                FeedbackSlot slot,
-                                                Label* on_result,
-                                                Label::Distance) {
-  Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
-  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
-
-  // Is it marked_for_deoptimization? If yes, clear the slot.
-  {
-    ScratchRegisterScope temps(this);
-    Register scratch = temps.AcquireScratch();
-    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch, r0);
-    __ beq(on_result, cr0);
-    __ mov(scratch, __ ClearedValue());
-    StoreTaggedFieldNoWriteBarrier(
-        feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
-        scratch);
-  }
-
-  __ bind(&fallthrough);
-  Move(scratch_and_result, 0);
-}
-
 void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
     int32_t weight, Label* skip_interrupt_label) {
   ASM_CODE_COMMENT(masm_);
...
@@ -530,32 +530,6 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
   __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
 }
 
-void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
-                                                Register feedback_vector,
-                                                FeedbackSlot slot,
-                                                Label* on_result,
-                                                Label::Distance) {
-  Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
-  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
-
-  // Is it marked_for_deoptimization? If yes, clear the slot.
-  {
-    ScratchRegisterScope temps(this);
-    Register scratch = temps.AcquireScratch();
-    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch);
-    __ beq(on_result);
-    __ mov(scratch, __ ClearedValue());
-    StoreTaggedFieldNoWriteBarrier(
-        feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
-        scratch);
-  }
-
-  __ bind(&fallthrough);
-  Move(scratch_and_result, 0);
-}
-
 void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
     int32_t weight, Label* skip_interrupt_label) {
   ASM_CODE_COMMENT(masm_);
...
@@ -79,28 +79,16 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
   __ Ret();
 }
 
-void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array,
-                      Register scratch) {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
+                                 Register scratch) {
+  // Reset the bytecode age and OSR state (optimized to a single write).
+  static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  DCHECK(!AreAliased(bytecode_array, scratch));
   __ mov(scratch, Operand(0));
-  __ StoreU16(
-      scratch,
-      FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset), r0);
-}
-
-void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
-                                   Register feedback_vector, Register scratch1,
-                                   Register scratch2) {
-  DCHECK(!AreAliased(feedback_vector, scratch1));
-  __ LoadU8(scratch1,
-            FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset),
-            scratch2);
-  __ andi(scratch1, scratch1,
-          Operand(FeedbackVector::MaybeHasOptimizedOsrCodeBit::kMask));
-  __ StoreU8(scratch1,
-             FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset),
-             scratch2);
+  __ StoreU32(scratch,
+              FieldMemOperand(bytecode_array,
+                              BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+              r0);
 }
 
 // Restarts execution either at the current or next (in execution order)
@@ -228,7 +216,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (is_osr) {
     Register scratch = ip;
-    ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, scratch);
+    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister,
+                                scratch);
     Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
   } else {
     __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -402,15 +391,37 @@ enum class OsrSourceTier {
 };
 
 void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
-                        Register maybe_target_code) {
-  Label jump_to_optimized_code;
-  {
-    // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
-    __ CmpSmiLiteral(maybe_target_code, Smi::zero(), r0);
-    __ bne(&jump_to_optimized_code);
-  }
+                        Register current_loop_depth,
+                        Register encoded_current_bytecode_offset,
+                        Register osr_urgency_and_install_target) {
+  static constexpr Register scratch = r6;
+  DCHECK(!AreAliased(scratch, current_loop_depth,
+                     encoded_current_bytecode_offset,
+                     osr_urgency_and_install_target));
+  // OSR based on urgency, i.e. is the OSR urgency greater than the current
+  // loop depth?
+  Label try_osr;
+  STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
+  Register urgency = scratch;
+  __ andi(urgency, osr_urgency_and_install_target,
+          Operand(BytecodeArray::OsrUrgencyBits::kMask));
+  __ CmpU64(urgency, current_loop_depth);
+  __ bgt(&try_osr);
+
+  // OSR based on the install target offset, i.e. does the current bytecode
+  // offset match the install target offset?
+  static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
+  Register install_target = osr_urgency_and_install_target;
+  __ andi(install_target, osr_urgency_and_install_target, Operand(kMask));
+  __ CmpU64(install_target, encoded_current_bytecode_offset);
+  __ beq(&try_osr);
+
+  // Neither urgency nor the install target triggered, return to the caller.
+  // Note: the return value must be nullptr or a valid Code object.
+  __ mov(r3, Operand(0));
+  __ Ret(0);
+
+  __ bind(&try_osr);
 
   ASM_CODE_COMMENT(masm);
   {
@@ -419,12 +430,12 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   }
 
   // If the code object is null, just return to the caller.
+  Label skip;
   __ CmpSmiLiteral(r3, Smi::zero(), r0);
-  __ bne(&jump_to_optimized_code);
+  __ bne(&skip);
   __ Ret();
 
-  __ bind(&jump_to_optimized_code);
-  DCHECK_EQ(maybe_target_code, r3);  // Already in the right spot.
+  __ bind(&skip);
 
   if (source == OsrSourceTier::kInterpreter) {
     // Drop the handler frame that is be sitting on top of the actual
@@ -1179,12 +1190,16 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
 
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
-  {
-    UseScratchRegisterScope temps(masm);
-    __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, temps.Acquire(),
-                                          scratch);
-    __ bne(&heal_optimized_code_slot, cr0);
-  }
+  __ LoadTaggedPointerField(
+      scratch,
+      FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset),
+      r0);
+  __ LoadS32(
+      scratch,
+      FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
+      r0);
+  __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
+  __ bne(&heal_optimized_code_slot, cr0);
 
   // Optimized code is good, get it into the closure and link the closure
   // into the optimized functions list, then tail call the optimized code.
@@ -1377,8 +1392,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
         &has_optimized_code_or_state);
   }
 
-  { ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r11, r0); }
-
   // Increment invocation count for the function.
   {
     Register invocation_count = r13;
@@ -1414,7 +1427,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   // the frame, so load it into a register.
   Register bytecodeArray = descriptor.GetRegisterParameter(
       BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-  ResetBytecodeAge(masm, bytecodeArray, r13);
+  ResetBytecodeAgeAndOsrState(masm, bytecodeArray, r13);
 
   __ Push(argc, bytecodeArray);
@@ -1540,11 +1553,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LoadTieringStateAndJumpIfNeedsProcessing(
       masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
 
-  {
-    UseScratchRegisterScope temps(masm);
-    ResetFeedbackVectorOsrUrgency(masm, feedback_vector, ip, r0);
-  }
-
   Label not_optimized;
   __ bind(&not_optimized);
@@ -1568,7 +1576,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(closure);
 
-  ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, r8);
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r8);
 
   // Load initial bytecode offset.
   __ mov(kInterpreterBytecodeOffsetRegister,
@@ -2062,20 +2070,24 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
 
 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
   using D = InterpreterOnStackReplacementDescriptor;
-  STATIC_ASSERT(D::kParameterCount == 1);
+  STATIC_ASSERT(D::kParameterCount == 3);
   OnStackReplacement(masm, OsrSourceTier::kInterpreter,
-                     D::MaybeTargetCodeRegister());
+                     D::CurrentLoopDepthRegister(),
+                     D::EncodedCurrentBytecodeOffsetRegister(),
+                     D::OsrUrgencyAndInstallTargetRegister());
 }
 
 #if ENABLE_SPARKPLUG
 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
   using D = BaselineOnStackReplacementDescriptor;
-  STATIC_ASSERT(D::kParameterCount == 1);
+  STATIC_ASSERT(D::kParameterCount == 3);
   __ LoadU64(kContextRegister,
              MemOperand(fp, BaselineFrameConstants::kContextOffset), r0);
   OnStackReplacement(masm, OsrSourceTier::kBaseline,
-                     D::MaybeTargetCodeRegister());
+                     D::CurrentLoopDepthRegister(),
+                     D::EncodedCurrentBytecodeOffsetRegister(),
+                     D::OsrUrgencyAndInstallTargetRegister());
 }
 #endif
...
@@ -83,27 +83,18 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
   __ Ret();
 }
 
-void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array,
-                      Register scratch) {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
+                                 Register scratch) {
+  // Reset the bytecode age and OSR state (optimized to a single write).
+  static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  DCHECK(!AreAliased(bytecode_array, scratch));
   __ mov(r0, Operand(0));
-  __ StoreU16(
-      r0, FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset),
-      scratch);
+  __ StoreU32(r0,
+              FieldMemOperand(bytecode_array,
+                              BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+              scratch);
 }
 
-void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
-                                   Register feedback_vector, Register scratch) {
-  DCHECK(!AreAliased(feedback_vector, scratch));
-  __ LoadU8(scratch,
-            FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
-  __ AndP(scratch, scratch,
-          Operand(FeedbackVector::MaybeHasOptimizedOsrCodeBit::kMask));
-  __ StoreU8(scratch,
-             FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
-}
-
 // Restarts execution either at the current or next (in execution order)
 // bytecode. If there is baseline code on the shared function info, converts an
 // interpreter frame into a baseline frame and continues execution in baseline
@@ -227,7 +218,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   if (is_osr) {
     // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
     // disarm Sparkplug here.
-    ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, r1);
+    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r1);
     Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
   } else {
     __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -265,15 +256,37 @@ enum class OsrSourceTier {
 };
 
 void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
-                        Register maybe_target_code) {
-  Label jump_to_optimized_code;
-  {
-    // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
-    __ CmpSmiLiteral(maybe_target_code, Smi::zero(), r0);
-    __ bne(&jump_to_optimized_code);
-  }
+                        Register current_loop_depth,
+                        Register encoded_current_bytecode_offset,
+                        Register osr_urgency_and_install_target) {
+  static constexpr Register scratch = r5;
+  DCHECK(!AreAliased(scratch, current_loop_depth,
+                     encoded_current_bytecode_offset,
+                     osr_urgency_and_install_target));
+  // OSR based on urgency, i.e. is the OSR urgency greater than the current
+  // loop depth?
+  Label try_osr;
+  STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
+  Register urgency = scratch;
+  __ AndP(urgency, osr_urgency_and_install_target,
+          Operand(BytecodeArray::OsrUrgencyBits::kMask));
+  __ CmpU64(urgency, current_loop_depth);
+  __ bgt(&try_osr);
+
+  // OSR based on the install target offset, i.e. does the current bytecode
+  // offset match the install target offset?
+  static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
+  Register install_target = osr_urgency_and_install_target;
+  __ AndP(install_target, osr_urgency_and_install_target, Operand(kMask));
+  __ CmpU64(install_target, encoded_current_bytecode_offset);
+  __ beq(&try_osr);
+
+  // Neither urgency nor the install target triggered, return to the caller.
+  // Note: the return value must be nullptr or a valid Code object.
+  __ mov(r2, Operand(0));
+  __ Ret(0);
+
+  __ bind(&try_osr);
 
   ASM_CODE_COMMENT(masm);
   {
@@ -282,12 +295,12 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   }
 
   // If the code object is null, just return to the caller.
+  Label jump_to_returned_code;
   __ CmpSmiLiteral(r2, Smi::zero(), r0);
-  __ bne(&jump_to_optimized_code);
+  __ bne(&jump_to_returned_code);
   __ Ret();
 
-  __ bind(&jump_to_optimized_code);
-  DCHECK_EQ(maybe_target_code, r2);  // Already in the right spot.
+  __ bind(&jump_to_returned_code);
 
   if (source == OsrSourceTier::kInterpreter) {
     // Drop the handler frame that is be sitting on top of the actual
@@ -1208,10 +1221,13 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
 
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
-  {
-    __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch);
-    __ bne(&heal_optimized_code_slot);
-  }
+  __ LoadTaggedPointerField(
+      scratch,
+      FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+  __ LoadS32(scratch, FieldMemOperand(
+                          scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+  __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
+  __ bne(&heal_optimized_code_slot);
 
   // Optimized code is good, get it into the closure and link the closure
   // into the optimized functions list, then tail call the optimized code.
@@ -1405,11 +1421,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
         &has_optimized_code_or_state);
   }
 
-  {
-    UseScratchRegisterScope temps(masm);
-    ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r1);
-  }
-
   // Increment invocation count for the function.
   {
     Register invocation_count = r1;
@@ -1443,7 +1454,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   // the frame, so load it into a register.
   Register bytecodeArray = descriptor.GetRegisterParameter(
       BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-  ResetBytecodeAge(masm, bytecodeArray, r1);
+  ResetBytecodeAgeAndOsrState(masm, bytecodeArray, r1);
 
   __ Push(argc, bytecodeArray);
@@ -1570,11 +1581,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LoadTieringStateAndJumpIfNeedsProcessing(
       masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
 
-  {
-    UseScratchRegisterScope temps(masm);
-    ResetFeedbackVectorOsrUrgency(masm, feedback_vector, r1);
-  }
-
   Label not_optimized;
   __ bind(&not_optimized);
@@ -1592,7 +1598,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(closure);
 
-  ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, r1);
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r1);
 
   // Load the initial bytecode offset.
   __ mov(kInterpreterBytecodeOffsetRegister,
@@ -3853,20 +3859,24 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
 
 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
   using D = InterpreterOnStackReplacementDescriptor;
-  STATIC_ASSERT(D::kParameterCount == 1);
+  STATIC_ASSERT(D::kParameterCount == 3);
   OnStackReplacement(masm, OsrSourceTier::kInterpreter,
-                     D::MaybeTargetCodeRegister());
+                     D::CurrentLoopDepthRegister(),
+                     D::EncodedCurrentBytecodeOffsetRegister(),
+                     D::OsrUrgencyAndInstallTargetRegister());
 }
 
 #if ENABLE_SPARKPLUG
 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
   using D = BaselineOnStackReplacementDescriptor;
-  STATIC_ASSERT(D::kParameterCount == 1);
+  STATIC_ASSERT(D::kParameterCount == 3);
   __ LoadU64(kContextRegister,
              MemOperand(fp, BaselineFrameConstants::kContextOffset));
   OnStackReplacement(masm, OsrSourceTier::kBaseline,
-                     D::MaybeTargetCodeRegister());
+                     D::CurrentLoopDepthRegister(),
+                     D::EncodedCurrentBytecodeOffsetRegister(),
+                     D::OsrUrgencyAndInstallTargetRegister());
 }
 #endif
...
@@ -297,24 +297,6 @@ void TurboAssembler::Drop(Register count, Register scratch) {
   add(sp, sp, scratch);
 }
 
-void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
-                                                        Register scratch1,
-                                                        Register scratch2) {
-  LoadTaggedPointerField(scratch1,
-                         FieldMemOperand(codet, Code::kCodeDataContainerOffset),
-                         scratch2);
-  LoadS32(
-      scratch1,
-      FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset),
-      scratch2);
-  TestBit(scratch1, Code::kMarkedForDeoptimizationBit, scratch2);
-}
-
-Operand MacroAssembler::ClearedValue() const {
-  return Operand(
-      static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
-}
-
 void TurboAssembler::Call(Label* target) { b(target, SetLK); }
 
 void TurboAssembler::Push(Handle<HeapObject> handle) {
...
@@ -1361,10 +1361,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
     DecodeField<Field>(reg, reg, rc);
   }
 
-  void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch1,
-                                          Register scratch2);
-  Operand ClearedValue() const;
-
  private:
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
...
@@ -495,20 +495,6 @@ void TurboAssembler::Drop(Register count, Register scratch) {
   AddS64(sp, sp, scratch);
 }
 
-void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
-                                                        Register scratch) {
-  LoadTaggedPointerField(
-      scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
-  LoadS32(scratch, FieldMemOperand(
-                       scratch, CodeDataContainer::kKindSpecificFlagsOffset));
-  TestBit(scratch, Code::kMarkedForDeoptimizationBit, scratch);
-}
-
-Operand MacroAssembler::ClearedValue() const {
-  return Operand(
-      static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
-}
-
 void TurboAssembler::Call(Label* target) { b(r14, target); }
 
 void TurboAssembler::Push(Handle<HeapObject> handle) {
...
@@ -1773,9 +1773,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
       RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
       SmiCheck smi_check = SmiCheck::kInline);
 
-  void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
-  Operand ClearedValue() const;
-
  private:
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
   // Helper functions for generating invokes.
...