Commit d8dc66f9 authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64][sparkplug] Fix sparkplug verify framesize failed

Change-Id: I7481749ba3d5c41d7405b0d88a51defbc8bec9d6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3093009
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Reviewed-by: Ji Qiu <qiuji@iscas.ac.cn>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/master@{#76277}
parent 52720f63
@@ -37,69 +37,35 @@ void BaselineCompiler::PrologueFillFrame() {
const int kLoopUnrollSize = 8;
const int new_target_index = new_target_or_generator_register.index();
const bool has_new_target = new_target_index != kMaxInt;
-  // BaselineOutOfLinePrologue already pushed one undefined.
-  register_count -= 1;
if (has_new_target) {
-    if (new_target_index == 0) {
-      // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
-      // pushed.
-      __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
-    } else {
DCHECK_LE(new_target_index, register_count);
-      int index = 1;
-      for (; index + 2 <= new_target_index; index += 2) {
-        __ masm()->Push(kInterpreterAccumulatorRegister,
-                        kInterpreterAccumulatorRegister);
-      }
-      if (index == new_target_index) {
-        __ masm()->Push(kJavaScriptCallNewTargetRegister,
-                        kInterpreterAccumulatorRegister);
-      } else {
-        DCHECK_EQ(index, new_target_index - 1);
-        __ masm()->Push(kInterpreterAccumulatorRegister,
-                        kJavaScriptCallNewTargetRegister);
-      }
-      // We pushed "index" registers, minus the one the prologue pushed, plus
-      // the two registers that included new_target.
-      register_count -= (index - 1 + 2);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
+    for (int i = 0; i < new_target_index; i++) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+    }
+    // Push new_target_or_generator.
+    __ Push(kJavaScriptCallNewTargetRegister);
+    register_count -= new_target_index + 1;
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
-    for (int i = 0; i < register_count; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    for (int i = 0; i < register_count; ++i) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
} else {
-    BaselineAssembler::ScratchRegisterScope temps(&basm_);
-    Register scratch = temps.AcquireScratch();
-    // Extract the first few registers to round to the unroll size.
-    int first_registers = register_count % kLoopUnrollSize;
-    for (int i = 0; i < first_registers; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
-    }
-    __ Move(scratch, register_count / kLoopUnrollSize);
-    // We enter the loop unconditionally, so make sure we need to loop at least
-    // once.
-    DCHECK_GT(register_count / kLoopUnrollSize, 0);
-    Label loop;
-    __ Bind(&loop);
-    for (int i = 0; i < kLoopUnrollSize; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    for (int i = 0; i < register_count; ++i) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
-    __ masm()->Branch(&loop, gt, scratch, Operand(1));
}
}
void BaselineCompiler::VerifyFrameSize() {
ASM_CODE_COMMENT(&masm_);
__ masm()->Add64(kScratchReg, sp,
-                   RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
-                               bytecode_->frame_size(),
-                           2 * kSystemPointerSize));
+                   Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+                           bytecode_->frame_size()));
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
Operand(fp));
}
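This hunk is the heart of the fix. The old PrologueFillFrame pushed interpreter registers two at a time (and leaned on the one `undefined` that BaselineOutOfLinePrologue had already pushed), so for an odd register count the frame could end up one slot larger than `bytecode_->frame_size()` records, which the rounded-up comparison in the old VerifyFrameSize papered over. The new code bumps sp by exactly `register_count * kPointerSize` and fills each slot with an Sd store, so sp lands exactly at `fp - (kFixedFrameSizeFromFp + frame_size())` and the verification can assert strict equality. A minimal standalone sketch of the arithmetic (the slot size and register count are illustrative, not taken from the commit):

    // Standalone sketch, not V8 code: why paired pushes can over-allocate.
    #include <cstdio>

    int main() {
      const int kPointerSize = 8;
      int register_count = 5;  // an illustrative odd count

      // Old scheme: registers pushed in pairs to keep sp 16-byte aligned,
      // so the slot count is effectively rounded up to an even number.
      int old_delta = ((register_count + 1) / 2) * 2 * kPointerSize;  // 48

      // New scheme: one sp adjustment of exactly register_count slots,
      // followed by one Sd store per slot.
      int new_delta = register_count * kPointerSize;                  // 40

      printf("old sp delta: %d, new sp delta: %d\n", old_delta, new_delta);
      // The 8-byte difference is what an exact sp-vs-fp comparison in
      // VerifyFrameSize would trip over on frames with an odd slot count.
    }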
@@ -1160,9 +1160,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// store the bytecode offset.
if (FLAG_debug_code) {
UseScratchRegisterScope temps(masm);
-    Register type = temps.Acquire();
-    __ GetObjectType(feedback_vector, type, type);
-    __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
+    Register invocation_count = temps.Acquire();
+    __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+    __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
Operand(FEEDBACK_VECTOR_TYPE));
}
// Our stack is currently aligned. We have to push something along with
@@ -1171,8 +1171,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
// `undefined` in the accumulator register, to skip the load in the baseline
// code.
-  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
-  __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+  __ Push(feedback_vector);
}
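This pairs with the first hunk: the out-of-line prologue used to push `undefined` alongside the feedback vector, seeding the first interpreter register slot, which is why PrologueFillFrame started with `register_count -= 1`. Now only the feedback vector is pushed and PrologueFillFrame materializes every register slot itself. Roughly, the top of the new frame changes like this (layout sketch inferred from the diff, not authoritative):

    before:  ... | feedback vector | undefined   <- sp  (undefined doubled as
                                                         the first local slot)
    after:   ... | feedback vector               <- sp  (all locals are filled
                                                         by PrologueFillFrame)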
Label call_stack_guard;
@@ -1203,7 +1202,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
-    __ Pop(fp, ra);
+    __ Pop(ra, fp);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ Trap();
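The Pop fix is an ordering bug: with V8's paired push/pop helpers, `Pop` must name the registers in the same order as the matching `Push`, because the first-named register occupies the higher stack slot. The frame being dropped here was built with `Push(ra, fp)`, so `Pop(fp, ra)` silently swapped the return address and frame pointer. A toy model of the convention (assumed from the mips64/riscv64 macro-assembler style, heavily simplified):

    // Toy model, not V8 code: Push(a, b) and Pop(a, b) must mirror each other.
    #include <cassert>

    struct Stack {
      unsigned long slots[8];
      int sp = 8;
      void Push2(unsigned long a, unsigned long b) {
        slots[--sp] = a;  // first-named register lands at the higher address
        slots[--sp] = b;
      }
      void Pop2(unsigned long* a, unsigned long* b) {
        *b = slots[sp++];  // mirrors Push2
        *a = slots[sp++];
      }
    };

    int main() {
      Stack s;
      unsigned long ra = 0x1111, fp = 0x2222, r1, r2;
      s.Push2(ra, fp);   // prologue: Push(ra, fp)
      s.Pop2(&r1, &r2);  // teardown must be Pop(ra, fp); naming them (fp, ra)
      assert(r1 == ra && r2 == fp);  // would hand each register the other's value
    }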
@@ -1212,14 +1211,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ bind(&call_stack_guard);
{
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
-    Register new_target = descriptor.GetRegisterParameter(
-        BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
-    __ Push(zero_reg, new_target);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ Pop(new_target, zero_reg);
+    __ Push(kJavaScriptCallNewTargetRegister);
+    __ SmiTag(frame_size);
+    __ Push(frame_size);
+    __ CallRuntime(Runtime::kStackGuardWithGap);
+    __ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
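Switching from Runtime::kStackGuard to Runtime::kStackGuardWithGap also changes what gets passed: instead of a zero_reg filler next to new_target, the builtin now hands the runtime the Smi-tagged frame_size, so the guard can verify there is headroom for the frame about to be built rather than just checking the current stack pointer. A minimal sketch of the distinction (the function shapes are illustrative, not the real runtime signatures):

    // Illustrative only: the difference between the two guard flavors.
    #include <cstdint>

    bool StackGuard(uintptr_t sp, uintptr_t limit) {
      return sp >= limit;        // only the current stack pointer is checked
    }

    bool StackGuardWithGap(uintptr_t sp, uintptr_t limit, uintptr_t gap) {
      return sp >= limit + gap;  // additionally reserves `gap` bytes for the
    }                            // frame the baseline prologue will push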
@@ -1466,31 +1464,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
-    __ Ld(feedback_vector,
+    __ LoadTaggedPointerField(
+        feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ Ld(feedback_vector,
-          FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedPointerField(
+        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
-    __ Ld(scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
-    __ Lh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+    __ LoadTaggedPointerField(
+        scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+    __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, scratch,
Operand(FEEDBACK_VECTOR_TYPE));
-    // Read off the optimization state in the feedback vector.
-    // TODO(v8:11429): Is this worth doing here? Baseline code will check it
-    // anyway...
-    __ Ld(optimization_state,
-          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-    // Check if there is optimized code or a optimization marker that needes to
-    // be processed.
-    __ And(
-        scratch, optimization_state,
-        Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
-    __ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+    // Check for an optimization marker.
+    LoadOptimizationStateAndJumpIfNeedsProcessing(
+        masm, optimization_state, feedback_vector,
+        &has_optimized_code_or_marker);
// Load the baseline code into the closure.
__ LoadTaggedPointerField(
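Two separate corrections in this hunk. The raw `Ld` loads become `LoadTaggedPointerField`, which remains correct when pointer compression is enabled (it may lower to a plain load where compression is off, but the tagged-field helper is the portable spelling). And the instance-type load becomes `Lhu` rather than `Lh`: the instance type is an unsigned 16-bit field, and a sign-extending load would corrupt any value with the top bit set before the comparison against FEEDBACK_VECTOR_TYPE. A plain C++ illustration of that failure mode (the sample value is made up):

    // Why Lhu (zero-extend) and not Lh (sign-extend) for a uint16_t field.
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint16_t instance_type = 0x80a7;          // made-up value, top bit set
      int64_t via_lh = (int16_t)instance_type;  // sign-extended: negative
      int64_t via_lhu = instance_type;          // zero-extended: 0x80a7
      printf("Lh=%lld Lhu=%lld match=%d\n", (long long)via_lh,
             (long long)via_lhu, via_lh == (int64_t)0x80a7);
      // With Lh, an equality compare against the zero-extended constant
      // can never succeed for values >= 0x8000.
    }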
@@ -2713,6 +2705,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call
@@ -2786,6 +2779,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
}
__ Ret();
}
+#endif  // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
@@ -3634,7 +3628,6 @@ namespace {
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
bool next_bytecode,
bool is_osr = false) {
-  __ Push(zero_reg, kInterpreterAccumulatorRegister);
Label start;
__ bind(&start);
@@ -3661,7 +3654,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));
// Start with bytecode as there is no baseline code.
-  __ Pop(zero_reg, kInterpreterAccumulatorRegister);
Builtin builtin_id = next_bytecode
? Builtin::kInterpreterEnterAtNextBytecode
: Builtin::kInterpreterEnterAtBytecode;
@@ -3695,7 +3687,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register type = temps.Acquire();
__ GetObjectType(feedback_vector, type, type);
-  __ Branch(&install_baseline_code, eq, type, Operand(FEEDBACK_VECTOR_TYPE));
+  __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));
// Save BytecodeOffset from the stack frame.
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
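The eq-to-ne flip fixes inverted control flow: execution should fall through to the fast path only when the object really is a feedback vector, and jump to install_baseline_code otherwise. The corrected logic, written as ordinary C++ (a simplification, not the builtin itself):

    #include <cstdio>

    enum InstanceType { FEEDBACK_VECTOR_TYPE, OTHER_TYPE };

    void Enter(InstanceType type) {
      if (type != FEEDBACK_VECTOR_TYPE) {  // 'ne' -- the corrected condition
        printf("install baseline code first\n");
        return;
      }
      printf("enter baseline code directly\n");  // fast path
    }

    int main() {
      Enter(OTHER_TYPE);            // previously took the fast path by mistake
      Enter(FEEDBACK_VECTOR_TYPE);  // previously bounced to installation
    }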
@@ -3705,7 +3697,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
feedback_vector = no_reg;
// Compute baseline pc for bytecode offset.
-  __ Push(zero_reg, kInterpreterAccumulatorRegister);
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
get_baseline_pc_extref =
@@ -3738,6 +3729,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get bytecode array from the stack frame.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = a0;
Register arg_reg_2 = a1;
@@ -3749,13 +3741,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Add64(code_obj, code_obj, kReturnRegister0);
-  __ Pop(kInterpreterAccumulatorRegister, zero_reg);
+  __ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
// Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
-    __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+    __ Ld(kInterpreterBytecodeArrayRegister,
+          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+    __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
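Besides dropping the zero_reg filler from the accumulator spill, this hunk fixes two subtler issues: kInterpreterBytecodeArrayRegister is reloaded from the frame because the intervening CallCFunction is free to clobber it, and the OSR loop-nesting level is now written with Sh, a 16-bit store. The field is only two bytes wide, so the old 8-byte Sd would also have zeroed whatever sits in the following six bytes of the BytecodeArray header. A C++ model of that over-wide store (the layout is invented purely for illustration):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct Header {                // invented layout, not BytecodeArray's
      uint16_t osr_nesting_level;  // the field being reset to zero
      uint16_t age;                // innocent neighbors an Sd would flatten
      uint32_t flags;
    };

    int main() {
      Header h{3, 7, 64};
      uint64_t zero64 = 0;
      std::memcpy(&h, &zero64, 8);  // Sd-equivalent: clobbers all three fields
      printf("after Sd: age=%u flags=%u\n", h.age, h.flags);  // 0 and 0

      h = Header{3, 7, 64};
      uint16_t zero16 = 0;
      std::memcpy(&h, &zero16, 2);  // Sh-equivalent: touches only the field
      printf("after Sh: age=%u flags=%u\n", h.age, h.flags);  // 7 and 64
    }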
@@ -3780,8 +3774,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+    __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ Branch(&start);
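The added Push/Pop pair follows the same rule as the other accumulator spills in this file: Runtime::kInstallBaselineCode is a full runtime call and preserves no JavaScript-visible registers, so the live accumulator has to be saved across it. Note that the pop happens before the FrameScope closes, keeping the INTERNAL frame balanced.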
@@ -151,6 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Branch(Label* target);
void Branch(int32_t target);
+  void BranchLong(Label* L);
void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
Label::Distance near_jump = Label::kFar);
void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
@@ -943,7 +944,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register rs, const Operand& rt);
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
-  void BranchLong(Label* L);
void BranchAndLinkLong(Label* L);
template <typename F_TYPE>