Commit d8dc66f9 authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64][sparkplug] Fix failing Sparkplug frame size verification
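
For reference, the check that fails here compares the stack pointer against the
frame pointer after the baseline prologue has filled the register frame. A
minimal standalone sketch of that invariant, assuming the constants named in
the diff below (illustrative C++ only, not the generated RISC-V code):

  // Sketch of what VerifyFrameSize asserts. The parameters stand in for
  // InterpreterFrameConstants::kFixedFrameSizeFromFp and
  // bytecode_->frame_size() from the diff below (assumed values).
  #include <cassert>
  #include <cstdint>

  void VerifyFrameSizeSketch(uintptr_t sp, uintptr_t fp,
                             int fixed_frame_size_from_fp, int frame_size) {
    // The stack grows downward, so fp sits above sp; adding the fixed frame
    // size and the bytecode frame size to sp must land exactly on fp.
    uintptr_t scratch = sp + fixed_frame_size_from_fp + frame_size;
    assert(scratch == fp);
  }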

Change-Id: I7481749ba3d5c41d7405b0d88a51defbc8bec9d6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3093009
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Reviewed-by: Ji Qiu <qiuji@iscas.ac.cn>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/master@{#76277}
parent 52720f63
@@ -37,69 +37,35 @@ void BaselineCompiler::PrologueFillFrame() {
   const int kLoopUnrollSize = 8;
   const int new_target_index = new_target_or_generator_register.index();
   const bool has_new_target = new_target_index != kMaxInt;
-  // BaselineOutOfLinePrologue already pushed one undefined.
-  register_count -= 1;
   if (has_new_target) {
-    if (new_target_index == 0) {
-      // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
-      // pushed.
-      __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
-    } else {
-      DCHECK_LE(new_target_index, register_count);
-      int index = 1;
-      for (; index + 2 <= new_target_index; index += 2) {
-        __ masm()->Push(kInterpreterAccumulatorRegister,
-                        kInterpreterAccumulatorRegister);
-      }
-      if (index == new_target_index) {
-        __ masm()->Push(kJavaScriptCallNewTargetRegister,
-                        kInterpreterAccumulatorRegister);
-      } else {
-        DCHECK_EQ(index, new_target_index - 1);
-        __ masm()->Push(kInterpreterAccumulatorRegister,
-                        kJavaScriptCallNewTargetRegister);
-      }
-      // We pushed "index" registers, minus the one the prologue pushed, plus
-      // the two registers that included new_target.
-      register_count -= (index - 1 + 2);
-    }
+    DCHECK_LE(new_target_index, register_count);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
+    for (int i = 0; i < new_target_index; i++) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+    }
+    // Push new_target_or_generator.
+    __ Push(kJavaScriptCallNewTargetRegister);
+    register_count -= new_target_index + 1;
   }
   if (register_count < 2 * kLoopUnrollSize) {
     // If the frame is small enough, just unroll the frame fill completely.
-    for (int i = 0; i < register_count; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    for (int i = 0; i < register_count; ++i) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
     }
   } else {
-    BaselineAssembler::ScratchRegisterScope temps(&basm_);
-    Register scratch = temps.AcquireScratch();
-
-    // Extract the first few registers to round to the unroll size.
-    int first_registers = register_count % kLoopUnrollSize;
-    for (int i = 0; i < first_registers; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
-    }
-    __ Move(scratch, register_count / kLoopUnrollSize);
-    // We enter the loop unconditionally, so make sure we need to loop at least
-    // once.
-    DCHECK_GT(register_count / kLoopUnrollSize, 0);
-    Label loop;
-    __ Bind(&loop);
-    for (int i = 0; i < kLoopUnrollSize; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    for (int i = 0; i < register_count; ++i) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
     }
-    __ masm()->Branch(&loop, gt, scratch, Operand(1));
   }
 }

 void BaselineCompiler::VerifyFrameSize() {
   ASM_CODE_COMMENT(&masm_);
   __ masm()->Add64(kScratchReg, sp,
-                   RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
-                               bytecode_->frame_size(),
-                           2 * kSystemPointerSize));
+                   Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+                           bytecode_->frame_size()));
   __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
                     Operand(fp));
 }
......
@@ -1160,9 +1160,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
     // store the bytecode offset.
     if (FLAG_debug_code) {
       UseScratchRegisterScope temps(masm);
-      Register type = temps.Acquire();
-      __ GetObjectType(feedback_vector, type, type);
-      __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
+      Register invocation_count = temps.Acquire();
+      __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+      __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
                 Operand(FEEDBACK_VECTOR_TYPE));
     }

     // Our stack is currently aligned. We have have to push something along with
@@ -1171,8 +1171,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
     // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
     // `undefined` in the accumulator register, to skip the load in the baseline
     // code.
-    __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
-    __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+    __ Push(feedback_vector);
   }

   Label call_stack_guard;
@@ -1203,7 +1202,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   {
     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
     // Drop the frame created by the baseline call.
-    __ Pop(fp, ra);
+    __ Pop(ra, fp);
     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
                                                  feedback_vector);
     __ Trap();
@@ -1212,14 +1211,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   __ bind(&call_stack_guard);
   {
     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
-    Register new_target = descriptor.GetRegisterParameter(
-        BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
     FrameScope frame_scope(masm, StackFrame::INTERNAL);
     // Save incoming new target or generator
-    __ Push(zero_reg, new_target);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ Pop(new_target, zero_reg);
+    __ Push(kJavaScriptCallNewTargetRegister);
+    __ SmiTag(frame_size);
+    __ Push(frame_size);
+    __ CallRuntime(Runtime::kStackGuardWithGap);
+    __ Pop(kJavaScriptCallNewTargetRegister);
   }
   __ Ret();
   temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
@@ -1466,31 +1464,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ Ld(feedback_vector,
-          FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ Ld(feedback_vector,
-          FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedPointerField(
+        feedback_vector,
+        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+    __ LoadTaggedPointerField(
+        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));

     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ Ld(scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
-    __ Lh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+    __ LoadTaggedPointerField(
+        scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+    __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
     __ Branch(&install_baseline_code, ne, scratch,
               Operand(FEEDBACK_VECTOR_TYPE));

-    // Read off the optimization state in the feedback vector.
-    // TODO(v8:11429): Is this worth doing here? Baseline code will check it
-    // anyway...
-    __ Ld(optimization_state,
-          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
-    // Check if there is optimized code or a optimization marker that needes to
-    // be processed.
-    __ And(
-        scratch, optimization_state,
-        Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
-    __ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+    // Check for an optimization marker.
+    LoadOptimizationStateAndJumpIfNeedsProcessing(
+        masm, optimization_state, feedback_vector,
+        &has_optimized_code_or_marker);

     // Load the baseline code into the closure.
     __ LoadTaggedPointerField(
@@ -2713,6 +2705,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
           RelocInfo::CODE_TARGET);
 }

+#if V8_ENABLE_WEBASSEMBLY
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in t0 by the jump table trampoline.
   // Convert to Smi for the runtime call
@@ -2786,6 +2779,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
   }
   __ Ret();
 }
+#endif  // V8_ENABLE_WEBASSEMBLY

 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
@@ -3634,7 +3628,6 @@ namespace {
 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
                                          bool next_bytecode,
                                          bool is_osr = false) {
-  __ Push(zero_reg, kInterpreterAccumulatorRegister);
   Label start;
   __ bind(&start);
@@ -3661,7 +3654,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));

   // Start with bytecode as there is no baseline code.
-  __ Pop(zero_reg, kInterpreterAccumulatorRegister);
   Builtin builtin_id = next_bytecode
                            ? Builtin::kInterpreterEnterAtNextBytecode
                            : Builtin::kInterpreterEnterAtBytecode;
@@ -3695,7 +3687,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   UseScratchRegisterScope temps(masm);
   Register type = temps.Acquire();
   __ GetObjectType(feedback_vector, type, type);
-  __ Branch(&install_baseline_code, eq, type, Operand(FEEDBACK_VECTOR_TYPE));
+  __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));

   // Save BytecodeOffset from the stack frame.
   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
@@ -3705,7 +3697,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   feedback_vector = no_reg;

   // Compute baseline pc for bytecode offset.
-  __ Push(zero_reg, kInterpreterAccumulatorRegister);
   ExternalReference get_baseline_pc_extref;
   if (next_bytecode || is_osr) {
     get_baseline_pc_extref =
@@ -3738,6 +3729,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // Get bytecode array from the stack frame.
   __ Ld(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Push(kInterpreterAccumulatorRegister);
   {
     Register arg_reg_1 = a0;
     Register arg_reg_2 = a1;
@@ -3749,13 +3741,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     __ CallCFunction(get_baseline_pc, 3, 0);
   }
   __ Add64(code_obj, code_obj, kReturnRegister0);
-  __ Pop(kInterpreterAccumulatorRegister, zero_reg);
+  __ Pop(kInterpreterAccumulatorRegister);

   if (is_osr) {
     // Reset the OSR loop nesting depth to disarm back edges.
     // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
     // Sparkplug here.
-    __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                    BytecodeArray::kOsrLoopNestingLevelOffset));
+    __ Ld(kInterpreterBytecodeArrayRegister,
+          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+    __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                    BytecodeArray::kOsrLoopNestingLevelOffset));
     Generate_OSREntry(masm, code_obj,
                       Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -3780,8 +3774,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   __ bind(&install_baseline_code);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister);
     __ Push(closure);
     __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+    __ Pop(kInterpreterAccumulatorRegister);
   }

   // Retry from the start after installing baseline code.
   __ Branch(&start);
......
@@ -151,6 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Branch(Label* target);
   void Branch(int32_t target);
+  void BranchLong(Label* L);
   void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
               Label::Distance near_jump = Label::kFar);
   void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
@@ -943,7 +944,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
                         Register rs, const Operand& rt);
   bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
                                Register rs, const Operand& rt);
-  void BranchLong(Label* L);
   void BranchAndLinkLong(Label* L);

   template <typename F_TYPE>
......