Commit d8dc66f9 authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64][sparkplug] Fix Sparkplug frame size verification failure

Change-Id: I7481749ba3d5c41d7405b0d88a51defbc8bec9d6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3093009
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Reviewed-by: Ji Qiu <qiuji@iscas.ac.cn>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/master@{#76277}
parent 52720f63
......@@ -109,30 +109,19 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
if (masm()->options().short_builtin_calls) {
__ CallBuiltin(builtin);
} else {
ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("call", builtin));
Register temp = t6;
__ LoadEntryFromBuiltin(builtin, temp);
__ Call(temp);
}
ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("call", builtin));
Register temp = t6;
__ LoadEntryFromBuiltin(builtin, temp);
__ Call(temp);
}
void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
if (masm()->options().short_builtin_calls) {
// Generate pc-relative jump.
__ TailCallBuiltin(builtin);
} else {
ASM_CODE_COMMENT_STRING(
masm_, __ CommentForOffHeapTrampoline("tail call", builtin));
// t6 is used for function calls on RISCV64,
// for example 'jalr t6' or 'jal t6'
Register temp = t6;
__ LoadEntryFromBuiltin(builtin, temp);
__ Jump(temp);
}
ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("tail call", builtin));
Register temp = t6;
__ LoadEntryFromBuiltin(builtin, temp);
__ Jump(temp);
}
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
......@@ -140,7 +129,7 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ And(tmp, value, Operand(mask));
__ Branch(target, AsMasmCondition(cc), tmp, Operand(mask));
__ Branch(target, AsMasmCondition(cc), tmp, Operand(zero_reg));
}
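The corrected comparison above tests the masked value against zero_reg rather than against the mask itself: TestAndBranch is meant to branch on how (value & mask) compares with zero, and comparing against the mask only agrees with that when every mask bit happens to be set. A minimal standalone sketch of the difference for a multi-bit mask (plain C++, not V8 code):
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t value = 0b0100;    // only one of the two mask bits is set
  const uint64_t mask = 0b0110;
  const uint64_t masked = value & mask;

  // Intended contract: branch on (value & mask) <cc> 0.
  bool ne_zero = (masked != 0);     // true  - some mask bit is set
  bool eq_zero = (masked == 0);     // false - the bits are not all clear

  // The old comparison: (value & mask) <cc> mask.
  bool eq_mask = (masked == mask);  // false - would require *all* mask bits
  bool ne_mask = (masked != mask);  // true here, but for the wrong reason

  std::printf("ne_zero=%d eq_zero=%d eq_mask=%d ne_mask=%d\n", ne_zero, eq_zero,
              eq_mask, ne_mask);
  return 0;
}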
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
......@@ -161,6 +150,11 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
if (FLAG_debug_code) {
__ AssertNotSmi(map);
__ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
}
__ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
......@@ -182,44 +176,28 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register temp = temps.AcquireScratch();
// todo: compress pointer
__ AssertSmi(lhs);
__ AssertSmi(rhs);
if (COMPRESS_POINTERS_BOOL) {
__ Sub32(temp, lhs, rhs);
} else {
__ Sub64(temp, lhs, rhs);
}
__ Branch(target, AsMasmCondition(cc), temp, Operand(zero_reg));
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
// todo: compress pointer
ScratchRegisterScope temps(this);
Register tmp1 = temps.AcquireScratch();
Register tmp2 = temps.AcquireScratch();
__ Ld(tmp1, operand);
if (COMPRESS_POINTERS_BOOL) {
__ Sub32(tmp2, value, tmp1);
} else {
__ Sub64(tmp2, value, tmp1);
}
__ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
Register scratch = temps.AcquireScratch();
__ Ld(scratch, operand);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
// todo: compress pointer
ScratchRegisterScope temps(this);
Register tmp1 = temps.AcquireScratch();
Register tmp2 = temps.AcquireScratch();
__ Ld(tmp1, operand);
if (COMPRESS_POINTERS_BOOL) {
__ Sub32(tmp2, tmp1, value);
} else {
__ Sub64(tmp2, tmp1, value);
}
__ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
Register scratch = temps.AcquireScratch();
__ Ld(scratch, operand);
__ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
}
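JumpIfSmi and both JumpIfTagged overloads above now hand the two operands straight to Branch instead of subtracting into a scratch register and comparing the difference with zero. The commit does not state its motivation for this cleanup; one known pitfall of the subtract-then-test pattern, shown in this standalone sketch (plain C++, not V8 code), is that it gives the wrong answer for signed conditions once the subtraction wraps:
#include <cassert>
#include <cstdint>

int main() {
  const int64_t lhs = INT64_MIN;  // some word-sized value
  const int64_t rhs = 1;

  bool direct = lhs < rhs;  // true: the comparison Branch now performs

  // Subtract-then-test: lhs - rhs wraps around to INT64_MAX, so testing the
  // difference against zero claims lhs >= rhs.
  const int64_t diff = static_cast<int64_t>(static_cast<uint64_t>(lhs) -
                                            static_cast<uint64_t>(rhs));
  bool via_subtract = diff < 0;  // false

  assert(direct && !via_subtract);
  return 0;
}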
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
......@@ -267,137 +245,51 @@ inline Register ToRegister(BaselineAssembler* basm,
return reg;
}
template <typename... Args>
struct CountPushHelper;
template <>
struct CountPushHelper<> {
static int Count() { return 0; }
};
template <typename Arg, typename... Args>
struct CountPushHelper<Arg, Args...> {
static int Count(Arg arg, Args... args) {
return 1 + CountPushHelper<Args...>::Count(args...);
}
};
template <typename... Args>
struct CountPushHelper<interpreter::RegisterList, Args...> {
static int Count(interpreter::RegisterList list, Args... args) {
return list.register_count() + CountPushHelper<Args...>::Count(args...);
}
};
template <typename... Args>
struct PushAllHelper;
template <typename... Args>
void PushAll(BaselineAssembler* basm, Args... args) {
PushAllHelper<Args...>::Push(basm, args...);
}
template <typename... Args>
void PushAllReverse(BaselineAssembler* basm, Args... args) {
PushAllHelper<Args...>::PushReverse(basm, args...);
}
template <>
struct PushAllHelper<> {
static void Push(BaselineAssembler* basm) {}
static void PushReverse(BaselineAssembler* basm) {}
static int Push(BaselineAssembler* basm) { return 0; }
static int PushReverse(BaselineAssembler* basm) { return 0; }
};
inline void PushSingle(MacroAssembler* masm, RootIndex source) {
masm->PushRoot(source);
}
inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
masm->Push(object);
}
inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
masm->li(kScratchReg, (int64_t)(immediate));
PushSingle(masm, kScratchReg);
}
inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
masm->li(kScratchReg, static_cast<int64_t>(value.ptr()));
PushSingle(masm, kScratchReg);
}
inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
masm->Ld(kScratchReg, operand);
PushSingle(masm, kScratchReg);
}
inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
}
template <typename Arg>
struct PushAllHelper<Arg> {
static void Push(BaselineAssembler* basm, Arg arg) {
PushSingle(basm->masm(), arg);
static int Push(BaselineAssembler* basm, Arg arg) {
BaselineAssembler::ScratchRegisterScope scope(basm);
basm->masm()->Push(ToRegister(basm, &scope, arg));
return 1;
}
static void PushReverse(BaselineAssembler* basm, Arg arg) {
// Push the padding register to round up the number of values pushed.
static int PushReverse(BaselineAssembler* basm, Arg arg) {
return Push(basm, arg);
}
};
template <typename Arg1, typename Arg2, typename... Args>
struct PushAllHelper<Arg1, Arg2, Args...> {
static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
Args... args) {
{
BaselineAssembler::ScratchRegisterScope scope(basm);
basm->masm()->Push(ToRegister(basm, &scope, arg1),
ToRegister(basm, &scope, arg2));
}
PushAll(basm, args...);
}
static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
Args... args) {
PushAllReverse(basm, args...);
{
BaselineAssembler::ScratchRegisterScope scope(basm);
basm->masm()->Push(ToRegister(basm, &scope, arg2),
ToRegister(basm, &scope, arg1));
}
}
};
// Currently RegisterLists are always the last argument, so we don't
// specialize for the case where they're not. We do still specialize for the
// aligned and unaligned cases.
template <typename Arg>
struct PushAllHelper<Arg, interpreter::RegisterList> {
static void Push(BaselineAssembler* basm, Arg arg,
interpreter::RegisterList list) {
DCHECK_EQ(list.register_count() % 2, 1);
PushAll(basm, arg, list[0], list.PopLeft());
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
PushAllHelper<Arg>::Push(basm, arg);
return 1 + PushAllHelper<Args...>::Push(basm, args...);
}
static void PushReverse(BaselineAssembler* basm, Arg arg,
interpreter::RegisterList list) {
if (list.register_count() == 0) {
PushAllReverse(basm, arg);
} else {
PushAllReverse(basm, arg, list[0], list.PopLeft());
}
static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
PushAllHelper<Arg>::Push(basm, arg);
return nargs + 1;
}
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
DCHECK_EQ(list.register_count() % 2, 0);
for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
PushAll(basm, list[reg_index], list[reg_index + 1]);
static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
return list.register_count();
}
static void PushReverse(BaselineAssembler* basm,
interpreter::RegisterList list) {
int reg_index = list.register_count() - 1;
if (reg_index % 2 == 0) {
// Push the padding register to round up the amount of values pushed.
PushAllReverse(basm, list[reg_index]);
reg_index--;
}
for (; reg_index >= 1; reg_index -= 2) {
PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
static int PushReverse(BaselineAssembler* basm,
interpreter::RegisterList list) {
for (int reg_index = list.register_count() - 1; reg_index >= 0;
--reg_index) {
PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
return list.register_count();
}
};
......@@ -414,10 +306,9 @@ struct PopAllHelper<Register> {
}
};
template <typename... T>
struct PopAllHelper<Register, Register, T...> {
static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
T... tail) {
basm->masm()->Pop(reg1, reg2);
struct PopAllHelper<Register, T...> {
static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
PopAllHelper<Register>::Pop(basm, reg);
PopAllHelper<T...>::Pop(basm, tail...);
}
};
......@@ -426,20 +317,12 @@ struct PopAllHelper<Register, Register, T...> {
template <typename... T>
int BaselineAssembler::Push(T... vals) {
// We have to count the pushes first, to decide whether to add padding before
// the first push.
int push_count = detail::CountPushHelper<T...>::Count(vals...);
if (push_count % 2 == 0) {
detail::PushAll(this, vals...);
} else {
detail::PushAll(this, vals...);
}
return push_count;
return detail::PushAllHelper<T...>::Push(this, vals...);
}
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
detail::PushAllReverse(this, vals...);
detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
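With the alignment padding gone, the up-front CountPushHelper pass is unnecessary: each PushAllHelper specialization now pushes one stack slot per value (one per register for a RegisterList) and returns how many slots it pushed, and Push/PushReverse simply return that sum. A rough standalone sketch of the shape of that recursion (plain C++, not V8 code; FakeList and the int vector merely stand in for interpreter::RegisterList and the machine stack):
#include <cstdio>
#include <vector>

struct FakeList { int count; };     // stands in for interpreter::RegisterList

static std::vector<int> stack;      // stands in for the machine stack

int PushOne(int v) {
  stack.push_back(v);
  return 1;                         // one slot pushed
}
int PushOne(FakeList list) {
  for (int i = 0; i < list.count; ++i) stack.push_back(0);
  return list.count;                // one slot per register in the list
}

int PushAll() { return 0; }
template <typename Arg, typename... Args>
int PushAll(Arg arg, Args... args) {
  int pushed = PushOne(arg);         // push the leftmost argument first
  return pushed + PushAll(args...);  // then recurse, summing the slot counts
}

int main() {
  int n = PushAll(1, 2, FakeList{3});
  std::printf("pushed %d slots\n", n);  // prints: pushed 5 slots
  return 0;
}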
template <typename... T>
......@@ -461,7 +344,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
__ Ld(output, FieldMemOperand(source, offset));
__ Lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
......@@ -495,11 +378,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Ld(interrupt_budget,
__ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
__ Add64(interrupt_budget, interrupt_budget, weight);
__ Sd(interrupt_budget,
__ Add32(interrupt_budget, interrupt_budget, weight);
__ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label) {
DCHECK_LT(weight, 0);
......@@ -517,11 +400,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Ld(interrupt_budget,
__ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
__ Add64(interrupt_budget, interrupt_budget, weight);
__ Sd(interrupt_budget,
__ Add32(interrupt_budget, interrupt_budget, weight);
__ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label)
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(weight));
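Both AddToInterruptBudgetAndJumpIfNotExceeded variants switch from Ld/Add64/Sd to Lw/Add32/Sw, matching the 32-bit width of the interrupt-budget field; a 64-bit access at that offset also covers the four bytes that follow it. A standalone sketch of the difference (plain C++, not V8 code; FakeFeedbackCell's layout is illustrative only, and the result assumes a little-endian host, as RISC-V is):
#include <cstdint>
#include <cstdio>
#include <cstring>

struct FakeFeedbackCell {    // illustrative layout, not V8's FeedbackCell
  int32_t interrupt_budget;  // the 32-bit budget slot
  int32_t neighbour;         // whatever happens to live right after it
};

int main() {
  FakeFeedbackCell cell{1000, 0x7777};

  // 64-bit store at the budget's offset (the old Sd): rewrites eight bytes,
  // clobbering the neighbouring field.
  const uint64_t wide = 1000 - 42;
  std::memcpy(&cell, &wide, sizeof(wide));
  std::printf("Sd-style: budget=%d neighbour=0x%x\n", cell.interrupt_budget,
              static_cast<unsigned>(cell.neighbour));  // neighbour becomes 0

  // 32-bit store (the new Sw): only the budget field is touched.
  cell = {1000, 0x7777};
  const int32_t narrow = 1000 - 42;
  std::memcpy(&cell.interrupt_budget, &narrow, sizeof(narrow));
  std::printf("Sw-style: budget=%d neighbour=0x%x\n", cell.interrupt_budget,
              static_cast<unsigned>(cell.neighbour));  // neighbour unchanged
  return 0;
}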
......@@ -556,20 +439,19 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
__ auipc(temp, Hi20); // Read PC + Hi20 into t6
__ lui(temp, Lo12); // jump PC + Hi20 + Lo12
__ addi(temp, temp, Lo12); // jump PC + Hi20 + Lo12
int entry_size_log2 = 2;
Register temp2 = scope.AcquireScratch();
__ CalcScaledAddress(temp2, temp, reg, entry_size_log2);
int entry_size_log2 = 3;
__ CalcScaledAddress(temp, temp, reg, entry_size_log2);
__ Jump(temp);
{
TurboAssembler::BlockTrampolinePoolScope(masm());
__ BlockTrampolinePoolFor(num_labels * kInstrSize);
__ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
__ bind(&table);
for (int i = 0; i < num_labels; ++i) {
__ Branch(labels[i]);
__ BranchLong(labels[i]);
}
DCHECK_EQ(num_labels * kInstrSize, __ InstructionsGeneratedSince(&table));
DCHECK_EQ(num_labels * 2, __ InstructionsGeneratedSince(&table));
__ bind(&fallthrough);
}
}
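Two things change in Switch. First, the second lui becomes an addi, which is what the auipc-based pc-relative address actually needs: auipc adds Hi20 << 12 to the pc, and addi then adds the sign-extended low 12 bits, which is also why Hi20 is pre-rounded by 0x800. Second, each jump-table entry is now a two-instruction BranchLong, so the table is scaled by 8-byte entries (entry_size_log2 = 3) and the trampoline pool is blocked for num_labels * kInstrSize * 2. A standalone sketch of the Hi20/Lo12 arithmetic (plain C++, not V8 code):
#include <cassert>
#include <cstdint>

// Split an offset the way auipc/addi consume it, then recombine it:
//   auipc rd, Hi20      ->  rd = pc + (Hi20 << 12)
//   addi  rd, rd, Lo12      (hardware sign-extends the 12-bit immediate)
int64_t SplitAndRecombine(int64_t offset) {
  int64_t lo12 = offset & 0xFFF;
  if (lo12 >= 0x800) lo12 -= 0x1000;       // what addi sees: signed low 12 bits
  int64_t hi20 = (offset - lo12) >> 12;    // equivalent to (offset + 0x800) >> 12
  return (hi20 << 12) + lo12;
}

int main() {
  // Offsets whose low 12 bits read as negative to addi (e.g. 0xFFF == -1)
  // still round-trip, because Hi20 was rounded up by 0x800 to compensate.
  for (int64_t off : {int64_t{0}, int64_t{0x7FF}, int64_t{0x800},
                      int64_t{0xFFF}, int64_t{0x12345FFF}, int64_t{-4096},
                      int64_t{-1}}) {
    assert(SplitAndRecombine(off) == off);
  }
  return 0;
}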
......@@ -598,7 +480,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->Push(kJSFunctionRegister);
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
__ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
__ masm()->SmiUntag(params_size);
__ Bind(&skip_interrupt_label);
......
......@@ -37,69 +37,35 @@ void BaselineCompiler::PrologueFillFrame() {
const int kLoopUnrollSize = 8;
const int new_target_index = new_target_or_generator_register.index();
const bool has_new_target = new_target_index != kMaxInt;
// BaselineOutOfLinePrologue already pushed one undefined.
register_count -= 1;
if (has_new_target) {
if (new_target_index == 0) {
// Oops, need to fix up that undefined that BaselineOutOfLinePrologue
// pushed.
__ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
} else {
DCHECK_LE(new_target_index, register_count);
int index = 1;
for (; index + 2 <= new_target_index; index += 2) {
__ masm()->Push(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister);
}
if (index == new_target_index) {
__ masm()->Push(kJavaScriptCallNewTargetRegister,
kInterpreterAccumulatorRegister);
} else {
DCHECK_EQ(index, new_target_index - 1);
__ masm()->Push(kInterpreterAccumulatorRegister,
kJavaScriptCallNewTargetRegister);
}
// We pushed "index" registers, minus the one the prologue pushed, plus
// the two registers that included new_target.
register_count -= (index - 1 + 2);
DCHECK_LE(new_target_index, register_count);
__ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
for (int i = 0; i < new_target_index; i++) {
__ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
// Push new_target_or_generator.
__ Push(kJavaScriptCallNewTargetRegister);
register_count -= new_target_index + 1;
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
for (int i = 0; i < register_count; i += 2) {
__ masm()->Push(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister);
__ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
for (int i = 0; i < register_count; ++i) {
__ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
} else {
BaselineAssembler::ScratchRegisterScope temps(&basm_);
Register scratch = temps.AcquireScratch();
// Extract the first few registers to round to the unroll size.
int first_registers = register_count % kLoopUnrollSize;
for (int i = 0; i < first_registers; i += 2) {
__ masm()->Push(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister);
}
__ Move(scratch, register_count / kLoopUnrollSize);
// We enter the loop unconditionally, so make sure we need to loop at least
// once.
DCHECK_GT(register_count / kLoopUnrollSize, 0);
Label loop;
__ Bind(&loop);
for (int i = 0; i < kLoopUnrollSize; i += 2) {
__ masm()->Push(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister);
__ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
for (int i = 0; i < register_count; ++i) {
__ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
__ masm()->Branch(&loop, gt, scratch, Operand(1));
}
}
void BaselineCompiler::VerifyFrameSize() {
ASM_CODE_COMMENT(&masm_);
__ masm()->Add64(kScratchReg, sp,
RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
bytecode_->frame_size(),
2 * kSystemPointerSize));
Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
bytecode_->frame_size()));
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
Operand(fp));
}
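This is the check the commit title is about: the prologue above now fills exactly register_count slots with no alignment padding, so VerifyFrameSize drops the RoundUp to 2 * kSystemPointerSize and expects sp to sit exactly kFixedFrameSizeFromFp + frame_size bytes below fp. A worked standalone sketch of why the rounded expectation cannot match an unpadded frame (plain C++, not V8 code; the constants are illustrative, not V8's real values):
#include <cassert>
#include <cstdint>

int main() {
  const int64_t kFixedFrameSizeFromFp = 4 * 8;  // illustrative
  const int64_t frame_size = 9 * 8;             // e.g. nine 8-byte registers
  const int64_t fp = 0x10000;

  // The prologue reserves exactly frame_size bytes below the fixed frame.
  const int64_t sp = fp - kFixedFrameSizeFromFp - frame_size;

  // New check: adding the exact frame size to sp lands back on fp.
  assert(sp + kFixedFrameSizeFromFp + frame_size == fp);

  // Old check: rounding the expected size up to 16 bytes overshoots fp by 8
  // whenever the total is only 8-byte aligned - the mismatch behind the
  // failing frame-size verification.
  auto RoundUp16 = [](int64_t v) { return (v + 15) & ~int64_t{15}; };
  assert(sp + RoundUp16(kFixedFrameSizeFromFp + frame_size) != fp);
  return 0;
}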
......
......@@ -1160,9 +1160,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// store the bytecode offset.
if (FLAG_debug_code) {
UseScratchRegisterScope temps(masm);
Register type = temps.Acquire();
__ GetObjectType(feedback_vector, type, type);
__ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
Register invocation_count = temps.Acquire();
__ GetObjectType(feedback_vector, invocation_count, invocation_count);
__ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
Operand(FEEDBACK_VECTOR_TYPE));
}
// Our stack is currently aligned. We have to push something along with
......@@ -1171,8 +1171,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
// `undefined` in the accumulator register, to skip the load in the baseline
// code.
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Push(feedback_vector, kInterpreterAccumulatorRegister);
__ Push(feedback_vector);
}
Label call_stack_guard;
......@@ -1203,7 +1202,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
__ Pop(fp, ra);
__ Pop(ra, fp);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ Trap();
......@@ -1212,14 +1211,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ bind(&call_stack_guard);
{
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
Register new_target = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
__ Push(zero_reg, new_target);
__ CallRuntime(Runtime::kStackGuard);
__ Pop(new_target, zero_reg);
__ Push(kJavaScriptCallNewTargetRegister);
__ SmiTag(frame_size);
__ Push(frame_size);
__ CallRuntime(Runtime::kStackGuardWithGap);
__ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
......@@ -1466,31 +1464,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ Ld(scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Lh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ LoadTaggedPointerField(
scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, scratch,
Operand(FEEDBACK_VECTOR_TYPE));
// Read off the optimization state in the feedback vector.
// TODO(v8:11429): Is this worth doing here? Baseline code will check it
// anyway...
__ Ld(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
// Check if there is optimized code or an optimization marker that needs to
// be processed.
__ And(
scratch, optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
__ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
// Check for an optimization marker.
LoadOptimizationStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector,
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
__ LoadTaggedPointerField(
......@@ -2713,6 +2705,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call
......@@ -2786,6 +2779,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
}
__ Ret();
}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
......@@ -3634,7 +3628,6 @@ namespace {
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
bool next_bytecode,
bool is_osr = false) {
__ Push(zero_reg, kInterpreterAccumulatorRegister);
Label start;
__ bind(&start);
......@@ -3661,7 +3654,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));
// Start with bytecode as there is no baseline code.
__ Pop(zero_reg, kInterpreterAccumulatorRegister);
Builtin builtin_id = next_bytecode
? Builtin::kInterpreterEnterAtNextBytecode
: Builtin::kInterpreterEnterAtBytecode;
......@@ -3695,7 +3687,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register type = temps.Acquire();
__ GetObjectType(feedback_vector, type, type);
__ Branch(&install_baseline_code, eq, type, Operand(FEEDBACK_VECTOR_TYPE));
__ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));
// Save BytecodeOffset from the stack frame.
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
......@@ -3705,7 +3697,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
feedback_vector = no_reg;
// Compute baseline pc for bytecode offset.
__ Push(zero_reg, kInterpreterAccumulatorRegister);
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
get_baseline_pc_extref =
......@@ -3738,6 +3729,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get bytecode array from the stack frame.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = a0;
Register arg_reg_2 = a1;
......@@ -3749,13 +3741,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Add64(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister, zero_reg);
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
// Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
......@@ -3780,8 +3774,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
__ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ Branch(&start);
......
......@@ -151,6 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Branch(Label* target);
void Branch(int32_t target);
void BranchLong(Label* L);
void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
Label::Distance near_jump = Label::kFar);
void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
......@@ -943,7 +944,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register rs, const Operand& rt);
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
void BranchLong(Label* L);
void BranchAndLinkLong(Label* L);
template <typename F_TYPE>
......