Commit 2d3d7470 authored by Liu Yu, committed by Commit Bot

[mips][sparkplug] Use return to jump to optimized code to keep the RSB balanced

In addition, extract the common code into
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot and cache the instance in a
register.

Port: af3c5307
Port: 89ea44bf
Port: adf035fb
Change-Id: I3fde5b0995ea8aa51faeb3fd743cebef748ba745
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2710212
Auto-Submit: Liu yu <liuyu@loongson.cn>
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/master@{#72884}
parent b5678755
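
Note on the title: "use return to jump" means the generated code transfers
control to optimized code with a return-style sequence rather than a plain
jump, so the CPU's return-stack buffer (RSB) keeps seeing matched call/return
pairs. A minimal sketch of the idea in MacroAssembler-style pseudocode follows;
it is illustrative only, and the exact MIPS sequence used by this CL is not
part of the visible hunks.

// Plain jump: the call that entered the trampoline is never paired with a
// return, so the return-stack buffer drifts out of sync.
//   __ Jump(optimized_code_entry);
//
// Return-based jump: treat the optimized code entry as the return address
// and "return" into it, keeping call/return predictions balanced.
//   __ mov(ra, optimized_code_entry);
//   __ Ret();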
@@ -957,6 +957,28 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
   __ bind(&end);
 }
 
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+    MacroAssembler* masm, Register optimization_state,
+    Register feedback_vector) {
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is available
+  __ andi(t1, optimization_state,
+          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+  __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));
+
+  Register optimization_marker = optimization_state;
+  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+  __ bind(&maybe_has_optimized_code);
+  Register optimized_code_entry = optimization_state;
+  __ Lw(optimization_marker,
+        FieldMemOperand(feedback_vector,
+                        FeedbackVector::kMaybeOptimizedCodeOffset));
+
+  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
+}
+
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.
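
For readability, the control flow of the extracted helper can be summarized as
pseudocode (illustrative only; names follow the assembly above, and
optimization_state, optimization_marker and optimized_code_entry all alias a
single register):

// if (optimization_state & kHasCompileOptimizedOrLogFirstExecutionMarker) {
//   // A marker is set: decode it and let MaybeOptimizeCode act on it
//   // (it tail-calls the runtime for the marker it finds).
//   optimization_marker = DecodeOptimizationMarkerBits(optimization_state);
//   MaybeOptimizeCode(feedback_vector, optimization_marker);
// }
// // maybe_has_optimized_code:
// optimized_code_entry = feedback_vector->maybe_optimized_code;  // weak slot
// TailCallOptimizedCodeSlot(optimized_code_entry);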
@@ -1160,26 +1182,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ jmp(&after_stack_check_interrupt);
 
   __ bind(&has_optimized_code_or_marker);
-  Label maybe_has_optimized_code;
-  // Check if optimized code marker is available
-  __ andi(t1, optimization_state,
-          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
-  __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));
-
-  Register optimization_marker = optimization_state;
-  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
-  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
-  // Fall through if there's no runnable optimized code.
-  __ jmp(&not_optimized);
-
-  __ bind(&maybe_has_optimized_code);
-  Register optimized_code_entry = optimization_state;
-  __ Lw(optimization_marker,
-        FieldMemOperand(feedback_vector,
-                        FeedbackVector::kMaybeOptimizedCodeOffset));
-
-  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
+  MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+                                               feedback_vector);
 
   __ bind(&compile_lazy);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -974,6 +974,28 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
   __ bind(&end);
 }
 
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+    MacroAssembler* masm, Register optimization_state,
+    Register feedback_vector) {
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is available
+  __ andi(t0, optimization_state,
+          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+  __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
+
+  Register optimization_marker = optimization_state;
+  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+  __ bind(&maybe_has_optimized_code);
+  Register optimized_code_entry = optimization_state;
+  __ Ld(optimization_marker,
+        FieldMemOperand(feedback_vector,
+                        FeedbackVector::kMaybeOptimizedCodeOffset));
+
+  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
+}
+
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.
@@ -1178,25 +1200,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ jmp(&after_stack_check_interrupt);
 
   __ bind(&has_optimized_code_or_marker);
-  Label maybe_has_optimized_code;
-  // Check if optimized code marker is available
-  __ andi(t0, optimization_state,
-          FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
-  __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
-
-  Register optimization_marker = optimization_state;
-  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
-  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
-  // Fall through if there's no runnable optimized code.
-  __ jmp(&not_optimized);
-
-  __ bind(&maybe_has_optimized_code);
-  Register optimized_code_entry = optimization_state;
-  __ Ld(optimization_marker,
-        FieldMemOperand(feedback_vector,
-                        FeedbackVector::kMaybeOptimizedCodeOffset));
-
-  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
+  MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+                                               feedback_vector);
 
   __ bind(&compile_lazy);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -227,7 +227,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
     // TODO(mips): Implement.
    UNIMPLEMENTED();
   }
-  void JumpCodeObject(Register code_object) override {
+  void JumpCodeObject(Register code_object,
+                      JumpMode jump_mode = JumpMode::kJump) override {
     // TODO(mips): Implement.
     UNIMPLEMENTED();
   }
@@ -250,7 +250,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
     // TODO(mips): Implement.
     UNIMPLEMENTED();
   }
-  void JumpCodeObject(Register code_object) override {
+  void JumpCodeObject(Register code_object,
+                      JumpMode jump_mode = JumpMode::kJump) override {
     // TODO(mips): Implement.
     UNIMPLEMENTED();
   }
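
For context, the jump_mode parameter added above comes from the ported CLs. A
sketch of the assumed interface follows; kPushAndReturn and the usage line are
assumptions based on those CLs, not code from this port (which leaves the
TurboAssembler stubs above UNIMPLEMENTED):

// Assumed shape of the JumpMode enum introduced by the ported CLs.
enum class JumpMode {
  kJump,           // plain jump to the code object's entry point
  kPushAndReturn,  // install the entry as a return address, then return
};

// A tail call into optimized code can then request the RSB-friendly variant:
//   __ JumpCodeObject(optimized_code_entry, JumpMode::kPushAndReturn);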
@@ -403,16 +403,19 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
   }
 }
 
-void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
-                                        int size) {
-  DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+  lw(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+                                        int32_t offset, int size) {
+  DCHECK_LE(0, offset);
   switch (size) {
     case 1:
-      lb(dst, MemOperand(dst, offset));
+      lb(dst, MemOperand(instance, offset));
       break;
     case 4:
-      lw(dst, MemOperand(dst, offset));
+      lw(dst, MemOperand(instance, offset));
       break;
     default:
       UNIMPLEMENTED();
@@ -420,8 +423,10 @@ void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
 }
 
 void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+                                                     Register instance,
                                                      int32_t offset) {
-  LoadFromInstance(dst, offset, kTaggedSize);
+  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+  lw(dst, MemOperand(instance, offset));
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {
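
A brief note on the new LoadTaggedPointerFromInstance body: these ports run
without pointer compression, so a tagged field occupies a full machine word,
which is exactly what the STATIC_ASSERT above checks. The tagged load can
therefore be a single pointer-sized load (sketch, not code from the CL):

// With kTaggedSize == kSystemPointerSize, a tagged field load is an ordinary
// pointer-sized load:
//   mips32:  lw(dst, MemOperand(instance, offset));
//   mips64:  Ld(dst, MemOperand(instance, offset));
// Under pointer compression the load would instead be a 32-bit load plus a
// decompression step, and the STATIC_ASSERT would fail to compile.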
@@ -385,19 +385,22 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
   }
 }
 
-void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
-                                        int size) {
-  DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+  Ld(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+                                        int offset, int size) {
+  DCHECK_LE(0, offset);
   switch (size) {
     case 1:
-      Lb(dst, MemOperand(dst, offset));
+      Lb(dst, MemOperand(instance, offset));
       break;
     case 4:
-      Lw(dst, MemOperand(dst, offset));
+      Lw(dst, MemOperand(instance, offset));
       break;
     case 8:
-      Ld(dst, MemOperand(dst, offset));
+      Ld(dst, MemOperand(instance, offset));
       break;
     default:
       UNIMPLEMENTED();
@@ -405,8 +408,10 @@ void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
 }
 
 void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+                                                     Register instance,
                                                      int32_t offset) {
-  LoadFromInstance(dst, offset, kTaggedSize);
+  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+  Ld(dst, MemOperand(instance, offset));
 }
 
 void LiftoffAssembler::SpillInstance(Register instance) {
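
Sketch of the "cache the instance in a register" part: the new interface lets
the caller load the wasm instance once (LoadInstanceFromFrame) and reuse that
register across accesses, where the old signature gave each LoadFromInstance
call only dst and offset to work with. The caller-side pattern below is an
illustrative assumption (register choice and cache lookup are hypothetical),
not code from this CL:

// Illustrative caller-side pattern enabled by the new signatures:
//   Register instance = <cached instance register, if any>;
//   if (instance == no_reg) {
//     instance = kScratchReg;            // hypothetical register choice
//     LoadInstanceFromFrame(instance);   // single load from the frame slot
//   }
//   LoadFromInstance(dst, instance, offset, kSystemPointerSize);
//   LoadTaggedPointerFromInstance(dst2, instance, other_offset);
// With the old LoadFromInstance(dst, offset, size) signature there was no way
// to reuse an already-loaded instance pointer across consecutive accesses.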