Commit a026e990 authored by Liu Yu, committed by V8 LUCI CQ

[loong64][mips][osr] Add an install-by-offset mechanism

Port commit b8473c52

Bug: v8:12161
Change-Id: I4cdf161356039b47ecf054aeba85c7a3d0d06de0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3583218
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Yu Liu <liuyu@loongson.cn>
Auto-Submit: Yu Liu <liuyu@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#79932}
parent c90fcc42
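
Background on the mechanism being ported: commit b8473c52 repacks the BytecodeArray's OSR urgency, OSR install target, and bytecode age into one contiguous, 32-bit-aligned word, which is why the new ResetBytecodeAgeAndOsrState helper in the diffs below can clear all three fields with a single store (St_w / sw / Sw). The following is a minimal, standalone C++ sketch of that packing assumption; the class name, accessors, and bit widths are illustrative, not the real BytecodeArray layout.

// A self-contained sketch (not V8 source) of the invariant asserted by
// BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits: the OSR urgency,
// the OSR install target, and the bytecode age share one aligned 32-bit word,
// so a single store of zero resets all three. Bit split is illustrative.
#include <cassert>
#include <cstdint>

class OsrStateAndAge {
 public:
  uint32_t osr_urgency() const { return bits_ & 0xFF; }
  uint32_t osr_install_target() const { return (bits_ >> 8) & 0xFF; }
  uint32_t bytecode_age() const { return bits_ >> 16; }

  void ArmOsr(uint32_t urgency) { bits_ = (bits_ & ~0xFFu) | (urgency & 0xFF); }

  // Analogue of the diff's single write:
  //   __ St_w(zero_reg, FieldMemOperand(bytecode_array,
  //           BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
  // One aligned 32-bit store clears urgency, install target, and age at once.
  void Reset() { bits_ = 0; }

 private:
  uint32_t bits_ = 0;
};

int main() {
  OsrStateAndAge s;
  s.ArmOsr(3);
  s.Reset();
  assert(s.osr_urgency() == 0 && s.osr_install_target() == 0 &&
         s.bytecode_age() == 0);
  return 0;
}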
--- a/src/baseline/loong64/baseline-assembler-loong64-inl.h
+++ b/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -113,6 +113,11 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                      Label::Distance) {
   __ JumpIfNotSmi(value, target);
 }
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+                                        Label* target,
+                                        Label::Distance distance) {
+  JumpIf(cc, left, Operand(right), target, distance);
+}
 void BaselineAssembler::CallBuiltin(Builtin builtin) {
   ASM_CODE_COMMENT_STRING(masm_,
@@ -346,6 +351,10 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                            int offset) {
   __ Ld_d(output, FieldMemOperand(source, offset));
 }
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+                                                  Register source, int offset) {
+  __ Ld_hu(output, FieldMemOperand(source, offset));
+}
 void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                        int offset) {
   __ Ld_b(output, FieldMemOperand(source, offset));
@@ -415,6 +424,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
   __ Add_d(lhs, lhs, Operand(rhs));
 }
+
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+  __ And(output, lhs, Operand(rhs));
+}
 void BaselineAssembler::Switch(Register reg, int case_value_base,
                                Label** labels, int num_labels) {
   ASM_CODE_COMMENT(masm_);
--- a/src/baseline/mips/baseline-assembler-mips-inl.h
+++ b/src/baseline/mips/baseline-assembler-mips-inl.h
@@ -115,6 +115,11 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                      Label::Distance) {
   __ JumpIfNotSmi(value, target);
 }
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+                                        Label* target,
+                                        Label::Distance distance) {
+  JumpIf(cc, left, Operand(right), target, distance);
+}
 void BaselineAssembler::CallBuiltin(Builtin builtin) {
   ASM_CODE_COMMENT_STRING(masm_,
@@ -356,6 +361,10 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                            int offset) {
   __ Lw(output, FieldMemOperand(source, offset));
 }
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+                                                  Register source, int offset) {
+  __ lhu(output, FieldMemOperand(source, offset));
+}
 void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                        int offset) {
   __ lb(output, FieldMemOperand(source, offset));
@@ -427,6 +436,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
   __ Addu(lhs, lhs, Operand(rhs));
 }
+
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+  __ And(output, lhs, Operand(rhs));
+}
 void BaselineAssembler::Switch(Register reg, int case_value_base,
                                Label** labels, int num_labels) {
   ASM_CODE_COMMENT(masm_);
--- a/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -113,6 +113,11 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                      Label::Distance) {
   __ JumpIfNotSmi(value, target);
 }
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+                                        Label* target,
+                                        Label::Distance distance) {
+  JumpIf(cc, left, Operand(right), target, distance);
+}
 void BaselineAssembler::CallBuiltin(Builtin builtin) {
   ASM_CODE_COMMENT_STRING(masm_,
@@ -354,6 +359,10 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                            int offset) {
   __ Ld(output, FieldMemOperand(source, offset));
 }
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+                                                  Register source, int offset) {
+  __ Lhu(output, FieldMemOperand(source, offset));
+}
 void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                        int offset) {
   __ Lb(output, FieldMemOperand(source, offset));
@@ -425,6 +434,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
   __ Daddu(lhs, lhs, Operand(rhs));
 }
+
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+  __ And(output, lhs, Operand(rhs));
+}
 void BaselineAssembler::Switch(Register reg, int case_value_base,
                                Label** labels, int num_labels) {
   ASM_CODE_COMMENT(masm_);
--- a/src/builtins/loong64/builtins-loong64.cc
+++ b/src/builtins/loong64/builtins-loong64.cc
@@ -1045,6 +1045,19 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   TailCallOptimizedCodeSlot(masm, optimized_code_entry);
 }
+
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+                                 Register bytecode_array) {
+  // Reset code age and the OSR state (optimized to a single write).
+  static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+  __ St_w(zero_reg,
+          FieldMemOperand(bytecode_array,
+                          BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+}  // namespace
+
 // static
 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   UseScratchRegisterScope temps(masm);
@@ -1110,19 +1123,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
       BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
   // We'll use the bytecode for both code age/OSR resetting, and pushing onto
   // the frame, so load it into a register.
-  Register bytecodeArray = descriptor.GetRegisterParameter(
+  Register bytecode_array = descriptor.GetRegisterParameter(
       BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
-  // are 8-bit fields next to each other, so we could just optimize by writing
-  // a 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ St_h(zero_reg,
-          FieldMemOperand(bytecodeArray, BytecodeArray::kOsrUrgencyOffset));
-  __ Push(argc, bytecodeArray);
+  ResetBytecodeAgeAndOsrState(masm, bytecode_array);
+  __ Push(argc, bytecode_array);
   // Baseline code frames store the feedback vector where interpreter would
   // store the bytecode offset.
@@ -1273,14 +1277,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(closure);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
-  // 8-bit fields next to each other, so we could just optimize by writing a
-  // 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                    BytecodeArray::kOsrUrgencyOffset));
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
   // Load initial bytecode offset.
   __ li(kInterpreterBytecodeOffsetRegister,
@@ -3676,14 +3673,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   __ Pop(kInterpreterAccumulatorRegister);
   if (is_osr) {
     // Reset the OSR loop nesting depth to disarm back edges.
-    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
-    // Sparkplug here.
+    // TODO(liuyu): Remove Ld as arm64 after register reallocation.
     __ Ld_d(kInterpreterBytecodeArrayRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-    __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                      BytecodeArray::kOsrUrgencyOffset));
+    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
     Generate_OSREntry(masm, code_obj,
                       Operand(Code::kHeaderSize - kHeapObjectTag));
   } else {
--- a/src/builtins/mips/builtins-mips.cc
+++ b/src/builtins/mips/builtins-mips.cc
@@ -1043,6 +1043,19 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
 }
+
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+                                 Register bytecode_array) {
+  // Reset code age and the OSR state (optimized to a single write).
+  static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+  __ sw(zero_reg,
+        FieldMemOperand(bytecode_array,
+                        BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+}  // namespace
+
 // static
 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   UseScratchRegisterScope temps(masm);
@@ -1106,19 +1119,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
       BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
   // We'll use the bytecode for both code age/OSR resetting, and pushing onto
   // the frame, so load it into a register.
-  Register bytecodeArray = descriptor.GetRegisterParameter(
+  Register bytecode_array = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
-  // are 8-bit fields next to each other, so we could just optimize by writing
-  // a 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ sh(zero_reg,
-        FieldMemOperand(bytecodeArray, BytecodeArray::kOsrUrgencyOffset));
-  __ Push(argc, bytecodeArray);
+  ResetBytecodeAgeAndOsrState(masm, bytecode_array);
+  __ Push(argc, bytecode_array);
   // Baseline code frames store the feedback vector where interpreter would
   // store the bytecode offset.
@@ -1267,14 +1271,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(closure);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
-  // 8-bit fields next to each other, so we could just optimize by writing a
-  // 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                  BytecodeArray::kOsrUrgencyOffset));
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
   // Load initial bytecode offset.
   __ li(kInterpreterBytecodeOffsetRegister,
@@ -4124,14 +4121,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   __ Pop(kInterpreterAccumulatorRegister);
   if (is_osr) {
     // Reset the OSR loop nesting depth to disarm back edges.
-    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
-    // Sparkplug here.
+    // TODO(liuyu): Remove Ld as arm64 after register reallocation.
     __ Lw(kInterpreterBytecodeArrayRegister,
           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-    __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                    BytecodeArray::kOsrUrgencyOffset));
+    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
     Generate_OSREntry(masm, code_obj,
                       Operand(Code::kHeaderSize - kHeapObjectTag));
   } else {
--- a/src/builtins/mips64/builtins-mips64.cc
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -1051,6 +1051,19 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
   TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
 }
+
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+                                 Register bytecode_array) {
+  // Reset code age and the OSR state (optimized to a single write).
+  static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+  __ Sw(zero_reg,
+        FieldMemOperand(bytecode_array,
+                        BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+}  // namespace
+
 // static
 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   UseScratchRegisterScope temps(masm);
@@ -1114,19 +1127,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
       BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
   // We'll use the bytecode for both code age/OSR resetting, and pushing onto
   // the frame, so load it into a register.
-  Register bytecodeArray = descriptor.GetRegisterParameter(
+  Register bytecode_array = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
-  // are 8-bit fields next to each other, so we could just optimize by writing
-  // a 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ Sh(zero_reg,
-        FieldMemOperand(bytecodeArray, BytecodeArray::kOsrUrgencyOffset));
-  __ Push(argc, bytecodeArray);
+  ResetBytecodeAgeAndOsrState(masm, bytecode_array);
+  __ Push(argc, bytecode_array);
   // Baseline code frames store the feedback vector where interpreter would
   // store the bytecode offset.
@@ -1275,14 +1279,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(closure);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
-  // 8-bit fields next to each other, so we could just optimize by writing a
-  // 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                  BytecodeArray::kOsrUrgencyOffset));
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
   // Load initial bytecode offset.
   __ li(kInterpreterBytecodeOffsetRegister,
@@ -3702,14 +3699,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   __ Pop(kInterpreterAccumulatorRegister);
   if (is_osr) {
     // Reset the OSR loop nesting depth to disarm back edges.
-    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
-    // Sparkplug here.
+    // TODO(liuyu): Remove Ld as arm64 after register reallocation.
     __ Ld(kInterpreterBytecodeArrayRegister,
           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-    __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                    BytecodeArray::kOsrUrgencyOffset));
+    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
     Generate_OSREntry(masm, code_obj,
                       Operand(Code::kHeaderSize - kHeapObjectTag));
   } else {
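
For context on why the three BaselineAssembler helpers (JumpIfImmediate, LoadWord16FieldZeroExtend, Word32And) are added: the ported commit b8473c52 uses them in the shared baseline compiler's back-edge check, which zero-extends the packed 16-bit OSR state and takes the slow path only when it exceeds the current loop depth. Below is a rough, runnable simulation of the shape of that test, not the ported code itself; the constants, bit widths, and function names are illustrative.

// Runnable simulation (not V8 source) of the OSR fast-path test the new
// helpers implement: LoadWord16FieldZeroExtend fetches the packed 16-bit OSR
// state, JumpIfImmediate branches to the slow path only when the state
// exceeds the loop depth (the install target sits in the high bits, so any
// pending install also trips the branch), and Word32And is the masking step
// when the packed fields are taken apart.
#include <cassert>
#include <cstdint>

constexpr uint32_t kUrgencyBits = 3;  // illustrative width, not V8's
constexpr uint32_t kUrgencyMask = (1u << kUrgencyBits) - 1;

// True when the loop back edge should enter the OSR slow path; stands in for
// JumpIfImmediate(kUnsignedGreaterThan, osr_state, loop_depth, &osr_armed).
bool OsrArmed(uint16_t osr_state, uint32_t loop_depth) {
  return static_cast<uint32_t>(osr_state) > loop_depth;
}

// Extracts the urgency field; stands in for the Word32And masking step.
uint32_t Urgency(uint16_t osr_state) { return osr_state & kUrgencyMask; }

int main() {
  assert(!OsrArmed(/*osr_state=*/0, /*loop_depth=*/2));  // disarmed fast path
  assert(OsrArmed(/*osr_state=*/5, /*loop_depth=*/2));   // urgency exceeded
  assert(Urgency(0b101) == 5);
  return 0;
}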