Commit a3aad69c authored by Junliang Yan, committed by V8 LUCI CQ

s390x/ppc64: [osr] Add an install-by-offset mechanism

Port 52b99213e73045e9ffcae970e6c3f3cd07fc8381

Bug: v8:12161
Change-Id: Iac4f31eb6be83bca0e4bd407d81f1ece271b1e67
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3576124
Reviewed-by: Milad Farazmand <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/main@{#79917}
parent c77feb99
@@ -188,6 +188,12 @@ void BaselineAssembler::JumpIfSmi(Register value, Label* target,
__ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
JumpIf(cc, left, Operand(right), target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
@@ -455,14 +461,23 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
UNIMPLEMENTED();
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ LoadU16(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
UNIMPLEMENTED();
__ LoadU8(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
UNIMPLEMENTED();
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
@@ -491,6 +506,10 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
UNIMPLEMENTED();
}
void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
__ AndU32(output, lhs, Operand(rhs));
}
#undef __
#define __ basm.
......
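The two helpers added above, JumpIfImmediate and Word32And, are plausibly the primitives the shared baseline code uses for the new OSR check: load the packed OSR field, mask out the urgency bits, then branch on an immediate comparison. A minimal standalone sketch of that pattern; the bit widths and names below are illustrative, the real layout is defined by BytecodeArray:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical packing mirroring V8's combined OSR field: low bits hold the
// urgency counter, the rest the install target. Widths are illustrative.
constexpr uint32_t kOsrUrgencyBits = 3;
constexpr uint32_t kOsrUrgencyMask = (1u << kOsrUrgencyBits) - 1;

bool ShouldTriggerOsr(uint16_t osr_state, uint32_t loop_depth) {
  // Word32And-style masking isolates the urgency from the packed field...
  uint32_t urgency = osr_state & kOsrUrgencyMask;
  // ...and a JumpIfImmediate-style compare decides whether to take the
  // OSR slow path for a loop at this depth.
  return urgency > loop_depth;
}

int main() {
  printf("%d\n", ShouldTriggerOsr(/*osr_state=*/5, /*loop_depth=*/2));  // 1
  return 0;
}
```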
@@ -172,18 +172,28 @@ void BaselineAssembler::JumpTarget() {
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ b(target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
JumpIf(cc, left, Operand(right), target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
@@ -431,18 +441,27 @@ void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ LoadU16(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ LoadU8(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ASM_CODE_COMMENT(masm_);
@@ -451,6 +470,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
__ LoadSmiLiteral(tmp, value);
__ StoreTaggedField(tmp, FieldMemOperand(target, offset), r0);
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
@@ -461,6 +481,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
__ RecordWriteField(target, offset, value, scratch, kLRHasNotBeenSaved,
SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
@@ -523,6 +544,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
}
}
void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
__ AndP(output, lhs, Operand(rhs));
}
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
......
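Nearly every helper in these two baseline-assembler files addresses fields through FieldMemOperand, and Generate_OSREntry below enters code at Code::kHeaderSize - kHeapObjectTag. Both hinge on the same fact: V8 heap pointers carry a tag (kHeapObjectTag is 1), so the raw address of a field is the tagged pointer plus the field offset minus the tag. A small sketch of that arithmetic, with illustrative values:

```cpp
#include <cstdint>
#include <cstdio>

// V8 tags heap-object pointers by adding 1; FieldMemOperand folds the
// untagging into the displacement of the memory operand.
constexpr intptr_t kHeapObjectTag = 1;

intptr_t FieldAddress(intptr_t tagged_object, int offset) {
  return tagged_object + offset - kHeapObjectTag;
}

int main() {
  intptr_t raw = 0x1000;                    // pretend allocation address
  intptr_t tagged = raw + kHeapObjectTag;   // what the VM passes around
  printf("%#lx\n", (unsigned long)FieldAddress(tagged, 8));  // 0x1008
  return 0;
}
```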
@@ -204,11 +204,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (is_osr) {
Register scratch = ip;
__ mov(scratch, Operand(0));
__ StoreU16(scratch,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset),
r0);
ResetBytecodeAgeAndOsrState(masm, bytecodeArray, scratch);
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
} else {
__ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -240,6 +236,18 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ b(&start);
}
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
Register scratch) {
// Reset the bytecode age and OSR state (optimized to a single write).
static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(scratch, Operand(0));
__ StoreU32(scratch,
FieldMemOperand(bytecode_array,
BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
r0);
}
} // namespace
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
@@ -1389,17 +1397,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ li(r8, Operand(0));
__ StoreU16(r8,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset),
r0);
ResetBytecodeAgeAndOsrState(masm, bytecodeArray, r8);
// Load initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
......
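ResetBytecodeAgeAndOsrState is the heart of this port: it collapses what used to be separate resets of the OSR field and the bytecode age into one 32-bit store of zero, and the static_assert/STATIC_ASSERT pair guards the layout that makes this legal. A portable sketch of the same trick, with hypothetical field names standing in for the real BytecodeArray header:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Two contiguous 16-bit fields reset by a single aligned 32-bit write,
// mirroring the StoreU32 of zero in the builtin. Names are illustrative.
struct BytecodeArrayHeaderSketch {
  uint16_t osr_urgency_and_install_target;
  uint16_t bytecode_age;  // kNoAgeBytecodeAge == 0, so zeroing resets it
};
static_assert(sizeof(BytecodeArrayHeaderSketch) == sizeof(uint32_t),
              "both fields must be covered by one 32-bit store");

void ResetBytecodeAgeAndOsrState(BytecodeArrayHeaderSketch* header) {
  uint32_t zero = 0;
  std::memcpy(header, &zero, sizeof(zero));  // one write clears both fields
}

int main() {
  BytecodeArrayHeaderSketch h{3, 7};
  ResetBytecodeAgeAndOsrState(&h);
  printf("%u %u\n", h.osr_urgency_and_install_target, h.bytecode_age);  // 0 0
  return 0;
}
```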
@@ -83,6 +83,18 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
__ Ret();
}
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
Register scratch) {
// Reset the bytecode age and OSR state (optimized to a single write).
static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r0, Operand(0));
__ StoreU32(r0,
FieldMemOperand(bytecode_array,
BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
scratch);
}
// Restarts execution either at the current or next (in execution order)
// bytecode. If there is baseline code on the shared function info, converts an
// interpreter frame into a baseline frame and continues execution in baseline
@@ -204,10 +216,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
Register scratch = r1;
__ mov(scratch, Operand(0));
__ StoreU16(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset));
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't
// disarm Sparkplug here.
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r1);
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
} else {
__ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -239,7 +250,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ b(&start);
}
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
enum class OsrSourceTier {
kInterpreter,
kBaseline,
};
void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source) {
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -247,14 +263,14 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// If the code object is null, just return to the caller.
Label skip;
Label jump_to_returned_code;
__ CmpSmiLiteral(r2, Smi::zero(), r0);
__ bne(&skip);
__ bne(&jump_to_returned_code);
__ Ret();
__ bind(&skip);
__ bind(&jump_to_returned_code);
if (is_interpreter) {
if (source == OsrSourceTier::kInterpreter) {
// Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
__ LeaveFrame(StackFrame::STUB);
@@ -1409,19 +1425,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecodeArray = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
{
Register scratch = r0;
__ mov(scratch, Operand(0));
__ StoreU16(scratch, FieldMemOperand(bytecodeArray,
BytecodeArray::kOsrUrgencyOffset));
}
ResetBytecodeAgeAndOsrState(masm, bytecodeArray, r1);
__ Push(argc, bytecodeArray);
@@ -1573,17 +1577,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r1, Operand(0));
__ StoreU16(r1,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset),
r0);
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r1);
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
@@ -3843,14 +3837,14 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
return OnStackReplacement(masm, true);
OnStackReplacement(masm, OsrSourceTier::kInterpreter);
}
#if ENABLE_SPARKPLUG
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
__ LoadU64(kContextRegister,
MemOperand(fp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
OnStackReplacement(masm, OsrSourceTier::kBaseline);
}
#endif
......
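A smaller but worthwhile cleanup in this file: OnStackReplacement now takes an OsrSourceTier enum class instead of a bare bool, so call sites read as OnStackReplacement(masm, OsrSourceTier::kInterpreter) rather than OnStackReplacement(masm, true). A compilable sketch of why the pattern pays off:

```cpp
#include <cstdio>

// Enum class instead of bool: the call site documents itself and the
// parameter can grow more tiers without changing its type.
enum class OsrSourceTier { kInterpreter, kBaseline };

void OnStackReplacement(OsrSourceTier source) {
  if (source == OsrSourceTier::kInterpreter) {
    // Only interpreter-sourced OSR has a handler frame to drop first.
    printf("leaving interpreter handler frame\n");
  }
  printf("jumping to returned code\n");
}

int main() {
  OnStackReplacement(OsrSourceTier::kInterpreter);  // vs. the old `true`
  OnStackReplacement(OsrSourceTier::kBaseline);     // vs. the old `false`
  return 0;
}
```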
@@ -3776,6 +3776,7 @@ void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
}
void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) {
// TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgh(dst, mem);
#else
@@ -3792,6 +3793,7 @@ void TurboAssembler::LoadU16(Register dst, Register src) {
}
void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) {
// TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
lgb(dst, mem);
#else
@@ -3808,6 +3810,7 @@ void TurboAssembler::LoadS8(Register dst, Register src) {
}
void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) {
// TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgc(dst, mem);
#else
......
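The TurboAssembler loads touched in this last file differ only in extension semantics: LoadU16 and LoadU8 zero-extend into the 64-bit destination (llgh/llgc on s390x), while LoadS8 sign-extends (lgb). A portable illustration of the distinction, independent of the s390x encodings:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

int64_t LoadU16(const void* mem) {  // zero-extend, like llgh
  uint16_t v;
  std::memcpy(&v, mem, sizeof(v));
  return static_cast<int64_t>(v);   // upper 48 bits become zero
}

int64_t LoadS8(const void* mem) {   // sign-extend, like lgb
  int8_t v;
  std::memcpy(&v, mem, sizeof(v));
  return static_cast<int64_t>(v);   // upper 56 bits copy the sign bit
}

int main() {
  uint16_t half = 0xFFFF;
  int8_t byte = -1;
  printf("%lld %lld\n", (long long)LoadU16(&half), (long long)LoadS8(&byte));
  // prints: 65535 -1
  return 0;
}
```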