Commit 2889a3c2 authored by Junliang Yan, committed by V8 LUCI CQ

ppc: [liftoff] Implement and clean up AddS64

Change-Id: I1b1d8d0485f037ba5c105741039e62db87fd2b6a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3008642
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#75584}
parent b8b3e75c
@@ -310,7 +310,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
 void TurboAssembler::Drop(int count) {
   if (count > 0) {
-    Add(sp, sp, count * kSystemPointerSize, r0);
+    AddS64(sp, sp, Operand(count * kSystemPointerSize), r0);
   }
 }
@@ -665,7 +665,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
   // of the object, so so offset must be a multiple of kSystemPointerSize.
   DCHECK(IsAligned(offset, kTaggedSize));
-  Add(slot_address, object, offset - kHeapObjectTag, r0);
+  AddS64(slot_address, object, Operand(offset - kHeapObjectTag), r0);
   if (FLAG_debug_code) {
     Label ok;
     andi(r0, slot_address, Operand(kTaggedSize - 1));
@@ -1232,7 +1232,9 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
   }
   mtlr(r0);
   frame_ends = pc_offset();
-  Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
+  AddS64(sp, fp,
+         Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment),
+         r0);
   mr(fp, ip);
   return frame_ends;
 }
@@ -1769,7 +1771,7 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
     original_left = overflow_dst;
     mr(original_left, left);
   }
-  Add(dst, left, right, scratch);
+  AddS64(dst, left, Operand(right), scratch);
   xor_(overflow_dst, dst, original_left);
   if (right >= 0) {
     and_(overflow_dst, overflow_dst, dst, SetRC);
@@ -2594,12 +2596,16 @@ void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
   addi(sp, sp, Operand(kFloatSize));
 }
-void TurboAssembler::Add(Register dst, Register src, intptr_t value,
-                         Register scratch) {
-  if (is_int16(value)) {
-    addi(dst, src, Operand(value));
+void TurboAssembler::AddS64(Register dst, Register src, Register value) {
+  add(dst, src, value);
+}
+
+void TurboAssembler::AddS64(Register dst, Register src, const Operand& value,
+                            Register scratch) {
+  if (is_int16(value.immediate())) {
+    addi(dst, src, value);
   } else {
-    mov(scratch, Operand(value));
+    mov(scratch, value);
     add(dst, src, scratch);
   }
 }
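In case the dispatch is not obvious from the hunk above: the Operand overload keeps the strategy of the old Add() helper, emitting a single addi when the constant fits PPC's 16-bit signed immediate field and otherwise materializing the constant into the scratch register (r0 at the call sites in this change) before a register-register add. Below is a minimal standalone sketch of that selection logic; it is plain C++ for illustration only, not V8 code, and the register names are just strings.

#include <cstdint>
#include <cstdio>

// Mirrors the is_int16() test used by the new AddS64(dst, src, Operand, scratch):
// values in the 16-bit signed range can go straight into PPC's addi immediate field.
static bool FitsInt16(int64_t value) {
  return value >= INT16_MIN && value <= INT16_MAX;
}

// Prints the instruction sequence the overload would emit for an immediate add.
static void EmitAddS64Immediate(const char* dst, const char* src, int64_t imm,
                                const char* scratch) {
  if (FitsInt16(imm)) {
    std::printf("addi %s, %s, %lld\n", dst, src, static_cast<long long>(imm));
  } else {
    // Larger constants are first loaded into the scratch register (mov is the
    // macro-assembler pseudo for materializing a constant), then added.
    std::printf("mov  %s, %lld\n", scratch, static_cast<long long>(imm));
    std::printf("add  %s, %s, %s\n", dst, src, scratch);
  }
}

int main() {
  EmitAddS64Immediate("sp", "sp", -32, "r0");      // small immediate -> addi
  EmitAddS64Immediate("r3", "r4", 1 << 20, "r0");  // large immediate -> mov + add
  return 0;
}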
@@ -2721,7 +2727,7 @@ void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
                                    Register scratch) {
 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
-  Add(dst, src, static_cast<intptr_t>(smi.ptr()), scratch);
+  AddS64(dst, src, Operand(smi.ptr()), scratch);
 #else
   LoadSmiLiteral(scratch, smi);
   add(dst, src, scratch);
@@ -2731,7 +2737,7 @@ void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
                                    Register scratch) {
 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
-  Add(dst, src, -(static_cast<intptr_t>(smi.ptr())), scratch);
+  AddS64(dst, src, Operand(-(static_cast<intptr_t>(smi.ptr()))), scratch);
 #else
   LoadSmiLiteral(scratch, smi);
   sub(dst, src, scratch);
...
@@ -114,7 +114,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void AllocateStackSpace(int bytes) {
     DCHECK_GE(bytes, 0);
     if (bytes == 0) return;
-    Add(sp, sp, -bytes, r0);
+    AddS64(sp, sp, Operand(-bytes), r0);
   }
   // Push a fixed frame, consisting of lr, fp, constant pool.
@@ -171,7 +171,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // reset rounding mode to default (kRoundToNearest)
   void ResetRoundingMode();
-  void Add(Register dst, Register src, intptr_t value, Register scratch);
+  void AddS64(Register dst, Register src, const Operand& value,
+              Register scratch = r0);
+  void AddS64(Register dst, Register src, Register value);
   void Push(Register src) { push(src); }
   // Push a handle.
...
@@ -773,13 +773,13 @@ void AdjustStackPointerForTailCall(
     if (pending_pushes != nullptr) {
       FlushPendingPushRegisters(tasm, state, pending_pushes);
     }
-    tasm->Add(sp, sp, -stack_slot_delta * kSystemPointerSize, r0);
+    tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
     state->IncreaseSPDelta(stack_slot_delta);
   } else if (allow_shrinkage && stack_slot_delta < 0) {
     if (pending_pushes != nullptr) {
       FlushPendingPushRegisters(tasm, state, pending_pushes);
     }
-    tasm->Add(sp, sp, -stack_slot_delta * kSystemPointerSize, r0);
+    tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
     state->IncreaseSPDelta(stack_slot_delta);
   }
 }
@@ -4176,7 +4176,8 @@ void CodeGenerator::AssembleConstructFrame() {
                        WasmInstanceObject::kRealStackLimitAddressOffset),
                    r0);
         __ LoadU64(scratch, MemOperand(scratch), r0);
-        __ Add(scratch, scratch, required_slots * kSystemPointerSize, r0);
+        __ AddS64(scratch, scratch,
+                  Operand(required_slots * kSystemPointerSize), r0);
         __ cmpl(sp, scratch);
         __ bge(&done);
       }
@@ -4198,7 +4199,7 @@ void CodeGenerator::AssembleConstructFrame() {
       required_slots -= frame()->GetReturnSlotCount();
       required_slots -= (kDoubleSize / kSystemPointerSize) *
                         base::bits::CountPopulation(saves_fp);
-      __ Add(sp, sp, -required_slots * kSystemPointerSize, r0);
+      __ AddS64(sp, sp, Operand(-required_slots * kSystemPointerSize), r0);
     }
     // Save callee-saved Double registers.
@@ -4224,7 +4225,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
   const int returns = frame()->GetReturnSlotCount();
   if (returns != 0) {
     // Create space for returns.
-    __ Add(sp, sp, returns * kSystemPointerSize, r0);
+    __ AddS64(sp, sp, Operand(returns * kSystemPointerSize), r0);
   }
   // Restore registers.
...
@@ -735,7 +735,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
   __ bind(&stack_ok);
   // Allocate space on stack for registers.
-  __ Add(sp, sp, -num_registers_ * kSystemPointerSize, r0);
+  __ AddS64(sp, sp, Operand(-num_registers_ * kSystemPointerSize), r0);
   // Load string end.
   __ LoadU64(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
   // Load input start.
...
@@ -792,7 +792,6 @@ UNIMPLEMENTED_I32_BINOP_I(i32_xor)
 UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
 UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
 UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP(i64_add)
 UNIMPLEMENTED_I64_BINOP(i64_sub)
 UNIMPLEMENTED_I64_BINOP(i64_mul)
 #ifdef V8_TARGET_ARCH_PPC64
@@ -845,7 +844,7 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
 #undef UNIMPLEMENTED_I32_SHIFTOP
 #undef UNIMPLEMENTED_I64_SHIFTOP
-#define SIGN_EXT(r) lgfr(r, r)
+#define SIGN_EXT(r) extsw(r, r)
 #define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
 #define REGISTER_AND_WITH_1F \
   ([&](Register rhs) { \
@@ -874,10 +873,33 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
 #undef EMIT_UNOP_FUNCTION
 #undef UNOP_LIST
-void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
-                                     int64_t imm) {
-  bailout(kUnsupportedArchitecture, "i64_addi");
-}
+// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
+// return_val, return_type)
+#define BINOP_LIST(V)                                                     \
+  V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,   \
+    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                      \
+  V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t,          \
+    LFR_TO_REG, LFR_TO_REG, Operand, USE, , void)
+
+#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast,    \
+                            scast1, scast2, rcast, ret, return_type)      \
+  return_type LiftoffAssembler::emit_##name(dtype dst, stype1 lhs,        \
+                                            stype2 rhs) {                 \
+    auto _dst = dcast(dst);                                               \
+    auto _lhs = scast1(lhs);                                              \
+    auto _rhs = scast2(rhs);                                              \
+    instr(_dst, _lhs, _rhs);                                              \
+    rcast(_dst);                                                          \
+    return ret;                                                           \
+  }
+
+BINOP_LIST(EMIT_BINOP_FUNCTION)
+#undef BINOP_LIST
+#undef EMIT_BINOP_FUNCTION
+
+#undef SIGN_EXT
+#undef INT32_AND_WITH_1F
+#undef REGISTER_AND_WITH_1F
+#undef LFR_TO_REG
 
 void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
                                      Label* trap_div_by_zero,
...
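For readers unfamiliar with the table-driven emitters, the BINOP_LIST / EMIT_BINOP_FUNCTION pair above generates the two Liftoff entry points that replace the old i64_add/i64_addi bailouts. Roughly, the expansion looks like the sketch below. This is an illustration rather than code from the commit; it assumes LFR_TO_REG maps a LiftoffRegister to its general-purpose register and that USE only marks a value as used (both macros are defined outside this excerpt).

void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  auto _dst = LFR_TO_REG(dst);
  auto _lhs = LFR_TO_REG(lhs);
  auto _rhs = LFR_TO_REG(rhs);
  AddS64(_dst, _lhs, _rhs);  // register-register overload
  USE(_dst);
  return;
}

void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
                                     int64_t rhs) {
  auto _dst = LFR_TO_REG(dst);
  auto _lhs = LFR_TO_REG(lhs);
  auto _rhs = Operand(rhs);
  AddS64(_dst, _lhs, _rhs);  // Operand overload; scratch defaults to r0
  USE(_dst);
  return;
}

The default `Register scratch = r0` added to the header declaration is what lets the second expansion call AddS64 with only three arguments.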