Commit 7aafd201 authored by Junliang Yan, committed by V8 LUCI CQ

ppc: Cleanup shift operations

Change-Id: I04a950d196070ce8661e95b3e2b00802a5000870
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3042044
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#75888}
parent f287e901
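Note on the cleanup: the CL retires the width-ambiguous portability macros in the PPC macro-assembler header (ShiftLeftImm, ShiftRightImm, ShiftRightArithImm, ShiftLeft_, ShiftRight_, ShiftRightArith), which expanded to different PPC mnemonics depending on V8_TARGET_ARCH_PPC64, and replaces every call site with explicitly sized TurboAssembler helpers:

  ShiftLeftImm / ShiftLeft_ (sldi, slwi, sld, slw)                 ->  ShiftLeftU64, ShiftLeftU32
  ShiftRightImm / ShiftRight_ (srdi, srwi, srd, srw)               ->  ShiftRightU64, ShiftRightU32
  ShiftRightArithImm / ShiftRightArith (sradi, srawi, srad, sraw)  ->  ShiftRightS64, ShiftRightS32

Immediate shift counts are now uniformly wrapped in Operand(...), so the register and immediate forms share one overloaded name (see the ASSEMBLE_BINOP_RC change further down).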
@@ -313,7 +313,7 @@ void TurboAssembler::Drop(int count) {
 }
 void TurboAssembler::Drop(Register count, Register scratch) {
-  ShiftLeftImm(scratch, count, Operand(kSystemPointerSizeLog2));
+  ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
   add(sp, sp, scratch);
 }
@@ -336,7 +336,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
   if (order == kNormal) {
     cmpi(size, Operand::Zero());
     beq(&done);
-    ShiftLeftImm(scratch, size, Operand(kSystemPointerSizeLog2));
+    ShiftLeftU64(scratch, size, Operand(kSystemPointerSizeLog2));
     add(scratch, array, scratch);
     mtctr(size);
@@ -1011,16 +1011,16 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
   blt(&less_than_32);
   // If shift >= 32
   andi(scratch, shift, Operand(0x1F));
-  slw(dst_high, src_low, scratch);
+  ShiftLeftU32(dst_high, src_low, scratch);
   li(dst_low, Operand::Zero());
   b(&done);
   bind(&less_than_32);
   // If shift < 32
   subfic(scratch, shift, Operand(32));
-  slw(dst_high, src_high, shift);
+  ShiftLeftU32(dst_high, src_high, shift);
   srw(scratch, src_low, scratch);
   orx(dst_high, dst_high, scratch);
-  slw(dst_low, src_low, shift);
+  ShiftLeftU32(dst_low, src_low, shift);
   bind(&done);
 }
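Context for the ShiftLeftPair hunks: on 32-bit PPC a 64-bit value lives in two 32-bit registers, so the pair shift is composed from 32-bit operations. A minimal C++ model of the variable-count path above (hypothetical helper, not part of the CL):

  #include <cstdint>

  // {src_hi, src_lo} holds a 64-bit value; shift is in 0..63.
  void ShiftLeftPairModel(uint32_t& dst_lo, uint32_t& dst_hi,
                          uint32_t src_lo, uint32_t src_hi, uint32_t shift) {
    if (shift == 0) {  // C++ cannot shift a uint32_t by 32; the hardware srw can.
      dst_hi = src_hi;
      dst_lo = src_lo;
    } else if (shift >= 32) {
      dst_hi = src_lo << (shift & 0x1F);  // andi + ShiftLeftU32: low word feeds high
      dst_lo = 0;                         // li(dst_low, Operand::Zero())
    } else {
      // ShiftLeftU32 + srw + orx: bits leaving the low word enter the high word.
      dst_hi = (src_hi << shift) | (src_lo >> (32 - shift));
      dst_lo = src_lo << shift;
    }
  }

The ShiftRightPair and ShiftRightAlgPair hunks below are the mirror image, with srw or sraw on the high word and ShiftLeftU32 recovering the carried bits.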
@@ -1034,15 +1034,15 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
     li(dst_low, Operand::Zero());
   } else if (shift > 32) {
     shift &= 0x1F;
-    slwi(dst_high, src_low, Operand(shift));
+    ShiftLeftU32(dst_high, src_low, Operand(shift));
     li(dst_low, Operand::Zero());
   } else if (shift == 0) {
     Move(dst_low, src_low);
     Move(dst_high, src_high);
   } else {
-    slwi(dst_high, src_high, Operand(shift));
+    ShiftLeftU32(dst_high, src_high, Operand(shift));
     rlwimi(dst_high, src_low, shift, 32 - shift, 31);
-    slwi(dst_low, src_low, Operand(shift));
+    ShiftLeftU32(dst_low, src_low, Operand(shift));
   }
 }
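In the immediate-count version the carry is merged with rlwimi (rotate left word immediate then mask insert): rlwimi(dst_high, src_low, shift, 32 - shift, 31) rotates src_low left by shift and inserts its top shift bits into the low end of dst_high, i.e. dst_high |= src_low >> (32 - shift). Worked example for shift = 8 on {src_hi, src_lo} = {0x00000012, 0x34567890}:

  dst_high = (0x00000012 << 8) | (0x34567890 >> 24) = 0x00001200 | 0x34 = 0x00001234
  dst_low  =  0x34567890 << 8                       = 0x56789000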
@@ -1065,7 +1065,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
   // If shift < 32
   subfic(scratch, shift, Operand(32));
   srw(dst_low, src_low, shift);
-  slw(scratch, src_high, scratch);
+  ShiftLeftU32(scratch, src_high, scratch);
   orx(dst_low, dst_low, scratch);
   srw(dst_high, src_high, shift);
   bind(&done);
@@ -1111,7 +1111,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
   // If shift < 32
   subfic(scratch, shift, Operand(32));
   srw(dst_low, src_low, shift);
-  slw(scratch, src_high, scratch);
+  ShiftLeftU32(scratch, src_high, scratch);
   orx(dst_low, dst_low, scratch);
   sraw(dst_high, src_high, shift);
   bind(&done);
@@ -1370,7 +1370,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
   if (argument_count.is_valid()) {
     if (!argument_count_is_length) {
-      ShiftLeftImm(argument_count, argument_count,
+      ShiftLeftU64(argument_count, argument_count,
                    Operand(kSystemPointerSizeLog2));
     }
     add(sp, sp, argument_count);
@@ -1394,7 +1394,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
   // after we drop current frame. We add kSystemPointerSize to count the
   // receiver argument which is not included into formal parameters count.
   Register dst_reg = scratch0;
-  ShiftLeftImm(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
+  ShiftLeftU64(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
   add(dst_reg, fp, dst_reg);
   AddS64(dst_reg, dst_reg,
          Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize),
@@ -1402,7 +1402,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
   Register src_reg = caller_args_count;
   // Calculate the end of source area. +kSystemPointerSize is for the receiver.
-  ShiftLeftImm(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
+  ShiftLeftU64(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
   add(src_reg, sp, src_reg);
   AddS64(src_reg, src_reg, Operand(kSystemPointerSize), scratch0);
@@ -1458,7 +1458,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
  // here which will cause scratch to become negative.
  sub(scratch, sp, scratch);
  // Check if the arguments will overflow the stack.
-  ShiftLeftImm(r0, num_args, Operand(kSystemPointerSizeLog2));
+  ShiftLeftU64(r0, num_args, Operand(kSystemPointerSizeLog2));
  CmpS64(scratch, r0);
  ble(stack_overflow);  // Signed comparison.
 }
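StackOverflowCheck compares the remaining headroom against what the arguments need; roughly (a C++-flavored sketch, with the stack limit loaded before this excerpt):

  // scratch = sp - stack_limit;               may wrap negative, as the comment warns
  // r0      = num_args * kSystemPointerSize;  i.e. num_args << kSystemPointerSizeLog2
  // if (scratch <= r0) goto stack_overflow;   CmpS64 + ble, signed on purpose

The signed comparison is what makes the "scratch became negative" case fall into the overflow branch.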
@@ -1497,7 +1497,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
     Label copy;
     Register src = r9, dest = r8;
     addi(src, sp, Operand(-kSystemPointerSize));
-    ShiftLeftImm(r0, expected_parameter_count, Operand(kSystemPointerSizeLog2));
+    ShiftLeftU64(r0, expected_parameter_count, Operand(kSystemPointerSizeLog2));
     sub(sp, sp, r0);
     // Update stack pointer.
     addi(dest, sp, Operand(-kSystemPointerSize));
@@ -2552,7 +2552,7 @@ void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
                                                 Register src_lo,
                                                 Register scratch) {
   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
-    sldi(scratch, src_hi, Operand(32));
+    ShiftLeftU64(scratch, src_hi, Operand(32));
     rldimi(scratch, src_lo, 0, 32);
     mtfprd(dst, scratch);
     return;
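The FPR_GPR_MOV path packs both halves into one GPR and moves the bits directly to the FPR:

  scratch = (uint64_t{src_hi} << 32) | src_lo;  // ShiftLeftU64 + rldimi(scratch, src_lo, 0, 32)

mtfprd then transfers the 64 raw bits into dst; without the direct-move feature, control falls through to a path outside this hunk.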
@@ -3401,11 +3401,11 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
   // The builtin_index register contains the builtin index as a Smi.
   if (SmiValuesAre32Bits()) {
-    ShiftRightArithImm(builtin_index, builtin_index,
-                       kSmiShift - kSystemPointerSizeLog2);
+    ShiftRightS64(builtin_index, builtin_index,
+                  Operand(kSmiShift - kSystemPointerSizeLog2));
   } else {
     DCHECK(SmiValuesAre31Bits());
-    ShiftLeftImm(builtin_index, builtin_index,
+    ShiftLeftU64(builtin_index, builtin_index,
                  Operand(kSystemPointerSizeLog2 - kSmiShift));
   }
   AddS64(builtin_index, builtin_index,
@@ -3456,7 +3456,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
   bind(&if_code_is_off_heap);
   LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset),
           r0);
-  ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
+  ShiftLeftU64(destination, scratch, Operand(kSystemPointerSizeLog2));
   add(destination, destination, kRootRegister);
   LoadU64(destination,
           MemOperand(destination, IsolateData::builtin_entry_table_offset()),
...
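On the Smi arithmetic in LoadEntryFromBuiltinIndex: a Smi stores value << kSmiShift, and the builtin entry table wants value << kSystemPointerSizeLog2, so the untag and the rescale fold into a single shift. Assuming kSystemPointerSizeLog2 == 3 (64-bit pointers):

  // 32-bit Smis (kSmiShift == 32): one arithmetic right shift.
  offset = smi >> (32 - 3);   // == (smi >> 32) << 3
  // 31-bit Smis (kSmiShift == 1): one left shift.
  offset = smi << (3 - 1);    // == (smi >> 1) << 3

The same folding appears in SmiToPtrArrayOffset in the header diff below.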
@@ -38,23 +38,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
 
 // These exist to provide portability between 32 and 64bit
 #if V8_TARGET_ARCH_PPC64
-#define ShiftLeftImm sldi
-#define ShiftRightImm srdi
 #define ClearLeftImm clrldi
 #define ClearRightImm clrrdi
-#define ShiftRightArithImm sradi
-#define ShiftLeft_ sld
-#define ShiftRight_ srd
-#define ShiftRightArith srad
 #else
-#define ShiftLeftImm slwi
-#define ShiftRightImm srwi
 #define ClearLeftImm clrlwi
 #define ClearRightImm clrrwi
-#define ShiftRightArithImm srawi
-#define ShiftLeft_ slw
-#define ShiftRight_ srw
-#define ShiftRightArith sraw
 #endif
 
 class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
@@ -557,7 +545,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
     if (COMPRESS_POINTERS_BOOL) {
       srawi(dst, src, kSmiShift, rc);
     } else {
-      ShiftRightArithImm(dst, src, kSmiShift, rc);
+      ShiftRightS64(dst, src, Operand(kSmiShift), rc);
     }
   }
@@ -1057,16 +1045,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   // Shift left by kSmiShift
   void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
   void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
-    ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
+    ShiftLeftU64(dst, src, Operand(kSmiShift), rc);
   }
 
   void SmiToPtrArrayOffset(Register dst, Register src) {
 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
     STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
-    ShiftLeftImm(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
+    ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
 #else
     STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
-    ShiftRightArithImm(dst, src, kSmiShift - kSystemPointerSizeLog2);
+    ShiftRightS64(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
 #endif
   }
...
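With the #define block gone, call sites use the overloaded TurboAssembler methods directly; the same name accepts either a register count or an Operand immediate, e.g. (usage sketch, mirroring the SmiTag body and the pair-shift hunks above):

  ShiftLeftU64(dst, src, Operand(kSmiShift), rc);  // immediate count
  ShiftLeftU32(dst_high, src_low, scratch);        // register count

That overload is what lets the code-generator macros below name a single helper for both the register and immediate arms.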
@@ -347,7 +347,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
                    i.InputRegister(1), i.OutputRCBit());          \
     } else {                                                      \
       __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0),    \
-                       i.InputInt32(1), i.OutputRCBit());         \
+                       i.InputImmediate(1), i.OutputRCBit());     \
     }                                                             \
   } while (0)
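The one functional tweak in this macro: the immediate arm now passes i.InputImmediate(1) (an Operand) instead of the raw i.InputInt32(1), matching the new helpers' signatures. Expanded for kPPC_ShiftLeft32 it reads roughly (sketch; the register-vs-immediate dispatch condition sits above the excerpt):

  __ ShiftLeftU32(i.OutputRegister(), i.InputRegister(0),
                  i.InputRegister(1), i.OutputRCBit());   // register count
  // ... else ...
  __ ShiftLeftU32(i.OutputRegister(), i.InputRegister(0),
                  i.InputImmediate(1), i.OutputRCBit());  // immediate count

The next hunk also drops the #if V8_TARGET_ARCH_PPC64 guards around the 64-bit shift cases, since ShiftLeftU64 and friends are defined for both targets.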
@@ -1227,29 +1227,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     case kPPC_ShiftLeft32:
-      ASSEMBLE_BINOP_RC(slw, slwi);
+      ASSEMBLE_BINOP_RC(ShiftLeftU32, ShiftLeftU32);
       break;
-#if V8_TARGET_ARCH_PPC64
     case kPPC_ShiftLeft64:
-      ASSEMBLE_BINOP_RC(sld, sldi);
+      ASSEMBLE_BINOP_RC(ShiftLeftU64, ShiftLeftU64);
       break;
-#endif
     case kPPC_ShiftRight32:
-      ASSEMBLE_BINOP_RC(srw, srwi);
+      ASSEMBLE_BINOP_RC(ShiftRightU32, ShiftRightU32);
       break;
-#if V8_TARGET_ARCH_PPC64
     case kPPC_ShiftRight64:
-      ASSEMBLE_BINOP_RC(srd, srdi);
+      ASSEMBLE_BINOP_RC(ShiftRightU64, ShiftRightU64);
       break;
-#endif
     case kPPC_ShiftRightAlg32:
-      ASSEMBLE_BINOP_INT_RC(sraw, srawi);
+      ASSEMBLE_BINOP_INT_RC(ShiftRightS32, ShiftRightS32);
       break;
-#if V8_TARGET_ARCH_PPC64
     case kPPC_ShiftRightAlg64:
-      ASSEMBLE_BINOP_INT_RC(srad, sradi);
+      ASSEMBLE_BINOP_INT_RC(ShiftRightS64, ShiftRightS64);
       break;
-#endif
 #if !V8_TARGET_ARCH_PPC64
     case kPPC_AddPair:
       // i.InputRegister(0) ... left low word.
@@ -1830,7 +1824,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                   cr, static_cast<CRBit>(VXCVI % CRWIDTH));
         __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
         __ li(kScratchReg, Operand(1));
-        __ sldi(kScratchReg, kScratchReg, Operand(31));  // generate INT32_MIN.
+        __ ShiftLeftU64(kScratchReg, kScratchReg,
+                        Operand(31));  // generate INT32_MIN.
         __ isel(i.OutputRegister(0), kScratchReg, i.OutputRegister(0), crbit);
       }
       break;
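The INT32_MIN construction: li loads 1, then shifting left by 31 gives 0x80000000, i.e. INT32_MIN as a 32-bit pattern (1 << 31); isel then selects it when the VXCVI invalid-convert bit was set. The split of the shift across two lines appears to be only reformatting for the longer helper name.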
@@ -3941,7 +3936,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
   __ CmpU64(input, Operand(case_count), r0);
   __ bge(GetLabel(i.InputRpo(1)));
   __ mov_label_addr(kScratchReg, table);
-  __ ShiftLeftImm(r0, input, Operand(kSystemPointerSizeLog2));
+  __ ShiftLeftU64(r0, input, Operand(kSystemPointerSizeLog2));
   __ LoadU64(kScratchReg, MemOperand(kScratchReg, r0));
   __ Jump(kScratchReg);
 }
...
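AssembleArchTableSwitch emits a standard jump table; a sketch of what the sequence computes (comments name the emitted instructions):

  // if (input >= case_count) goto GetLabel(i.InputRpo(1));   CmpU64 + bge
  // entry = table_base + (input << kSystemPointerSizeLog2);  mov_label_addr + ShiftLeftU64
  // goto *(uintptr_t*)entry;                                 LoadU64 + Jump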
@@ -747,7 +747,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
     __ LoadU64(r4, MemOperand(frame_pointer(), kStartIndex));
     __ subi(r3, current_input_offset(), Operand(char_size()));
     if (mode_ == UC16) {
-      __ ShiftLeftImm(r0, r4, Operand(1));
+      __ ShiftLeftU64(r0, r4, Operand(1));
       __ sub(r3, r3, r0);
     } else {
       __ sub(r3, r3, r4);
@@ -810,7 +810,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
     __ sub(r4, end_of_input_address(), r4);
     // r4 is length of input in bytes.
     if (mode_ == UC16) {
-      __ ShiftRightImm(r4, r4, Operand(1));
+      __ ShiftRightU64(r4, r4, Operand(1));
     }
     // r4 is length of input in characters.
     __ add(r4, r4, r5);
@@ -828,9 +828,9 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
       __ mr(r25, r5);
     }
     if (mode_ == UC16) {
-      __ ShiftRightArithImm(r5, r5, 1);
+      __ ShiftRightS64(r5, r5, Operand(1));
       __ add(r5, r4, r5);
-      __ ShiftRightArithImm(r6, r6, 1);
+      __ ShiftRightS64(r6, r6, Operand(1));
       __ add(r6, r4, r6);
     } else {
       __ add(r5, r4, r5);
...
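All three regexp hunks are the same unit conversion: in UC16 mode a character is two bytes, so

  bytes = chars << 1;   // ShiftLeftU64(r0, r4, Operand(1))
  chars = bytes >> 1;   // ShiftRightU64(r4, r4, Operand(1))

and r5/r6 use the arithmetic ShiftRightS64, presumably because those offsets can be negative before they are rebased on r4.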