Commit 3745599a authored by Junliang Yan, committed by Commit Bot

s390x: cleanup 32/64 portability macros

Change-Id: I59c905182294dc4e8fb8caf03f10ea66d332e034
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2586153Reviewed-by: 's avatarMilad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#71724}
parent ddbda0ee
This diff is collapsed.
......@@ -533,7 +533,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#define DECLARE_S390_RS_SHIFT_FORMAT(name, opcode) \
void name(Register r1, Register r2, const Operand& opnd = Operand::Zero()) { \
DCHECK(r2 != r0); \
rs_format(opcode, r1.code(), r0.code(), r2.code(), opnd.immediate()); \
} \
void name(Register r1, const Operand& opnd) { \
......
This diff is collapsed.
......@@ -42,86 +42,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
// These exist to provide portability between 32 and 64bit
// Each portable alias expands to the pointer-width native mnemonic:
// on S390X the 64-bit ("g"-suffixed) instruction form, otherwise the
// 32-bit form. Suffix conventions used below: P = pointer-width op,
// RR = register/register, RRR = three-register (distinct operands),
// RRI = two registers + immediate, MI = memory + immediate.
// NOTE(review): these macros were removed by this commit in favor of
// explicit ShiftLeftU64/ShiftRightS32/etc. helpers.
#if V8_TARGET_ARCH_S390X
// The length of the arithmetic operation is the length
// of the register.
// Length:
// H = halfword
// W = word
// arithmetics and bitwise
#define AddMI agsi
#define AddRR agr
#define SubRR sgr
#define AndRR ngr
#define OrRR ogr
#define XorRR xgr
#define LoadComplementRR lcgr
#define LoadNegativeRR lngr
// Distinct Operands
#define AddP_RRR agrk
#define AddPImm_RRI aghik
#define AddLogicalP_RRR algrk
#define SubP_RRR sgrk
#define SubLogicalP_RRR slgrk
#define AndP_RRR ngrk
#define OrP_RRR ogrk
#define XorP_RRR xgrk
// Load / Store
#define LoadAndTestRR ltgr
// Compare
#define CmpPH cghi
#define CmpLogicalPW clgfi
// Shifts
#define ShiftLeftP sllg
#define ShiftRightP srlg
#define ShiftLeftArithP slag
#define ShiftRightArithP srag
#else
// arithmetics and bitwise
// Reg2Reg
#define AddMI asi
#define AddRR ar
#define SubRR sr
#define AndRR nr
#define OrRR or_z
#define XorRR xr
#define LoadComplementRR lcr
#define LoadNegativeRR lnr
// Distinct Operands
#define AddP_RRR ark
#define AddPImm_RRI ahik
#define AddLogicalP_RRR alrk
#define SubP_RRR srk
#define SubLogicalP_RRR slrk
#define AndP_RRR nrk
#define OrP_RRR ork
#define XorP_RRR xrk
// Load / Store
#define LoadAndTestRR ltr
// Compare
#define CmpPH chi
#define CmpLogicalPW clfi
// Shifts
// On 32-bit these dispatch to TurboAssembler helper methods rather
// than raw mnemonics (see the ShiftLeft/ShiftRight declarations).
#define ShiftLeftP ShiftLeft
#define ShiftRightP ShiftRight
#define ShiftLeftArithP ShiftLeftArith
#define ShiftRightArithP ShiftRightArith
#endif
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
......@@ -448,14 +368,24 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchOnCount(Register r1, Label* l);
// Shifts
void ShiftLeft(Register dst, Register src, Register val);
void ShiftLeft(Register dst, Register src, const Operand& val);
void ShiftRight(Register dst, Register src, Register val);
void ShiftRight(Register dst, Register src, const Operand& val);
void ShiftLeftArith(Register dst, Register src, Register shift);
void ShiftLeftArith(Register dst, Register src, const Operand& val);
void ShiftRightArith(Register dst, Register src, Register shift);
void ShiftRightArith(Register dst, Register src, const Operand& val);
void ShiftLeftU32(Register dst, Register src, Register val,
const Operand& val2 = Operand::Zero());
void ShiftLeftU32(Register dst, Register src, const Operand& val);
void ShiftLeftU64(Register dst, Register src, Register val,
const Operand& val2 = Operand::Zero());
void ShiftLeftU64(Register dst, Register src, const Operand& val);
void ShiftRightU32(Register dst, Register src, Register val,
const Operand& val2 = Operand::Zero());
void ShiftRightU32(Register dst, Register src, const Operand& val);
void ShiftRightU64(Register dst, Register src, Register val,
const Operand& val2 = Operand::Zero());
void ShiftRightU64(Register dst, Register src, const Operand& val);
void ShiftRightS32(Register dst, Register src, Register shift,
const Operand& val2 = Operand::Zero());
void ShiftRightS32(Register dst, Register src, const Operand& val);
void ShiftRightS64(Register dst, Register src, Register shift,
const Operand& val2 = Operand::Zero());
void ShiftRightS64(Register dst, Register src, const Operand& val);
void ClearRightImm(Register dst, Register src, const Operand& val);
......@@ -894,7 +824,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Operand(shiftAmount), true);
} else {
if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
ShiftRightP(dst, src, Operand(rangeEnd));
ShiftRightU64(dst, src, Operand(rangeEnd));
else if (dst != src) // If we didn't shift, we might need to copy
mov(dst, src);
int width = rangeStart - rangeEnd + 1;
......@@ -979,9 +909,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre31Bits()) {
ShiftRightArith(dst, src, Operand(kSmiShift));
ShiftRightS32(dst, src, Operand(kSmiShift));
} else {
ShiftRightArithP(dst, src, Operand(kSmiShift));
ShiftRightS64(dst, src, Operand(kSmiShift));
}
lgfr(dst, dst);
}
......@@ -1249,16 +1179,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Shift left by kSmiShift
void SmiTag(Register reg) { SmiTag(reg, reg); }
void SmiTag(Register dst, Register src) {
ShiftLeftP(dst, src, Operand(kSmiShift));
ShiftLeftU64(dst, src, Operand(kSmiShift));
}
void SmiToPtrArrayOffset(Register dst, Register src) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
ShiftLeftP(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
#else
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
ShiftRightArithP(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
ShiftRightS64(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
#endif
}
......
......@@ -1569,7 +1569,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_ShiftLeft32:
// zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeft), nullInstr, RRIInstr(ShiftLeft));
ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeftU32), nullInstr,
RRIInstr(ShiftLeftU32));
} else {
ASSEMBLE_BIN32_OP(RRInstr(sll), nullInstr, RIInstr(sll));
}
......@@ -1602,7 +1603,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_RotRight32: {
// zero-ext
if (HasRegisterInput(instr, 1)) {
__ LoadComplementRR(kScratchReg, i.InputRegister(1));
__ lcgr(kScratchReg, i.InputRegister(1));
__ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else {
__ rll(i.OutputRegister(), i.InputRegister(0),
......@@ -4308,7 +4309,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ CmpLogicalP(input, Operand(case_count));
__ bge(GetLabel(i.InputRpo(1)));
__ larl(kScratchReg, table);
__ ShiftLeftP(r1, input, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, input, Operand(kSystemPointerSizeLog2));
__ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
__ Jump(kScratchReg);
}
......
......@@ -23,13 +23,10 @@ namespace compiler {
V(S390_Xor64) \
V(S390_ShiftLeft32) \
V(S390_ShiftLeft64) \
V(S390_ShiftLeftPair) \
V(S390_ShiftRight32) \
V(S390_ShiftRight64) \
V(S390_ShiftRightPair) \
V(S390_ShiftRightArith32) \
V(S390_ShiftRightArith64) \
V(S390_ShiftRightArithPair) \
V(S390_RotRight32) \
V(S390_RotRight64) \
V(S390_Not32) \
......@@ -40,15 +37,12 @@ namespace compiler {
V(S390_Lay) \
V(S390_Add32) \
V(S390_Add64) \
V(S390_AddPair) \
V(S390_AddFloat) \
V(S390_AddDouble) \
V(S390_Sub32) \
V(S390_Sub64) \
V(S390_SubFloat) \
V(S390_SubDouble) \
V(S390_SubPair) \
V(S390_MulPair) \
V(S390_Mul32) \
V(S390_Mul32WithOverflow) \
V(S390_Mul64) \
......
......@@ -23,13 +23,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Xor64:
case kS390_ShiftLeft32:
case kS390_ShiftLeft64:
case kS390_ShiftLeftPair:
case kS390_ShiftRight32:
case kS390_ShiftRight64:
case kS390_ShiftRightPair:
case kS390_ShiftRightArith32:
case kS390_ShiftRightArith64:
case kS390_ShiftRightArithPair:
case kS390_RotRight32:
case kS390_RotRight64:
case kS390_Not32:
......@@ -40,13 +37,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Lay:
case kS390_Add32:
case kS390_Add64:
case kS390_AddPair:
case kS390_AddFloat:
case kS390_AddDouble:
case kS390_Sub32:
case kS390_Sub64:
case kS390_SubPair:
case kS390_MulPair:
case kS390_SubFloat:
case kS390_SubDouble:
case kS390_Mul32:
......
......@@ -155,11 +155,11 @@ void RegExpMacroAssemblerS390::AdvanceRegister(int reg, int by) {
DCHECK_GT(num_registers_, reg);
if (by != 0) {
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(by)) {
__ AddMI(register_location(reg), Operand(by));
__ agsi(register_location(reg), Operand(by));
} else {
__ LoadP(r2, register_location(reg), r0);
__ mov(r0, Operand(by));
__ AddRR(r2, r0);
__ agr(r2, r0);
__ StoreU64(r2, register_location(reg));
}
}
......@@ -726,7 +726,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ mov(r1, r4);
__ SubP(r1, current_input_offset(), Operand(char_size()));
if (mode_ == UC16) {
__ ShiftLeftP(r0, r3, Operand(1));
__ ShiftLeftU64(r0, r3, Operand(1));
__ SubP(r1, r1, r0);
} else {
__ SubP(r1, r1, r3);
......@@ -789,7 +789,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ SubP(r0, end_of_input_address(), r0);
// r0 is length of input in bytes.
if (mode_ == UC16) {
__ ShiftRightP(r0, r0, Operand(1));
__ ShiftRightU64(r0, r0, Operand(1));
}
// r0 is length of input in characters.
__ AddP(r0, r4);
......@@ -805,10 +805,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// TODO(john.yan): Can be optimized by SIMD instructions
__ LoadMultipleP(r3, r6, register_location(i + 3));
if (mode_ == UC16) {
__ ShiftRightArithP(r3, r3, Operand(1));
__ ShiftRightArithP(r4, r4, Operand(1));
__ ShiftRightArithP(r5, r5, Operand(1));
__ ShiftRightArithP(r6, r6, Operand(1));
__ ShiftRightS64(r3, r3, Operand(1));
__ ShiftRightS64(r4, r4, Operand(1));
__ ShiftRightS64(r5, r5, Operand(1));
__ ShiftRightS64(r6, r6, Operand(1));
}
__ AddP(r3, r0);
__ AddP(r4, r0);
......@@ -826,8 +826,8 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
} else {
__ LoadMultipleP(r3, r4, register_location(i + 1));
if (mode_ == UC16) {
__ ShiftRightArithP(r3, r3, Operand(1));
__ ShiftRightArithP(r4, r4, Operand(1));
__ ShiftRightS64(r3, r3, Operand(1));
__ ShiftRightS64(r4, r4, Operand(1));
}
__ AddP(r3, r0);
__ AddP(r4, r0);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment