Commit b8705eb6 authored by Junliang Yan, committed by Commit Bot

PPC64/s390x: move smi to lower 32-bit

Change-Id: Id203bb297547002a41e18d621b59ce4237f88e5a
Reviewed-on: https://chromium-review.googlesource.com/1183976
Reviewed-by: Muntasir Mallick <mmallick@ca.ibm.com>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#55320}
parent cbf26c4c
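
The hunks below all delete 64-bit-only code paths that assumed the smi payload lives in the upper 32 bits of the tagged word. Once the payload is moved into the lower 32 bits on PPC64/s390x, the generic branches that previously served only 32-bit builds work for both, and the high-half instructions used here (llihf, cih, clih, aih, nihf) are no longer needed. A minimal sketch of the two layouts, using illustrative shift values rather than V8's actual configuration constants:

// Illustrative only: the real kSmiShift/kSmiTagSize values come from V8's
// build configuration, not from this sketch.
#include <cassert>
#include <cstdint>

constexpr int kUpperHalfShift = 32;  // assumed old 64-bit layout
constexpr int kLowerHalfShift = 1;   // assumed new layout (payload + 1-bit tag)

constexpr int64_t TagUpper(int32_t v) { return static_cast<int64_t>(v) << kUpperHalfShift; }
constexpr int64_t TagLower(int32_t v) { return static_cast<int64_t>(v) << kLowerHalfShift; }

int main() {
  // Old layout: the low word is always zero, which is why the removed code
  // could operate on the high word alone.
  static_assert((TagUpper(42) & 0xFFFFFFFF) == 0, "low word is zero");
  // New layout: the whole smi fits in 32 bits, so ordinary 32-bit immediates
  // and shifts (the former #else branches) are sufficient.
  assert(TagLower(42) >> kLowerHalfShift == 42);
  return 0;
}
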
......
@@ -941,13 +941,8 @@ class MacroAssembler : public TurboAssembler {
}
void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
#else
STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
}
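
SmiToPtrArrayOffset turns a tagged smi index into a byte offset into an array of pointers, folding the untag and the pointer-size scale into a single shift; the direction depends on whether the smi shift is larger or smaller than log2 of the pointer size. A small sketch of the arithmetic, assuming 8-byte pointers (kPointerSizeLog2 == 3), a non-negative index, and treating the smi shift as a parameter:

// Sketch only: folds (tagged >> smi_shift) << kPointerSizeLog2 into a single
// shift, as the macro above does.
#include <cstdint>

int64_t SmiToPtrArrayOffset(int64_t tagged, int smi_shift) {
  const int kPointerSizeLog2 = 3;  // assumed 8-byte pointers
  return (smi_shift > kPointerSizeLog2)
             ? tagged >> (smi_shift - kPointerSizeLog2)   // old 64-bit layout
             : tagged << (kPointerSizeLog2 - smi_shift);  // 32-bit / new layout
}

// Example: index 5 tagged with smi_shift == 32 is 5 << 32, and the function
// returns 40 == 5 * 8, the same byte offset the other branch yields for a
// smi tagged with a small shift.
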
// Untag the source value into destination and jump if source is a smi.
......
@@ -968,12 +963,6 @@ class MacroAssembler : public TurboAssembler {
#if V8_TARGET_ARCH_PPC64
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
......
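
SmiWordOffset exists because, with the payload in the upper half of an 8-byte smi field, the 32-bit word that actually holds the integer sits at a different byte offset depending on endianness; the removed STATIC_ASSERTs guaranteed that reading that half directly is safe (kSmiTagSize + kSmiShiftSize == 32). A hedged sketch of the idea, assuming kPointerSize == 8 and that the elided #else branch uses the field offset unchanged:

// Sketch only; the real macro is defined per target in the V8 headers.
constexpr int kPointerSize = 8;
// Little-endian: the high (payload) word is the second 4-byte word.
constexpr int SmiWordOffsetLE(int offset) { return offset + kPointerSize / 2; }
// Big-endian: the high word comes first, so the field offset is used as-is
// (assumed; that branch is elided in the hunk above).
constexpr int SmiWordOffsetBE(int offset) { return offset; }
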
......
@@ -3462,13 +3462,8 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) {
void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
intptr_t value = reinterpret_cast<intptr_t>(smi);
#if V8_TARGET_ARCH_S390X
DCHECK_EQ(value & 0xFFFFFFFF, 0);
// The smi value is loaded into the upper 32 bits. The lower 32 bits are zeros.
llihf(dst, Operand(value >> 32));
#else
DCHECK(is_int32(value));
llilf(dst, Operand(value));
#endif
}
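
The removed S390X branch of LoadSmiLiteral checks that the low word of the tagged value is zero and then materializes the payload with llihf, which writes the high 32 bits and clears the low 32; the retained path loads the whole (now 32-bit) smi with a single 32-bit immediate. A sketch of the value constructed in each case, with the instruction semantics paraphrased rather than emulated exactly:

#include <cstdint>

// llihf-style load (assumed old layout): payload goes to bits 32..63,
// bits 0..31 become zero.
uint64_t LoadViaHighWord(uint32_t payload) {
  return static_cast<uint64_t>(payload) << 32;
}

// llilf-style load (retained path): the whole tagged smi is a 32-bit
// immediate placed in the low word.
uint64_t LoadViaLowWord(uint32_t tagged_smi) {
  return static_cast<uint64_t>(tagged_smi);
}
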
void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
......
@@ -3503,73 +3498,8 @@ void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value,
}
void TurboAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
cih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
} else {
LoadSmiLiteral(scratch, smi);
cgr(src1, scratch);
}
#else
// CFI takes 32-bit immediate.
cfi(src1, Operand(smi));
#endif
}
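
CmpSmiLiteral follows the same split: with DISTINCT_OPS available, cih compares only the high word of the register against smi >> 32, which is sound precisely because a smi in the old layout has a zero low word; otherwise the literal is materialized into a scratch register and compared in full. The same reasoning carries over to the clih, aih, and nihf variants in the hunks that follow. A small sketch of why the high-word comparison is equivalent under that assumption:

#include <cassert>
#include <cstdint>

// Assumes both values are smis in the old layout, i.e. low 32 bits are zero.
bool HighWordsEqual(int64_t a, int64_t b) {
  return static_cast<int32_t>(a >> 32) == static_cast<int32_t>(b >> 32);
}

int main() {
  int64_t a = static_cast<int64_t>(7) << 32;
  int64_t b = static_cast<int64_t>(7) << 32;
  assert(HighWordsEqual(a, b) == (a == b));  // equal payloads
  b = static_cast<int64_t>(9) << 32;
  assert(HighWordsEqual(a, b) == (a == b));  // differing payloads
  return 0;
}
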
void TurboAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
clih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
} else {
LoadSmiLiteral(scratch, smi);
clgr(src1, scratch);
}
#else
// CLFI takes 32-bit immediate.
clfi(src1, Operand(smi));
#endif
}
void TurboAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
if (dst != src) LoadRR(dst, src);
aih(dst, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
} else {
LoadSmiLiteral(scratch, smi);
AddP(dst, src, scratch);
}
#else
AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
#endif
}
void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
if (dst != src) LoadRR(dst, src);
aih(dst, Operand((-reinterpret_cast<intptr_t>(smi)) >> 32));
} else {
LoadSmiLiteral(scratch, smi);
SubP(dst, src, scratch);
}
#else
AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
#endif
}
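
SubSmiLiteral's removed fast path is the most subtle of the group: instead of a subtract-from-high-word instruction it adds (-smi) >> 32 with aih. That works because a smi with a zero low word still has a zero low word after negation, so adding the negated high word is exactly a full-width subtraction. A sketch that checks the identity, assuming the old layout:

#include <cassert>
#include <cstdint>

int main() {
  const int64_t smi = static_cast<int64_t>(5) << 32;   // payload 5, low word 0
  const int64_t src = static_cast<int64_t>(12) << 32;  // payload 12
  // What aih would add: the high word of -smi, sign-extended back to 64 bits.
  const int32_t high = static_cast<int32_t>((-smi) >> 32);
  const int64_t dst = src + static_cast<int64_t>(high) * (int64_t{1} << 32);
  assert(dst == src - smi);  // payload 7 in the high word
  return 0;
}
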
void TurboAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
if (dst != src) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
DCHECK_EQ(reinterpret_cast<intptr_t>(smi) & 0xFFFFFFFF, 0);
int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
nihf(dst, Operand(value));
#else
nilf(dst, Operand(reinterpret_cast<int>(smi)));
#endif
}
// Load a "pointer" sized value from the memory location
......
......
@@ -769,13 +769,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch = r0);
void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
void AddSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch = r0);
void SubSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch = r0);
void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
void AndSmiLiteral(Register dst, Register src, Smi* smi);
// Set new rounding mode RN to FPSCR
void SetRoundingMode(FPRoundingMode RN);
......
@@ -1198,13 +1192,8 @@ class MacroAssembler : public TurboAssembler {
}
void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
#else
STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
}
// Untag the source value into destination and jump if source is a smi.
......
@@ -1214,7 +1203,7 @@ class MacroAssembler : public TurboAssembler {
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value);
bne(not_smi_label /*, cr0*/);
bne(not_smi_label);
}
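
JumpIfNotSmi is unchanged apart from dropping the commented-out condition-register argument on the branch: TestIfSmi checks the tag bit and bne takes the branch when it is set, i.e. when the value is a heap object rather than a smi. A one-line sketch of the underlying tag test, assuming the usual kSmiTag == 0 convention with a 1-bit tag:

#include <cstdint>

// Assumed convention: smis have tag bit 0, heap-object pointers have tag bit 1.
inline bool IsSmi(intptr_t value) { return (value & 1) == 0; }
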
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
......
@@ -1223,12 +1212,6 @@ class MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
#if V8_TARGET_ARCH_S390X
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
......