Commit ca404f3c authored by ivica.bogosavljevic's avatar ivica.bogosavljevic Committed by Commit bot

Revert of MIPS: Add FPXX support to MIPS32R2 (patchset #3 id:40001 of...

Revert of MIPS: Add FPXX support to MIPS32R2 (patchset #3 id:40001 of https://codereview.chromium.org/1586223004/ )

Reason for revert:
Revert patch due to a number of failures appearing on the MIPS v8 simulator

Original issue's description:
> MIPS: Add FPXX support to MIPS32R2
>
> The JIT code generated by V8 is FPXX compliant
> when v8 compiled with FPXX flag. This allows the code to
> run in both FP=1 and FP=0 mode. It also allows v8 to be used
> as a library by both FP32 and FP64 binaries.
>
> BUG=
>
> Committed: https://crrev.com/95110dde666158a230a823fd50a68558ad772320
> Cr-Commit-Position: refs/heads/master@{#33576}

TBR=paul.lind@imgtec.com,gergely.kis@imgtec.com,akos.palfi@imgtec.com,ilija.pavlovic@imgtec.com,marija.antic@imgtec.com,miran.karic@imgtec.com,balazs.kilvady@imgtec.com
# Skipping CQ checks because original CL landed less than 1 days ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1646813003

Cr-Commit-Position: refs/heads/master@{#33583}
parent cb9b8010
...@@ -2099,7 +2099,7 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) { ...@@ -2099,7 +2099,7 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
GenInstrImmediate(LW, at, at, Register::kExponentOffset); GenInstrImmediate(LW, at, at, Register::kExponentOffset);
mthc1(at, fd); mthc1(at, fd);
} }
} else if (IsFp32Mode()) { // fp32 mode. } else { // fp32 mode.
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(LWC1, src.rm(), fd, GenInstrImmediate(LWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset); src.offset_ + Register::kMantissaOffset);
...@@ -2114,22 +2114,6 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) { ...@@ -2114,22 +2114,6 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
nextfpreg.setcode(fd.code() + 1); nextfpreg.setcode(fd.code() + 1);
GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset); GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
} }
} else {
DCHECK(IsFpxxMode());
// Currently we support FPXX on Mips32r2 and Mips32r6
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(LWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
GenInstrImmediate(LW, src.rm(), at,
src.offset_ + Register::kExponentOffset);
mthc1(at, fd);
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
GenInstrImmediate(LW, at, at, Register::kExponentOffset);
mthc1(at, fd);
}
} }
} }
...@@ -2162,7 +2146,7 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) { ...@@ -2162,7 +2146,7 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
mfhc1(t8, fd); mfhc1(t8, fd);
GenInstrImmediate(SW, at, t8, Register::kExponentOffset); GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
} }
} else if (IsFp32Mode()) { // fp32 mode. } else { // fp32 mode.
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(SWC1, src.rm(), fd, GenInstrImmediate(SWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset); src.offset_ + Register::kMantissaOffset);
...@@ -2177,22 +2161,6 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) { ...@@ -2177,22 +2161,6 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
nextfpreg.setcode(fd.code() + 1); nextfpreg.setcode(fd.code() + 1);
GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset); GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
} }
} else {
DCHECK(IsFpxxMode());
// Currently we support FPXX on Mips32r2 and Mips32r6
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(SWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
mfhc1(at, fd);
GenInstrImmediate(SW, src.rm(), at,
src.offset_ + Register::kExponentOffset);
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
mfhc1(t8, fd);
GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
}
} }
} }
......
...@@ -64,13 +64,9 @@ enum FpuMode { ...@@ -64,13 +64,9 @@ enum FpuMode {
#elif defined(FPU_MODE_FP64) #elif defined(FPU_MODE_FP64)
static const FpuMode kFpuMode = kFP64; static const FpuMode kFpuMode = kFP64;
#elif defined(FPU_MODE_FPXX) #elif defined(FPU_MODE_FPXX)
#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R6) static const FpuMode kFpuMode = kFPXX;
static const FpuMode kFpuMode = kFPXX;
#else #else
#error "FPXX is supported only on Mips32R2 and Mips32R6" static const FpuMode kFpuMode = kFP32;
#endif
#else
static const FpuMode kFpuMode = kFP32;
#endif #endif
#if(defined(__mips_hard_float) && __mips_hard_float != 0) #if(defined(__mips_hard_float) && __mips_hard_float != 0)
...@@ -96,9 +92,13 @@ const uint32_t kHoleNanLower32Offset = 4; ...@@ -96,9 +92,13 @@ const uint32_t kHoleNanLower32Offset = 4;
#error Unknown endianness #error Unknown endianness
#endif #endif
#define IsFp64Mode() (kFpuMode == kFP64) #ifndef FPU_MODE_FPXX
#define IsFp32Mode() (kFpuMode == kFP32) #define IsFp64Mode() \
#define IsFpxxMode() (kFpuMode == kFPXX) (kFpuMode == kFP64)
#else
#define IsFp64Mode() \
(CpuFeatures::IsSupported(FP64FPU))
#endif
#ifndef _MIPS_ARCH_MIPS32RX #ifndef _MIPS_ARCH_MIPS32RX
#define IsMipsArchVariant(check) \ #define IsMipsArchVariant(check) \
......
...@@ -1396,7 +1396,7 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd, ...@@ -1396,7 +1396,7 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
void MacroAssembler::Mthc1(Register rt, FPURegister fs) { void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { if (IsFp64Mode()) {
mthc1(rt, fs); mthc1(rt, fs);
} else { } else {
mtc1(rt, fs.high()); mtc1(rt, fs.high());
...@@ -1405,7 +1405,7 @@ void MacroAssembler::Mthc1(Register rt, FPURegister fs) { ...@@ -1405,7 +1405,7 @@ void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
void MacroAssembler::Mfhc1(Register rt, FPURegister fs) { void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { if (IsFp64Mode()) {
mfhc1(rt, fs); mfhc1(rt, fs);
} else { } else {
mfc1(rt, fs.high()); mfc1(rt, fs.high());
......
...@@ -384,14 +384,6 @@ TEST(MIPS3) { ...@@ -384,14 +384,6 @@ TEST(MIPS3) {
TEST(MIPS4) { TEST(MIPS4) {
// Exchange between GP and FP registers is done through memory
// on FPXX compiled binaries and architectures that do not support
// MTHC1 and MTFC1. If this is the case, skipping this test.
if (IsFpxxMode() &&
(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson))) {
return;
}
// Test moves between floating point and integer registers. // Test moves between floating point and integer registers.
CcTest::InitializeVM(); CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate(); Isolate* isolate = CcTest::i_isolate();
...@@ -411,7 +403,7 @@ TEST(MIPS4) { ...@@ -411,7 +403,7 @@ TEST(MIPS4) {
__ ldc1(f6, MemOperand(a0, offsetof(T, b)) ); __ ldc1(f6, MemOperand(a0, offsetof(T, b)) );
// Swap f4 and f6, by using four integer registers, t0-t3. // Swap f4 and f6, by using four integer registers, t0-t3.
if (IsFp32Mode()) { if (!IsFp64Mode()) {
__ mfc1(t0, f4); __ mfc1(t0, f4);
__ mfc1(t1, f5); __ mfc1(t1, f5);
__ mfc1(t2, f6); __ mfc1(t2, f6);
...@@ -423,7 +415,6 @@ TEST(MIPS4) { ...@@ -423,7 +415,6 @@ TEST(MIPS4) {
__ mtc1(t3, f5); __ mtc1(t3, f5);
} else { } else {
CHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson)); CHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
DCHECK(IsFp64Mode() || IsFpxxMode());
__ mfc1(t0, f4); __ mfc1(t0, f4);
__ mfhc1(t1, f4); __ mfhc1(t1, f4);
__ mfc1(t2, f6); __ mfc1(t2, f6);
...@@ -434,7 +425,6 @@ TEST(MIPS4) { ...@@ -434,7 +425,6 @@ TEST(MIPS4) {
__ mtc1(t2, f4); __ mtc1(t2, f4);
__ mthc1(t3, f4); __ mthc1(t3, f4);
} }
// Store the swapped f4 and f5 back to memory. // Store the swapped f4 and f5 back to memory.
__ sdc1(f4, MemOperand(a0, offsetof(T, a)) ); __ sdc1(f4, MemOperand(a0, offsetof(T, a)) );
__ sdc1(f6, MemOperand(a0, offsetof(T, c)) ); __ sdc1(f6, MemOperand(a0, offsetof(T, c)) );
...@@ -821,6 +811,8 @@ TEST(MIPS9) { ...@@ -821,6 +811,8 @@ TEST(MIPS9) {
TEST(MIPS10) { TEST(MIPS10) {
// Test conversions between doubles and words. // Test conversions between doubles and words.
// Test maps double to FP reg pairs in fp32 mode
// and into FP reg in fp64 mode.
CcTest::InitializeVM(); CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate(); Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate); HandleScope scope(isolate);
...@@ -838,16 +830,24 @@ TEST(MIPS10) { ...@@ -838,16 +830,24 @@ TEST(MIPS10) {
Assembler assm(isolate, NULL, 0); Assembler assm(isolate, NULL, 0);
Label L, C; Label L, C;
if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return; if (!IsMipsArchVariant(kMips32r2)) return;
// Load all structure elements to registers. // Load all structure elements to registers.
// (f0, f1) = a (fp32), f0 = a (fp64) // (f0, f1) = a (fp32), f0 = a (fp64)
__ ldc1(f0, MemOperand(a0, offsetof(T, a))); __ ldc1(f0, MemOperand(a0, offsetof(T, a)));
__ mfc1(t0, f0); // t0 = f0(31..0) if (IsFp64Mode()) {
__ mfhc1(t1, f0); // t1 = sign_extend(f0(63..32)) __ mfc1(t0, f0); // t0 = f0(31..0)
__ sw(t0, MemOperand(a0, offsetof(T, dbl_mant))); // dbl_mant = t0 __ mfhc1(t1, f0); // t1 = sign_extend(f0(63..32))
__ sw(t1, MemOperand(a0, offsetof(T, dbl_exp))); // dbl_exp = t1 __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant))); // dbl_mant = t0
__ sw(t1, MemOperand(a0, offsetof(T, dbl_exp))); // dbl_exp = t1
} else {
// Save the raw bits of the double.
__ mfc1(t0, f0); // t0 = a1
__ mfc1(t1, f1); // t1 = a2
__ sw(t0, MemOperand(a0, offsetof(T, dbl_mant))); // dbl_mant = t0
__ sw(t1, MemOperand(a0, offsetof(T, dbl_exp))); // dbl_exp = t1
}
// Convert double in f0 to word, save hi/lo parts. // Convert double in f0 to word, save hi/lo parts.
__ cvt_w_d(f0, f0); // a_word = (word)a __ cvt_w_d(f0, f0); // a_word = (word)a
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment