Commit b23d5389 authored by ivica.bogosavljevic, committed by Commit bot

Reland of MIPS: Add FPXX support to MIPS32R2

Fix failures on the MIPS simulator caused by incomplete
handling of MTHC1 and MFHC1 in FP32 mode.
Fix failures on older kernels that have problems with
MTHC1 and MFHC1 in kernel FPU emulation.

Original issue's description:
> Revert of MIPS: Add FPXX support to MIPS32R2 (patchset #3
> id:40001 of https://codereview.chromium.org/1586223004/ )
>
> Reason for revert:
> Revert patch due to a number of failures appearing on the MIPS v8 simulator
>
> Original issue's description:
>> MIPS: Add FPXX support to MIPS32R2
>>
>> The JIT code generated by V8 is FPXX compliant
>> when V8 is compiled with the FPXX flag. This allows the code to
>> run in both FP=1 and FP=0 mode. It also allows V8 to be used
>> as a library by both FP32 and FP64 binaries.
>>
>> BUG=
>>
>> Committed: https://crrev.com/95110dde666158a230a823fd50a68558ad772320
>> Cr-Commit-Position: refs/heads/master@{#33576}

BUG=

Review URL: https://codereview.chromium.org/1659883002

Cr-Commit-Position: refs/heads/master@{#33808}
parent 8643391f
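The change hinges on how a 64-bit double is laid out over 32-bit FPU registers: in FP32 mode (FR=0) a double occupies the low words of an even/odd register pair, so both halves can be moved with LWC1/SWC1, while FPXX and FP64 code reaches the high word through MTHC1/MFHC1 so the same generated code stays valid under both FR=0 and FR=1. Below is a minimal standalone C++ sketch of the word split those code paths shuffle around; it is a hypothetical illustration, not V8 code, and only assumes a little-endian build where the mantissa word sits at offset 0 and the sign/exponent word at offset 4.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Hypothetical illustration, not V8 code: split a double into the two
    // 32-bit words that the ldc1/sdc1 expansions move around. On a
    // little-endian MIPS32 target the low (mantissa) word would land in the
    // even register f(2n) and the high (sign/exponent) word in the odd
    // partner f(2n+1) in FP32 mode, or in the upper half of f(2n) via MTHC1
    // in FP64/FPXX mode.
    int main() {
      double a = -2.5;
      uint32_t word[2];
      std::memcpy(word, &a, sizeof a);  // word[0] = mantissa word, word[1] = exponent word
      // Reassembling the two words must reproduce the original bit pattern.
      double b;
      std::memcpy(&b, word, sizeof b);
      assert(b == a);
      return 0;
    }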
@@ -2086,33 +2086,36 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
   // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
   // load to two 32-bit loads.
   DCHECK(!src.rm().is(at));
-  if (IsFp64Mode()) {
+  if (IsFp32Mode()) {  // fp32 mode.
     if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
       GenInstrImmediate(LWC1, src.rm(), fd,
                         src.offset_ + Register::kMantissaOffset);
-      GenInstrImmediate(LW, src.rm(), at,
+      FPURegister nextfpreg;
+      nextfpreg.setcode(fd.code() + 1);
+      GenInstrImmediate(LWC1, src.rm(), nextfpreg,
                         src.offset_ + Register::kExponentOffset);
-      mthc1(at, fd);
     } else {  // Offset > 16 bits, use multiple instructions to load.
       LoadRegPlusOffsetToAt(src);
       GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
-      GenInstrImmediate(LW, at, at, Register::kExponentOffset);
-      mthc1(at, fd);
+      FPURegister nextfpreg;
+      nextfpreg.setcode(fd.code() + 1);
+      GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
     }
-  } else {  // fp32 mode.
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
     if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
       GenInstrImmediate(LWC1, src.rm(), fd,
                         src.offset_ + Register::kMantissaOffset);
-      FPURegister nextfpreg;
-      nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(LWC1, src.rm(), nextfpreg,
+      GenInstrImmediate(LW, src.rm(), at,
                         src.offset_ + Register::kExponentOffset);
+      mthc1(at, fd);
     } else {  // Offset > 16 bits, use multiple instructions to load.
       LoadRegPlusOffsetToAt(src);
       GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
-      FPURegister nextfpreg;
-      nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
+      GenInstrImmediate(LW, at, at, Register::kExponentOffset);
+      mthc1(at, fd);
     }
   }
 }
@@ -2133,33 +2136,36 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
   // store to two 32-bit stores.
   DCHECK(!src.rm().is(at));
   DCHECK(!src.rm().is(t8));
-  if (IsFp64Mode()) {
+  if (IsFp32Mode()) {  // fp32 mode.
     if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
       GenInstrImmediate(SWC1, src.rm(), fd,
                         src.offset_ + Register::kMantissaOffset);
-      mfhc1(at, fd);
-      GenInstrImmediate(SW, src.rm(), at,
+      FPURegister nextfpreg;
+      nextfpreg.setcode(fd.code() + 1);
+      GenInstrImmediate(SWC1, src.rm(), nextfpreg,
                         src.offset_ + Register::kExponentOffset);
     } else {  // Offset > 16 bits, use multiple instructions to load.
       LoadRegPlusOffsetToAt(src);
       GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
-      mfhc1(t8, fd);
-      GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
+      FPURegister nextfpreg;
+      nextfpreg.setcode(fd.code() + 1);
+      GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
     }
-  } else {  // fp32 mode.
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
     if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
       GenInstrImmediate(SWC1, src.rm(), fd,
                         src.offset_ + Register::kMantissaOffset);
-      FPURegister nextfpreg;
-      nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(SWC1, src.rm(), nextfpreg,
+      mfhc1(at, fd);
+      GenInstrImmediate(SW, src.rm(), at,
                         src.offset_ + Register::kExponentOffset);
     } else {  // Offset > 16 bits, use multiple instructions to load.
       LoadRegPlusOffsetToAt(src);
       GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
-      FPURegister nextfpreg;
-      nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
+      mfhc1(t8, fd);
+      GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
     }
   }
 }
@@ -64,9 +64,13 @@ enum FpuMode {
 #elif defined(FPU_MODE_FP64)
 static const FpuMode kFpuMode = kFP64;
 #elif defined(FPU_MODE_FPXX)
-static const FpuMode kFpuMode = kFPXX;
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R6)
+static const FpuMode kFpuMode = kFPXX;
 #else
-static const FpuMode kFpuMode = kFP32;
+#error "FPXX is supported only on Mips32R2 and Mips32R6"
+#endif
+#else
+static const FpuMode kFpuMode = kFP32;
 #endif

 #if(defined(__mips_hard_float) && __mips_hard_float != 0)

@@ -92,13 +96,9 @@ const uint32_t kHoleNanLower32Offset = 4;
 #error Unknown endianness
 #endif

-#ifndef FPU_MODE_FPXX
-#define IsFp64Mode() \
-  (kFpuMode == kFP64)
-#else
-#define IsFp64Mode() \
-  (CpuFeatures::IsSupported(FP64FPU))
-#endif
+#define IsFp64Mode() (kFpuMode == kFP64)
+#define IsFp32Mode() (kFpuMode == kFP32)
+#define IsFpxxMode() (kFpuMode == kFPXX)

 #ifndef _MIPS_ARCH_MIPS32RX
 #define IsMipsArchVariant(check) \
@@ -1457,19 +1457,23 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
-  if (IsFp64Mode()) {
-    mthc1(rt, fs);
-  } else {
+  if (IsFp32Mode()) {
     mtc1(rt, fs.high());
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+    mthc1(rt, fs);
   }
 }

 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
-  if (IsFp64Mode()) {
-    mfhc1(rt, fs);
-  } else {
+  if (IsFp32Mode()) {
     mfc1(rt, fs.high());
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+    mfhc1(rt, fs);
   }
 }

@@ -1675,13 +1679,15 @@ void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
-  if (IsFp64Mode()) {
+  if (IsFp32Mode()) {
+    mtc1(src_low, dst);
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
     DCHECK(!src_low.is(at));
     mfhc1(at, dst);
     mtc1(src_low, dst);
     mthc1(at, dst);
-  } else {
-    mtc1(src_low, dst);
   }
 }
@@ -3384,7 +3384,11 @@ void Simulator::DecodeTypeRegisterCOP1() {
       set_register(rt_reg(), get_fpu_register_word(fs_reg()));
       break;
     case MFHC1:
-      set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+      if (IsFp64Mode()) {
+        set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+      } else {
+        set_register(rt_reg(), get_fpu_register_word(fs_reg() + 1));
+      }
       break;
     case CTC1: {
       // At the moment only FCSR is supported.

@@ -3404,7 +3408,11 @@ void Simulator::DecodeTypeRegisterCOP1() {
       set_fpu_register_word(fs_reg(), registers_[rt_reg()]);
       break;
     case MTHC1:
-      set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+      if (IsFp64Mode()) {
+        set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+      } else {
+        set_fpu_register_word(fs_reg() + 1, registers_[rt_reg()]);
+      }
      break;
     case S: {
       DecodeTypeRegisterSRsType();
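A minimal sketch of why the simulator hunks above access register fs + 1 in FP32 mode: the simulator keeps each FPU register as a full 64-bit slot, but in FP32 mode a double is formed from the low words of an even/odd register pair, so the high word of the value "in" fs actually lives in the next register. FakeFpuFile and its members below are hypothetical, only modeled on the accessors used in the hunk; this is not the real Simulator class.

    #include <cassert>
    #include <cstdint>

    // Simplified, hypothetical model of the simulator's FPU register file,
    // showing why FP32-mode MFHC1 reads register fs + 1 instead of the
    // high word of register fs.
    struct FakeFpuFile {
      uint64_t fpu_registers_[32] = {};

      uint32_t get_fpu_register_word(int n) const {
        return static_cast<uint32_t>(fpu_registers_[n] & 0xFFFFFFFFu);
      }
      uint32_t get_fpu_register_hi_word(int n) const {
        return static_cast<uint32_t>(fpu_registers_[n] >> 32);
      }

      // MFHC1 fs: read the high 32 bits of the double currently "in" fs.
      uint32_t mfhc1(int fs, bool fp64_mode) const {
        return fp64_mode ? get_fpu_register_hi_word(fs)    // upper half of one 64-bit register
                         : get_fpu_register_word(fs + 1);  // low word of the odd partner
      }
    };

    int main() {
      FakeFpuFile fpu;
      // FP64 layout: the whole double sits in f4.
      fpu.fpu_registers_[4] = (uint64_t{0xAABBCCDD} << 32) | 0x11223344;
      assert(fpu.mfhc1(4, /*fp64_mode=*/true) == 0xAABBCCDD);
      // FP32 layout: low word in f4, high word in f5.
      fpu.fpu_registers_[4] = 0x11223344;
      fpu.fpu_registers_[5] = 0xAABBCCDD;
      assert(fpu.mfhc1(4, /*fp64_mode=*/false) == 0xAABBCCDD);
      return 0;
    }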
@@ -384,6 +384,14 @@ TEST(MIPS3) {
 TEST(MIPS4) {
+  // Exchange between GP anf FP registers is done through memory
+  // on FPXX compiled binaries and architectures that do not support
+  // MTHC1 and MTFC1. If this is the case, skipping this test.
+  if (IsFpxxMode() &&
+      (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson))) {
+    return;
+  }
+
   // Test moves between floating point and integer registers.
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();

@@ -403,7 +411,7 @@ TEST(MIPS4) {
   __ ldc1(f6, MemOperand(a0, offsetof(T, b)) );

   // Swap f4 and f6, by using four integer registers, t0-t3.
-  if (!IsFp64Mode()) {
+  if (IsFp32Mode()) {
     __ mfc1(t0, f4);
     __ mfc1(t1, f5);
     __ mfc1(t2, f6);

@@ -415,6 +423,7 @@ TEST(MIPS4) {
     __ mtc1(t3, f5);
   } else {
     CHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
+    DCHECK(IsFp64Mode() || IsFpxxMode());
     __ mfc1(t0, f4);
     __ mfhc1(t1, f4);
     __ mfc1(t2, f6);

@@ -425,6 +434,7 @@ TEST(MIPS4) {
     __ mtc1(t2, f4);
     __ mthc1(t3, f4);
   }
+
   // Store the swapped f4 and f5 back to memory.
   __ sdc1(f4, MemOperand(a0, offsetof(T, a)) );
   __ sdc1(f6, MemOperand(a0, offsetof(T, c)) );

@@ -811,8 +821,6 @@ TEST(MIPS9) {
 TEST(MIPS10) {
   // Test conversions between doubles and words.
-  // Test maps double to FP reg pairs in fp32 mode
-  // and into FP reg in fp64 mode.
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   HandleScope scope(isolate);

@@ -830,24 +838,16 @@ TEST(MIPS10) {
   Assembler assm(isolate, NULL, 0);
   Label L, C;

-  if (!IsMipsArchVariant(kMips32r2)) return;
+  if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return;

   // Load all structure elements to registers.
   // (f0, f1) = a (fp32), f0 = a (fp64)
   __ ldc1(f0, MemOperand(a0, offsetof(T, a)));
-  if (IsFp64Mode()) {
-    __ mfc1(t0, f0);  // t0 = f0(31..0)
-    __ mfhc1(t1, f0);  // t1 = sign_extend(f0(63..32))
-    __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant)));  // dbl_mant = t0
-    __ sw(t1, MemOperand(a0, offsetof(T, dbl_exp)));  // dbl_exp = t1
-  } else {
-    // Save the raw bits of the double.
-    __ mfc1(t0, f0);  // t0 = a1
-    __ mfc1(t1, f1);  // t1 = a2
-    __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant)));  // dbl_mant = t0
-    __ sw(t1, MemOperand(a0, offsetof(T, dbl_exp)));  // dbl_exp = t1
-  }
+  __ mfc1(t0, f0);  // t0 = f0(31..0)
+  __ mfhc1(t1, f0);  // t1 = sign_extend(f0(63..32))
+  __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant)));  // dbl_mant = t0
+  __ sw(t1, MemOperand(a0, offsetof(T, dbl_exp)));  // dbl_exp = t1
   // Convert double in f0 to word, save hi/lo parts.
   __ cvt_w_d(f0, f0);  // a_word = (word)a