Commit ecfa9675 authored by palfia@homejinni.com

MIPS: Remove soft-float support.

Port r14159 (0c64645)

Original commit message:
Remove ARM support for VFP2

BUG=

TEST=

Review URL: https://codereview.chromium.org/14113011
Patch from Dusan Milosavljevic <Dusan.Milosavljevic@rt-rk.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14275 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 102c5170
......@@ -321,8 +321,6 @@ DEFINE_bool(enable_unaligned_accesses, true,
"enable unaligned accesses for ARMv7 (ARM only)")
DEFINE_bool(enable_32dregs, true,
"enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
......
......@@ -81,29 +81,17 @@ bool Operand::is_reg() const {
int Register::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(FPU)) {
return kMaxNumAllocatableRegisters;
} else {
return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble;
}
}
int DoubleRegister::NumRegisters() {
if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kMaxNumRegisters;
} else {
return 1;
}
}
int DoubleRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kMaxNumAllocatableRegisters;
} else {
return 1;
}
}
......
......@@ -80,29 +80,24 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
const char* DoubleRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(FPU)) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"f0",
"f2",
"f4",
"f6",
"f8",
"f10",
"f12",
"f14",
"f16",
"f18",
"f20",
"f22",
"f24",
"f26"
};
return names[index];
} else {
ASSERT(index == 0);
return "sfpd0";
}
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"f0",
"f2",
"f4",
"f6",
"f8",
"f10",
"f12",
"f14",
"f16",
"f18",
"f20",
"f22",
"f24",
"f26"
};
return names[index];
}
......@@ -127,10 +122,8 @@ void CpuFeatures::Probe() {
// If the compiler is allowed to use fpu then we can use fpu too in our
// code generation.
#if !defined(__mips__)
// For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
if (FLAG_enable_fpu) {
supported_ |= static_cast<uint64_t>(1) << FPU;
}
// For the simulator build, use FPU.
supported_ |= static_cast<uint64_t>(1) << FPU;
#else
// Probe for additional features not already known to be available.
if (OS::MipsCpuHasFeature(FPU)) {
......@@ -876,7 +869,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
| (fd.code() << kFdShift) | func;
emit(instr);
......@@ -890,7 +882,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
ASSERT(IsEnabled(FPU));
Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
......@@ -904,7 +895,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
......@@ -917,7 +907,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPUControlRegister fs,
SecondaryField func) {
ASSERT(fs.is_valid() && rt.is_valid());
ASSERT(IsEnabled(FPU));
Instr instr =
opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
......@@ -952,7 +941,6 @@ void Assembler::GenInstrImmediate(Opcode opcode,
FPURegister ft,
int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
ASSERT(IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
emit(instr);
......@@ -1874,7 +1862,6 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
......@@ -1885,7 +1872,6 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
ASSERT(IsEnabled(FPU));
ASSERT(src2 == 0.0);
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
......@@ -1894,7 +1880,6 @@ void Assembler::fcmp(FPURegister src1, const double src2,
void Assembler::bc1f(int16_t offset, uint16_t cc) {
ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
......@@ -1902,7 +1887,6 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
void Assembler::bc1t(int16_t offset, uint16_t cc) {
ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
......
......@@ -74,7 +74,6 @@ struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
static const int kSizeInBytes = 4;
static const int kGPRsPerNonFPUDouble = 2;
inline static int NumAllocatableRegisters();
......@@ -300,9 +299,6 @@ const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
const Register sfpd_lo = { kRegister_t6_Code };
const Register sfpd_hi = { kRegister_t7_Code };
// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
......@@ -403,7 +399,6 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == FPU && !FLAG_enable_fpu) return false;
return (supported_ & (1u << f)) != 0;
}
......
This diff is collapsed.
......@@ -62,9 +62,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) {
ASSERT(CpuFeatures::IsSafeForSnapshot(FPU) || save_fp == kDontSaveFPRegs);
}
: save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
......@@ -486,7 +484,6 @@ class RecordWriteStub: public PlatformCodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
CpuFeatureScope scope(masm, FPU);
masm->MultiPushFPU(kCallerSavedFPU);
}
}
......@@ -494,7 +491,6 @@ class RecordWriteStub: public PlatformCodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
CpuFeatureScope scope(masm, FPU);
masm->MultiPopFPU(kCallerSavedFPU);
}
masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
......@@ -685,27 +681,6 @@ class FloatingPointHelper : public AllStatic {
FPURegister double_scratch1,
Label* not_int32);
// Generate non FPU code to check if a double can be exactly represented by a
// 32-bit integer. This does not check for 0 or -0, which need
// to be checked for separately.
// Control jumps to not_int32 if the value is not a 32-bit integer, and falls
// through otherwise.
// src1 and src2 will be cloberred.
//
// Expected input:
// - src1: higher (exponent) part of the double value.
// - src2: lower (mantissa) part of the double value.
// Output status:
// - dst: 32 higher bits of the mantissa. (mantissa[51:20])
// - src2: contains 1.
// - other registers are clobbered.
static void DoubleIs32BitInteger(MacroAssembler* masm,
Register src1,
Register src2,
Register dst,
Register scratch,
Label* not_int32);
// Generates code to call a C function to do a double operation using core
// registers. (Used when FPU is not supported.)
// This code never falls through, but returns with a heap number containing
......
......@@ -62,7 +62,6 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
if (!CpuFeatures::IsSupported(FPU)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
......@@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() {
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
{
CpuFeatureScope use_fpu(&masm, FPU);
DoubleRegister input = f12;
DoubleRegister result = f0;
DoubleRegister double_scratch1 = f4;
......@@ -184,7 +182,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- t0 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool fpu_supported = CpuFeatures::IsSupported(FPU);
Register scratch = t6;
......@@ -249,8 +246,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// t2: end of destination FixedDoubleArray, not tagged
// t3: begin of FixedDoubleArray element fields, not tagged
if (!fpu_supported) __ Push(a1, a0);
__ Branch(&entry);
__ bind(&only_change_map);
......@@ -278,25 +273,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
// Normal smi, convert to double and store.
if (fpu_supported) {
CpuFeatureScope scope(masm, FPU);
__ mtc1(t5, f0);
__ cvt_d_w(f0, f0);
__ sdc1(f0, MemOperand(t3));
__ Addu(t3, t3, kDoubleSize);
} else {
FloatingPointHelper::ConvertIntToDouble(masm,
t5,
FloatingPointHelper::kCoreRegisters,
f0,
a0,
a1,
t7,
f0);
__ sw(a0, MemOperand(t3)); // mantissa
__ sw(a1, MemOperand(t3, kIntSize)); // exponent
__ Addu(t3, t3, kDoubleSize);
}
__ mtc1(t5, f0);
__ cvt_d_w(f0, f0);
__ sdc1(f0, MemOperand(t3));
__ Addu(t3, t3, kDoubleSize);
__ Branch(&entry);
// Hole found, store the-hole NaN.
......@@ -315,7 +296,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ bind(&entry);
__ Branch(&loop, lt, t3, Operand(t2));
if (!fpu_supported) __ Pop(a1, a0);
__ pop(ra);
__ bind(&done);
}
......
......@@ -61,8 +61,9 @@ enum ArchVariants {
// -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
// Not using floating-point coprocessor instructions. This flag is raised when
// -msoft-float is passed to the compiler.
// This flag is raised when -msoft-float is passed to the compiler.
// Although FPU is a base requirement for v8, soft-float ABI is used
// on soft-float systems with FPU kernel emulation.
const bool IsMipsSoftFloatABI = true;
#else
const bool IsMipsSoftFloatABI = true;
......
......@@ -603,17 +603,12 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize =
kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatureScope scope(masm(), FPU);
// Save all FPU registers before messing with them.
__ Subu(sp, sp, Operand(kDoubleRegsSize));
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
}
} else {
__ Subu(sp, sp, Operand(kDoubleRegsSize));
// Save all FPU registers before messing with them.
__ Subu(sp, sp, Operand(kDoubleRegsSize));
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
......@@ -686,16 +681,13 @@ void Deoptimizer::EntryGenerator::Generate() {
}
int double_regs_offset = FrameDescription::double_registers_offset();
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatureScope scope(masm(), FPU);
// Copy FPU registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
// Copy FPU registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
// Remove the bailout id, eventually return address, and the saved registers
......@@ -764,15 +756,11 @@ void Deoptimizer::EntryGenerator::Generate() {
__ bind(&outer_loop_header);
__ Branch(&outer_push_loop, lt, t0, Operand(a1));
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatureScope scope(masm(), FPU);
__ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
__ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
// Push state, pc, and continuation from the last output frame.
......
......@@ -674,17 +674,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
if (CpuFeatures::IsSupported(FPU)) {
ToBooleanStub stub(result_register());
__ CallStub(&stub, condition->test_id());
__ mov(at, zero_reg);
} else {
// Call the runtime to find the boolean value of the source and then
// translate it into control flow to the pair of labels.
__ push(result_register());
__ CallRuntime(Runtime::kToBool, 1);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
}
ToBooleanStub stub(result_register());
__ CallStub(&stub, condition->test_id());
__ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
......@@ -3045,31 +3037,21 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in v0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (CpuFeatures::IsSupported(FPU)) {
__ PrepareCallCFunction(1, a0);
__ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatureScope scope(masm(), FPU);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
__ li(a1, Operand(0x41300000));
// Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
__ Move(f12, v0, a1);
// Move 0x4130000000000000 to FPU.
__ Move(f14, zero_reg, a1);
// Subtract and store the result in the heap number.
__ sub_d(f0, f12, f14);
__ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
__ mov(v0, s0);
} else {
__ PrepareCallCFunction(2, a0);
__ mov(a0, s0);
__ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
__ PrepareCallCFunction(1, a0);
__ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
__ li(a1, Operand(0x41300000));
// Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
__ Move(f12, v0, a1);
// Move 0x4130000000000000 to FPU.
__ Move(f14, zero_reg, a1);
// Subtract and store the result in the heap number.
__ sub_d(f0, f12, f14);
__ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
__ mov(v0, s0);
context()->Plug(v0);
}
......@@ -3207,12 +3189,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(FPU)) {
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
}
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(v0);
}
......
This diff is collapsed.
......@@ -172,10 +172,8 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
......@@ -195,11 +193,9 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
} else {
......@@ -236,7 +232,6 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
......@@ -276,7 +271,6 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
......@@ -287,7 +281,6 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
......
......@@ -2050,16 +2050,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
// float->double conversion on soft float requires an extra scratch
// register. For convenience, just mark the elements register as "UseTemp"
// so that it can be used as a temp during the float->double conversion
// after it's no longer needed after the float load.
bool needs_temp =
!CpuFeatures::IsSupported(FPU) &&
(elements_kind == EXTERNAL_FLOAT_ELEMENTS);
LOperand* external_pointer = needs_temp
? UseTempRegister(instr->elements())
: UseRegister(instr->elements());
LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
......
......@@ -851,7 +851,6 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
void MacroAssembler::MultiPushFPU(RegList regs) {
CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
......@@ -866,7 +865,6 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
void MacroAssembler::MultiPushReversedFPU(RegList regs) {
CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
......@@ -881,7 +879,6 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
void MacroAssembler::MultiPopFPU(RegList regs) {
CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
......@@ -895,7 +892,6 @@ void MacroAssembler::MultiPopFPU(RegList regs) {
void MacroAssembler::MultiPopReversedFPU(RegList regs) {
CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
......@@ -1168,7 +1164,6 @@ void MacroAssembler::BranchF(Label* target,
void MacroAssembler::Move(FPURegister dst, double imm) {
ASSERT(IsEnabled(FPU));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
......@@ -1338,61 +1333,17 @@ void MacroAssembler::ConvertToInt32(Register source,
Subu(scratch2, scratch2, Operand(zero_exponent));
// Dest already has a Smi zero.
Branch(&done, lt, scratch2, Operand(zero_reg));
if (!CpuFeatures::IsSupported(FPU)) {
// We have a shifted exponent between 0 and 30 in scratch2.
srl(dest, scratch2, HeapNumber::kExponentShift);
// We now have the exponent in dest. Subtract from 30 to get
// how much to shift down.
li(at, Operand(30));
subu(dest, at, dest);
}
bind(&right_exponent);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatureScope scope(this, FPU);
// MIPS FPU instructions implementing double precision to integer
// conversion using round to zero. Since the FP value was qualified
// above, the resulting integer should be a legal int32.
// The original 'Exponent' word is still in scratch.
lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
trunc_w_d(double_scratch, double_scratch);
mfc1(dest, double_scratch);
} else {
// On entry, dest has final downshift, scratch has original sign/exp/mant.
// Save sign bit in top bit of dest.
And(scratch2, scratch, Operand(0x80000000));
Or(dest, dest, Operand(scratch2));
// Put back the implicit 1, just above mantissa field.
Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
// Shift up the mantissa bits to take up the space the exponent used to
// take. We just orred in the implicit bit so that took care of one and
// we want to leave the sign bit 0 so we subtract 2 bits from the shift
// distance. But we want to clear the sign-bit so shift one more bit
// left, then shift right one bit.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
sll(scratch, scratch, shift_distance + 1);
srl(scratch, scratch, 1);
// Get the second half of the double. For some exponents we don't
// actually need this because the bits get shifted out again, but
// it's probably slower to test than just to do it.
lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
// Extract the top 10 bits, and insert those bottom 10 bits of scratch.
// The width of the field here is the same as the shift amount above.
const int field_width = shift_distance;
Ext(scratch2, scratch2, 32-shift_distance, field_width);
Ins(scratch, scratch2, 0, field_width);
// Move down according to the exponent.
srlv(scratch, scratch, dest);
// Prepare the negative version of our integer.
subu(scratch2, zero_reg, scratch);
// Trick to check sign bit (msb) held in dest, count leading zero.
// 0 indicates negative, save negative version with conditional move.
Clz(dest, dest);
Movz(scratch, scratch2, dest);
mov(dest, scratch);
}
// MIPS FPU instructions implementing double precision to integer
// conversion using round to zero. Since the FP value was qualified
// above, the resulting integer should be a legal int32.
// The original 'Exponent' word is still in scratch.
lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
trunc_w_d(double_scratch, double_scratch);
mfc1(dest, double_scratch);
bind(&done);
}
......@@ -1408,8 +1359,6 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
ASSERT(!double_input.is(double_scratch));
ASSERT(!except_flag.is(scratch));
ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatureScope scope(this, FPU);
Label done;
// Clear the except flag (0 = no exception)
......@@ -1551,7 +1500,6 @@ void MacroAssembler::EmitECMATruncate(Register result,
Register scratch,
Register scratch2,
Register scratch3) {
CpuFeatureScope scope(this, FPU);
ASSERT(!scratch2.is(result));
ASSERT(!scratch3.is(result));
ASSERT(!scratch3.is(scratch2));
......@@ -3459,11 +3407,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(FPU)) {
destination = FloatingPointHelper::kFPURegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
destination = FloatingPointHelper::kFPURegisters;
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
......@@ -3476,7 +3420,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
scratch4,
f2);
if (destination == FloatingPointHelper::kFPURegisters) {
CpuFeatureScope scope(this, FPU);
sdc1(f0, MemOperand(scratch1, 0));
} else {
sw(mantissa_reg, MemOperand(scratch1, 0));
......@@ -3569,7 +3512,6 @@ void MacroAssembler::CheckMap(Register obj,
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
CpuFeatureScope scope(this, FPU);
if (IsMipsSoftFloatABI) {
Move(dst, v0, v1);
} else {
......@@ -3579,7 +3521,6 @@ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
} else {
......@@ -3590,7 +3531,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) {
CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
if (dreg2.is(f12)) {
ASSERT(!dreg1.is(f14));
......@@ -3609,7 +3549,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) {
CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
Move(a2, reg);
......@@ -4252,10 +4191,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
PrepareCEntryArgs(function->nargs);
PrepareCEntryFunction(ExternalReference(function, isolate()));
SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
? kSaveFPRegs
: kDontSaveFPRegs;
CEntryStub stub(1, mode);
CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
......@@ -4647,7 +4583,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
CpuFeatureScope scope(this, FPU);
// The stack must be allign to 0 modulo 8 for stores with sdc1.
ASSERT(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
......@@ -4685,7 +4620,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
CpuFeatureScope scope(this, FPU);
// Remember: we only need to restore every 2nd double FPU value.
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
......
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment