Commit ecfa9675 authored by palfia@homejinni.com's avatar palfia@homejinni.com

MIPS: Remove soft-float support.

Port r14159 (0c64645)

Original commit message:
Remove ARM support for VFP2

BUG=

TEST=

Review URL: https://codereview.chromium.org/14113011
Patch from Dusan Milosavljevic <Dusan.Milosavljevic@rt-rk.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14275 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 102c5170
...@@ -321,8 +321,6 @@ DEFINE_bool(enable_unaligned_accesses, true, ...@@ -321,8 +321,6 @@ DEFINE_bool(enable_unaligned_accesses, true,
"enable unaligned accesses for ARMv7 (ARM only)") "enable unaligned accesses for ARMv7 (ARM only)")
DEFINE_bool(enable_32dregs, true, DEFINE_bool(enable_32dregs, true,
"enable use of d16-d31 registers on ARM - this requires VFP3") "enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
DEFINE_bool(enable_vldr_imm, false, DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)") "enable use of constant pools for double immediate (ARM only)")
......
...@@ -81,29 +81,17 @@ bool Operand::is_reg() const { ...@@ -81,29 +81,17 @@ bool Operand::is_reg() const {
int Register::NumAllocatableRegisters() { int Register::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(FPU)) {
return kMaxNumAllocatableRegisters; return kMaxNumAllocatableRegisters;
} else {
return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble;
}
} }
int DoubleRegister::NumRegisters() { int DoubleRegister::NumRegisters() {
if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kMaxNumRegisters; return FPURegister::kMaxNumRegisters;
} else {
return 1;
}
} }
int DoubleRegister::NumAllocatableRegisters() { int DoubleRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kMaxNumAllocatableRegisters; return FPURegister::kMaxNumAllocatableRegisters;
} else {
return 1;
}
} }
......
...@@ -80,29 +80,24 @@ static uint64_t CpuFeaturesImpliedByCompiler() { ...@@ -80,29 +80,24 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
const char* DoubleRegister::AllocationIndexToString(int index) { const char* DoubleRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(FPU)) { ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = {
const char* const names[] = { "f0",
"f0", "f2",
"f2", "f4",
"f4", "f6",
"f6", "f8",
"f8", "f10",
"f10", "f12",
"f12", "f14",
"f14", "f16",
"f16", "f18",
"f18", "f20",
"f20", "f22",
"f22", "f24",
"f24", "f26"
"f26" };
}; return names[index];
return names[index];
} else {
ASSERT(index == 0);
return "sfpd0";
}
} }
...@@ -127,10 +122,8 @@ void CpuFeatures::Probe() { ...@@ -127,10 +122,8 @@ void CpuFeatures::Probe() {
// If the compiler is allowed to use fpu then we can use fpu too in our // If the compiler is allowed to use fpu then we can use fpu too in our
// code generation. // code generation.
#if !defined(__mips__) #if !defined(__mips__)
// For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled. // For the simulator build, use FPU.
if (FLAG_enable_fpu) { supported_ |= static_cast<uint64_t>(1) << FPU;
supported_ |= static_cast<uint64_t>(1) << FPU;
}
#else #else
// Probe for additional features not already known to be available. // Probe for additional features not already known to be available.
if (OS::MipsCpuHasFeature(FPU)) { if (OS::MipsCpuHasFeature(FPU)) {
...@@ -876,7 +869,6 @@ void Assembler::GenInstrRegister(Opcode opcode, ...@@ -876,7 +869,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd, FPURegister fd,
SecondaryField func) { SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
| (fd.code() << kFdShift) | func; | (fd.code() << kFdShift) | func;
emit(instr); emit(instr);
...@@ -890,7 +882,6 @@ void Assembler::GenInstrRegister(Opcode opcode, ...@@ -890,7 +882,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd, FPURegister fd,
SecondaryField func) { SecondaryField func) {
ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
ASSERT(IsEnabled(FPU));
Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr); emit(instr);
...@@ -904,7 +895,6 @@ void Assembler::GenInstrRegister(Opcode opcode, ...@@ -904,7 +895,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd, FPURegister fd,
SecondaryField func) { SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift) Instr instr = opcode | fmt | (rt.code() << kRtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr); emit(instr);
...@@ -917,7 +907,6 @@ void Assembler::GenInstrRegister(Opcode opcode, ...@@ -917,7 +907,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPUControlRegister fs, FPUControlRegister fs,
SecondaryField func) { SecondaryField func) {
ASSERT(fs.is_valid() && rt.is_valid()); ASSERT(fs.is_valid() && rt.is_valid());
ASSERT(IsEnabled(FPU));
Instr instr = Instr instr =
opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr); emit(instr);
...@@ -952,7 +941,6 @@ void Assembler::GenInstrImmediate(Opcode opcode, ...@@ -952,7 +941,6 @@ void Assembler::GenInstrImmediate(Opcode opcode,
FPURegister ft, FPURegister ft,
int32_t j) { int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
ASSERT(IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask); | (j & kImm16Mask);
emit(instr); emit(instr);
...@@ -1874,7 +1862,6 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { ...@@ -1874,7 +1862,6 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
// Conditions. // Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt, void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) { FPURegister fs, FPURegister ft, uint16_t cc) {
ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc)); ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0); ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
...@@ -1885,7 +1872,6 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt, ...@@ -1885,7 +1872,6 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
void Assembler::fcmp(FPURegister src1, const double src2, void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) { FPUCondition cond) {
ASSERT(IsEnabled(FPU));
ASSERT(src2 == 0.0); ASSERT(src2 == 0.0);
mtc1(zero_reg, f14); mtc1(zero_reg, f14);
cvt_d_w(f14, f14); cvt_d_w(f14, f14);
...@@ -1894,7 +1880,6 @@ void Assembler::fcmp(FPURegister src1, const double src2, ...@@ -1894,7 +1880,6 @@ void Assembler::fcmp(FPURegister src1, const double src2,
void Assembler::bc1f(int16_t offset, uint16_t cc) { void Assembler::bc1f(int16_t offset, uint16_t cc) {
ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc)); ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr); emit(instr);
...@@ -1902,7 +1887,6 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) { ...@@ -1902,7 +1887,6 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
void Assembler::bc1t(int16_t offset, uint16_t cc) { void Assembler::bc1t(int16_t offset, uint16_t cc) {
ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc)); ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr); emit(instr);
......
...@@ -74,7 +74,6 @@ struct Register { ...@@ -74,7 +74,6 @@ struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters; static const int kNumRegisters = v8::internal::kNumRegisters;
static const int kMaxNumAllocatableRegisters = 14; // v0 through t7. static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
static const int kSizeInBytes = 4; static const int kSizeInBytes = 4;
static const int kGPRsPerNonFPUDouble = 2;
inline static int NumAllocatableRegisters(); inline static int NumAllocatableRegisters();
...@@ -300,9 +299,6 @@ const FPURegister f29 = { 29 }; ...@@ -300,9 +299,6 @@ const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 }; const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 }; const FPURegister f31 = { 31 };
const Register sfpd_lo = { kRegister_t6_Code };
const Register sfpd_hi = { kRegister_t7_Code };
// Register aliases. // Register aliases.
// cp is assumed to be a callee saved register. // cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang // Defined using #define instead of "static const Register&" because Clang
...@@ -403,7 +399,6 @@ class CpuFeatures : public AllStatic { ...@@ -403,7 +399,6 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU. // Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) { static bool IsSupported(CpuFeature f) {
ASSERT(initialized_); ASSERT(initialized_);
if (f == FPU && !FLAG_enable_fpu) return false;
return (supported_ & (1u << f)) != 0; return (supported_ & (1u << f)) != 0;
} }
......
This diff is collapsed.
...@@ -62,9 +62,7 @@ class TranscendentalCacheStub: public PlatformCodeStub { ...@@ -62,9 +62,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
class StoreBufferOverflowStub: public PlatformCodeStub { class StoreBufferOverflowStub: public PlatformCodeStub {
public: public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { : save_doubles_(save_fp) {}
ASSERT(CpuFeatures::IsSafeForSnapshot(FPU) || save_fp == kDontSaveFPRegs);
}
void Generate(MacroAssembler* masm); void Generate(MacroAssembler* masm);
...@@ -486,7 +484,6 @@ class RecordWriteStub: public PlatformCodeStub { ...@@ -486,7 +484,6 @@ class RecordWriteStub: public PlatformCodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) { if (mode == kSaveFPRegs) {
CpuFeatureScope scope(masm, FPU);
masm->MultiPushFPU(kCallerSavedFPU); masm->MultiPushFPU(kCallerSavedFPU);
} }
} }
...@@ -494,7 +491,6 @@ class RecordWriteStub: public PlatformCodeStub { ...@@ -494,7 +491,6 @@ class RecordWriteStub: public PlatformCodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm, inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) { SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) { if (mode == kSaveFPRegs) {
CpuFeatureScope scope(masm, FPU);
masm->MultiPopFPU(kCallerSavedFPU); masm->MultiPopFPU(kCallerSavedFPU);
} }
masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
...@@ -685,27 +681,6 @@ class FloatingPointHelper : public AllStatic { ...@@ -685,27 +681,6 @@ class FloatingPointHelper : public AllStatic {
FPURegister double_scratch1, FPURegister double_scratch1,
Label* not_int32); Label* not_int32);
// Generate non FPU code to check if a double can be exactly represented by a
// 32-bit integer. This does not check for 0 or -0, which need
// to be checked for separately.
// Control jumps to not_int32 if the value is not a 32-bit integer, and falls
// through otherwise.
// src1 and src2 will be clobbered.
//
// Expected input:
// - src1: higher (exponent) part of the double value.
// - src2: lower (mantissa) part of the double value.
// Output status:
// - dst: 32 higher bits of the mantissa. (mantissa[51:20])
// - src2: contains 1.
// - other registers are clobbered.
static void DoubleIs32BitInteger(MacroAssembler* masm,
Register src1,
Register src2,
Register dst,
Register scratch,
Label* not_int32);
// Generates code to call a C function to do a double operation using core // Generates code to call a C function to do a double operation using core
// registers. (Used when FPU is not supported.) // registers. (Used when FPU is not supported.)
// This code never falls through, but returns with a heap number containing // This code never falls through, but returns with a heap number containing
......
...@@ -62,7 +62,6 @@ double fast_exp_simulator(double x) { ...@@ -62,7 +62,6 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() { UnaryMathFunction CreateExpFunction() {
if (!CpuFeatures::IsSupported(FPU)) return &exp;
if (!FLAG_fast_math) return &exp; if (!FLAG_fast_math) return &exp;
size_t actual_size; size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
...@@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() { ...@@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() {
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
{ {
CpuFeatureScope use_fpu(&masm, FPU);
DoubleRegister input = f12; DoubleRegister input = f12;
DoubleRegister result = f0; DoubleRegister result = f0;
DoubleRegister double_scratch1 = f4; DoubleRegister double_scratch1 = f4;
...@@ -184,7 +182,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( ...@@ -184,7 +182,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- t0 : scratch (elements) // -- t0 : scratch (elements)
// ----------------------------------- // -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done; Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool fpu_supported = CpuFeatures::IsSupported(FPU);
Register scratch = t6; Register scratch = t6;
...@@ -249,8 +246,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( ...@@ -249,8 +246,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// t2: end of destination FixedDoubleArray, not tagged // t2: end of destination FixedDoubleArray, not tagged
// t3: begin of FixedDoubleArray element fields, not tagged // t3: begin of FixedDoubleArray element fields, not tagged
if (!fpu_supported) __ Push(a1, a0);
__ Branch(&entry); __ Branch(&entry);
__ bind(&only_change_map); __ bind(&only_change_map);
...@@ -278,25 +273,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( ...@@ -278,25 +273,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(t5, t5, &convert_hole); __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
// Normal smi, convert to double and store. // Normal smi, convert to double and store.
if (fpu_supported) { __ mtc1(t5, f0);
CpuFeatureScope scope(masm, FPU); __ cvt_d_w(f0, f0);
__ mtc1(t5, f0); __ sdc1(f0, MemOperand(t3));
__ cvt_d_w(f0, f0); __ Addu(t3, t3, kDoubleSize);
__ sdc1(f0, MemOperand(t3));
__ Addu(t3, t3, kDoubleSize);
} else {
FloatingPointHelper::ConvertIntToDouble(masm,
t5,
FloatingPointHelper::kCoreRegisters,
f0,
a0,
a1,
t7,
f0);
__ sw(a0, MemOperand(t3)); // mantissa
__ sw(a1, MemOperand(t3, kIntSize)); // exponent
__ Addu(t3, t3, kDoubleSize);
}
__ Branch(&entry); __ Branch(&entry);
// Hole found, store the-hole NaN. // Hole found, store the-hole NaN.
...@@ -315,7 +296,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( ...@@ -315,7 +296,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ bind(&entry); __ bind(&entry);
__ Branch(&loop, lt, t3, Operand(t2)); __ Branch(&loop, lt, t3, Operand(t2));
if (!fpu_supported) __ Pop(a1, a0);
__ pop(ra); __ pop(ra);
__ bind(&done); __ bind(&done);
} }
......
...@@ -61,8 +61,9 @@ enum ArchVariants { ...@@ -61,8 +61,9 @@ enum ArchVariants {
// -mhard-float is passed to the compiler. // -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false; const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0) #elif(defined(__mips_soft_float) && __mips_soft_float != 0)
// Not using floating-point coprocessor instructions. This flag is raised when // This flag is raised when -msoft-float is passed to the compiler.
// -msoft-float is passed to the compiler. // Although FPU is a base requirement for v8, soft-float ABI is used
// on soft-float systems with FPU kernel emulation.
const bool IsMipsSoftFloatABI = true; const bool IsMipsSoftFloatABI = true;
#else #else
const bool IsMipsSoftFloatABI = true; const bool IsMipsSoftFloatABI = true;
......
...@@ -603,17 +603,12 @@ void Deoptimizer::EntryGenerator::Generate() { ...@@ -603,17 +603,12 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = const int kDoubleRegsSize =
kDoubleSize * FPURegister::kMaxNumAllocatableRegisters; kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
if (CpuFeatures::IsSupported(FPU)) { // Save all FPU registers before messing with them.
CpuFeatureScope scope(masm(), FPU); __ Subu(sp, sp, Operand(kDoubleRegsSize));
// Save all FPU registers before messing with them. for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
__ Subu(sp, sp, Operand(kDoubleRegsSize)); FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) { int offset = i * kDoubleSize;
FPURegister fpu_reg = FPURegister::FromAllocationIndex(i); __ sdc1(fpu_reg, MemOperand(sp, offset));
int offset = i * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
}
} else {
__ Subu(sp, sp, Operand(kDoubleRegsSize));
} }
// Push saved_regs (needed to populate FrameDescription::registers_). // Push saved_regs (needed to populate FrameDescription::registers_).
...@@ -686,16 +681,13 @@ void Deoptimizer::EntryGenerator::Generate() { ...@@ -686,16 +681,13 @@ void Deoptimizer::EntryGenerator::Generate() {
} }
int double_regs_offset = FrameDescription::double_registers_offset(); int double_regs_offset = FrameDescription::double_registers_offset();
if (CpuFeatures::IsSupported(FPU)) { // Copy FPU registers to
CpuFeatureScope scope(masm(), FPU); // double_registers_[DoubleRegister::kNumAllocatableRegisters]
// Copy FPU registers to for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
// double_registers_[DoubleRegister::kNumAllocatableRegisters] int dst_offset = i * kDoubleSize + double_regs_offset;
for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) { int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
int dst_offset = i * kDoubleSize + double_regs_offset; __ ldc1(f0, MemOperand(sp, src_offset));
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; __ sdc1(f0, MemOperand(a1, dst_offset));
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
} }
// Remove the bailout id, eventually return address, and the saved registers // Remove the bailout id, eventually return address, and the saved registers
...@@ -764,15 +756,11 @@ void Deoptimizer::EntryGenerator::Generate() { ...@@ -764,15 +756,11 @@ void Deoptimizer::EntryGenerator::Generate() {
__ bind(&outer_loop_header); __ bind(&outer_loop_header);
__ Branch(&outer_push_loop, lt, t0, Operand(a1)); __ Branch(&outer_push_loop, lt, t0, Operand(a1));
if (CpuFeatures::IsSupported(FPU)) { __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
CpuFeatureScope scope(masm(), FPU); for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
__ lw(a1, MemOperand(a0, Deoptimizer::input_offset())); int src_offset = i * kDoubleSize + double_regs_offset;
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) { __ ldc1(fpu_reg, MemOperand(a1, src_offset));
const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
} }
// Push state, pc, and continuation from the last output frame. // Push state, pc, and continuation from the last output frame.
......
...@@ -674,17 +674,9 @@ void FullCodeGenerator::DoTest(Expression* condition, ...@@ -674,17 +674,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true, Label* if_true,
Label* if_false, Label* if_false,
Label* fall_through) { Label* fall_through) {
if (CpuFeatures::IsSupported(FPU)) { ToBooleanStub stub(result_register());
ToBooleanStub stub(result_register()); __ CallStub(&stub, condition->test_id());
__ CallStub(&stub, condition->test_id()); __ mov(at, zero_reg);
__ mov(at, zero_reg);
} else {
// Call the runtime to find the boolean value of the source and then
// translate it into control flow to the pair of labels.
__ push(result_register());
__ CallRuntime(Runtime::kToBool, 1);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
}
Split(ne, v0, Operand(at), if_true, if_false, fall_through); Split(ne, v0, Operand(at), if_true, if_false, fall_through);
} }
...@@ -3045,31 +3037,21 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { ...@@ -3045,31 +3037,21 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in v0 to 0.(32 random bits) in a double // Convert 32 random bits in v0 to 0.(32 random bits) in a double
// by computing: // by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (CpuFeatures::IsSupported(FPU)) { __ PrepareCallCFunction(1, a0);
__ PrepareCallCFunction(1, a0); __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
__ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset)); __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
CpuFeatureScope scope(masm(), FPU); __ li(a1, Operand(0x41300000));
// 0x41300000 is the top half of 1.0 x 2^20 as a double. // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
__ li(a1, Operand(0x41300000)); __ Move(f12, v0, a1);
// Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU. // Move 0x4130000000000000 to FPU.
__ Move(f12, v0, a1); __ Move(f14, zero_reg, a1);
// Move 0x4130000000000000 to FPU. // Subtract and store the result in the heap number.
__ Move(f14, zero_reg, a1); __ sub_d(f0, f12, f14);
// Subtract and store the result in the heap number. __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
__ sub_d(f0, f12, f14); __ mov(v0, s0);
__ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
__ mov(v0, s0);
} else {
__ PrepareCallCFunction(2, a0);
__ mov(a0, s0);
__ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
context()->Plug(v0); context()->Plug(v0);
} }
...@@ -3207,12 +3189,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { ...@@ -3207,12 +3189,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2); ASSERT(args->length() == 2);
VisitForStackValue(args->at(0)); VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1)); VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(FPU)) { MathPowStub stub(MathPowStub::ON_STACK);
MathPowStub stub(MathPowStub::ON_STACK); __ CallStub(&stub);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
}
context()->Plug(v0); context()->Plug(v0);
} }
......
This diff is collapsed.
...@@ -172,10 +172,8 @@ void LGapResolver::BreakCycle(int index) { ...@@ -172,10 +172,8 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) { } else if (source->IsStackSlot()) {
__ lw(kLithiumScratchReg, cgen_->ToMemOperand(source)); __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) { } else if (source->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source)); __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) { } else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source)); __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else { } else {
UNREACHABLE(); UNREACHABLE();
...@@ -195,11 +193,9 @@ void LGapResolver::RestoreValue() { ...@@ -195,11 +193,9 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) { } else if (saved_destination_->IsStackSlot()) {
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_)); __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) { } else if (saved_destination_->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(cgen_->ToDoubleRegister(saved_destination_), __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble); kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) { } else if (saved_destination_->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
__ sdc1(kLithiumScratchDouble, __ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_)); cgen_->ToMemOperand(saved_destination_));
} else { } else {
...@@ -236,7 +232,6 @@ void LGapResolver::EmitMove(int index) { ...@@ -236,7 +232,6 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination); MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) { if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) { if (!destination_operand.OffsetIsInt16Encodable()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
// 'at' is overwritten while saving the value to the destination. // 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source // Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read. // destroys 'at', since that happens before the value is read.
...@@ -276,7 +271,6 @@ void LGapResolver::EmitMove(int index) { ...@@ -276,7 +271,6 @@ void LGapResolver::EmitMove(int index) {
} }
} else if (source->IsDoubleRegister()) { } else if (source->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
DoubleRegister source_register = cgen_->ToDoubleRegister(source); DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) { if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register); __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
...@@ -287,7 +281,6 @@ void LGapResolver::EmitMove(int index) { ...@@ -287,7 +281,6 @@ void LGapResolver::EmitMove(int index) {
} }
} else if (source->IsDoubleStackSlot()) { } else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), FPU);
MemOperand source_operand = cgen_->ToMemOperand(source); MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) { if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand); __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
......
...@@ -2050,16 +2050,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { ...@@ -2050,16 +2050,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() && (instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
// float->double conversion on soft float requires an extra scratch LOperand* external_pointer = UseRegister(instr->elements());
// register. For convenience, just mark the elements register as "UseTemp"
// so that it can be used as a temp during the float->double conversion
// after it's no longer needed after the float load.
bool needs_temp =
!CpuFeatures::IsSupported(FPU) &&
(elements_kind == EXTERNAL_FLOAT_ELEMENTS);
LOperand* external_pointer = needs_temp
? UseTempRegister(instr->elements())
: UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key); result = new(zone()) LLoadKeyed(external_pointer, key);
} }
......
...@@ -851,7 +851,6 @@ void MacroAssembler::MultiPopReversed(RegList regs) { ...@@ -851,7 +851,6 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
void MacroAssembler::MultiPushFPU(RegList regs) { void MacroAssembler::MultiPushFPU(RegList regs) {
CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs); int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize; int16_t stack_offset = num_to_push * kDoubleSize;
...@@ -866,7 +865,6 @@ void MacroAssembler::MultiPushFPU(RegList regs) { ...@@ -866,7 +865,6 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
void MacroAssembler::MultiPushReversedFPU(RegList regs) { void MacroAssembler::MultiPushReversedFPU(RegList regs) {
CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs); int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize; int16_t stack_offset = num_to_push * kDoubleSize;
...@@ -881,7 +879,6 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) { ...@@ -881,7 +879,6 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
void MacroAssembler::MultiPopFPU(RegList regs) { void MacroAssembler::MultiPopFPU(RegList regs) {
CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0; int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) { for (int16_t i = 0; i < kNumRegisters; i++) {
...@@ -895,7 +892,6 @@ void MacroAssembler::MultiPopFPU(RegList regs) { ...@@ -895,7 +892,6 @@ void MacroAssembler::MultiPopFPU(RegList regs) {
void MacroAssembler::MultiPopReversedFPU(RegList regs) { void MacroAssembler::MultiPopReversedFPU(RegList regs) {
CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0; int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) { for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
...@@ -1168,7 +1164,6 @@ void MacroAssembler::BranchF(Label* target, ...@@ -1168,7 +1164,6 @@ void MacroAssembler::BranchF(Label* target,
void MacroAssembler::Move(FPURegister dst, double imm) { void MacroAssembler::Move(FPURegister dst, double imm) {
ASSERT(IsEnabled(FPU));
static const DoubleRepresentation minus_zero(-0.0); static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0); static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm); DoubleRepresentation value(imm);
...@@ -1338,61 +1333,17 @@ void MacroAssembler::ConvertToInt32(Register source, ...@@ -1338,61 +1333,17 @@ void MacroAssembler::ConvertToInt32(Register source,
Subu(scratch2, scratch2, Operand(zero_exponent)); Subu(scratch2, scratch2, Operand(zero_exponent));
// Dest already has a Smi zero. // Dest already has a Smi zero.
Branch(&done, lt, scratch2, Operand(zero_reg)); Branch(&done, lt, scratch2, Operand(zero_reg));
if (!CpuFeatures::IsSupported(FPU)) {
// We have a shifted exponent between 0 and 30 in scratch2.
srl(dest, scratch2, HeapNumber::kExponentShift);
// We now have the exponent in dest. Subtract from 30 to get
// how much to shift down.
li(at, Operand(30));
subu(dest, at, dest);
}
bind(&right_exponent); bind(&right_exponent);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatureScope scope(this, FPU); // MIPS FPU instructions implementing double precision to integer
// MIPS FPU instructions implementing double precision to integer // conversion using round to zero. Since the FP value was qualified
// conversion using round to zero. Since the FP value was qualified // above, the resulting integer should be a legal int32.
// above, the resulting integer should be a legal int32. // The original 'Exponent' word is still in scratch.
// The original 'Exponent' word is still in scratch. lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1)); trunc_w_d(double_scratch, double_scratch);
trunc_w_d(double_scratch, double_scratch); mfc1(dest, double_scratch);
mfc1(dest, double_scratch);
} else {
// On entry, dest has final downshift, scratch has original sign/exp/mant.
// Save sign bit in top bit of dest.
And(scratch2, scratch, Operand(0x80000000));
Or(dest, dest, Operand(scratch2));
// Put back the implicit 1, just above mantissa field.
Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
// Shift up the mantissa bits to take up the space the exponent used to
// take. We just orred in the implicit bit so that took care of one and
// we want to leave the sign bit 0 so we subtract 2 bits from the shift
// distance. But we want to clear the sign-bit so shift one more bit
// left, then shift right one bit.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
sll(scratch, scratch, shift_distance + 1);
srl(scratch, scratch, 1);
// Get the second half of the double. For some exponents we don't
// actually need this because the bits get shifted out again, but
// it's probably slower to test than just to do it.
lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
// Extract the top 10 bits, and insert those bottom 10 bits of scratch.
// The width of the field here is the same as the shift amount above.
const int field_width = shift_distance;
Ext(scratch2, scratch2, 32-shift_distance, field_width);
Ins(scratch, scratch2, 0, field_width);
// Move down according to the exponent.
srlv(scratch, scratch, dest);
// Prepare the negative version of our integer.
subu(scratch2, zero_reg, scratch);
// Trick to check sign bit (msb) held in dest, count leading zero.
// 0 indicates negative, save negative version with conditional move.
Clz(dest, dest);
Movz(scratch, scratch2, dest);
mov(dest, scratch);
}
bind(&done); bind(&done);
} }
...@@ -1408,8 +1359,6 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, ...@@ -1408,8 +1359,6 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
ASSERT(!double_input.is(double_scratch)); ASSERT(!double_input.is(double_scratch));
ASSERT(!except_flag.is(scratch)); ASSERT(!except_flag.is(scratch));
ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatureScope scope(this, FPU);
Label done; Label done;
// Clear the except flag (0 = no exception) // Clear the except flag (0 = no exception)
...@@ -1551,7 +1500,6 @@ void MacroAssembler::EmitECMATruncate(Register result, ...@@ -1551,7 +1500,6 @@ void MacroAssembler::EmitECMATruncate(Register result,
Register scratch, Register scratch,
Register scratch2, Register scratch2,
Register scratch3) { Register scratch3) {
CpuFeatureScope scope(this, FPU);
ASSERT(!scratch2.is(result)); ASSERT(!scratch2.is(result));
ASSERT(!scratch3.is(result)); ASSERT(!scratch3.is(result));
ASSERT(!scratch3.is(scratch2)); ASSERT(!scratch3.is(scratch2));
...@@ -3459,11 +3407,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, ...@@ -3459,11 +3407,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// scratch1 is now effective address of the double element // scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination; FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(FPU)) { destination = FloatingPointHelper::kFPURegisters;
destination = FloatingPointHelper::kFPURegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
Register untagged_value = elements_reg; Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg); SmiUntag(untagged_value, value_reg);
...@@ -3476,7 +3420,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, ...@@ -3476,7 +3420,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
scratch4, scratch4,
f2); f2);
if (destination == FloatingPointHelper::kFPURegisters) { if (destination == FloatingPointHelper::kFPURegisters) {
CpuFeatureScope scope(this, FPU);
sdc1(f0, MemOperand(scratch1, 0)); sdc1(f0, MemOperand(scratch1, 0));
} else { } else {
sw(mantissa_reg, MemOperand(scratch1, 0)); sw(mantissa_reg, MemOperand(scratch1, 0));
...@@ -3569,7 +3512,6 @@ void MacroAssembler::CheckMap(Register obj, ...@@ -3569,7 +3512,6 @@ void MacroAssembler::CheckMap(Register obj,
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
CpuFeatureScope scope(this, FPU);
if (IsMipsSoftFloatABI) { if (IsMipsSoftFloatABI) {
Move(dst, v0, v1); Move(dst, v0, v1);
} else { } else {
...@@ -3579,7 +3521,6 @@ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { ...@@ -3579,7 +3521,6 @@ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) { if (!IsMipsSoftFloatABI) {
Move(f12, dreg); Move(f12, dreg);
} else { } else {
...@@ -3590,7 +3531,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { ...@@ -3590,7 +3531,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) { DoubleRegister dreg2) {
CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) { if (!IsMipsSoftFloatABI) {
if (dreg2.is(f12)) { if (dreg2.is(f12)) {
ASSERT(!dreg1.is(f14)); ASSERT(!dreg1.is(f14));
...@@ -3609,7 +3549,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, ...@@ -3609,7 +3549,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) { Register reg) {
CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) { if (!IsMipsSoftFloatABI) {
Move(f12, dreg); Move(f12, dreg);
Move(a2, reg); Move(a2, reg);
...@@ -4252,10 +4191,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { ...@@ -4252,10 +4191,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id); const Runtime::Function* function = Runtime::FunctionForId(id);
PrepareCEntryArgs(function->nargs); PrepareCEntryArgs(function->nargs);
PrepareCEntryFunction(ExternalReference(function, isolate())); PrepareCEntryFunction(ExternalReference(function, isolate()));
SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU) CEntryStub stub(1, kSaveFPRegs);
? kSaveFPRegs
: kDontSaveFPRegs;
CEntryStub stub(1, mode);
CallStub(&stub); CallStub(&stub);
} }
...@@ -4647,7 +4583,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, ...@@ -4647,7 +4583,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) { if (save_doubles) {
CpuFeatureScope scope(this, FPU);
// The stack must be allign to 0 modulo 8 for stores with sdc1. // The stack must be allign to 0 modulo 8 for stores with sdc1.
ASSERT(kDoubleSize == frame_alignment); ASSERT(kDoubleSize == frame_alignment);
if (frame_alignment > 0) { if (frame_alignment > 0) {
...@@ -4685,7 +4620,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, ...@@ -4685,7 +4620,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
bool do_return) { bool do_return) {
// Optionally restore all double registers. // Optionally restore all double registers.
if (save_doubles) { if (save_doubles) {
CpuFeatureScope scope(this, FPU);
// Remember: we only need to restore every 2nd double FPU value. // Remember: we only need to restore every 2nd double FPU value.
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset)); lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
......
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment