Commit ffe7f6a1 authored by danno@chromium.org

Remove ARM support for VFP2

R=yangguo@chromium.org

Review URL: https://codereview.chromium.org/13560007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14159 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent bdb305aa
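Every hunk below follows the same shape: code that branched on CpuFeatures::IsSupported(VFP2), or opened a CpuFeatureScope for VFP2, keeps only its VFP path, and the core-register (software float) fallbacks are deleted. A minimal, self-contained sketch of that shape; the enum, struct and register counts here are illustrative stand-ins, not V8's definitions:

#include <cstdint>

// Stand-ins for the real CpuFeatures machinery seen in the diffs below;
// this is a schematic of the pattern, not code from the tree.
enum CpuFeature { VFP2, VFP3, ARMv7 };

struct CpuFeatures {
  static uint64_t supported_;
  static bool IsSupported(CpuFeature f) { return (supported_ >> f) & 1; }
};
uint64_t CpuFeatures::supported_ = 0;

// Before: callers picked between a VFP path and a core-register fallback.
int AllocatableDoubleRegisters_Before() {
  if (CpuFeatures::IsSupported(VFP2)) return 14;  // d0-d13; d14/d15 reserved
  return 1;                                       // software-float fallback
}

// After: VFP is the baseline, so only the fast path survives.
int AllocatableDoubleRegisters_After() {
  return 14;
}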
@@ -48,29 +48,17 @@ namespace internal {
 int Register::NumAllocatableRegisters() {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    return kMaxNumAllocatableRegisters;
-  } else {
-    return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
-  }
+  return kMaxNumAllocatableRegisters;
 }

 int DwVfpRegister::NumRegisters() {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
-  } else {
-    return 1;
-  }
+  return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
 }

 int DwVfpRegister::NumAllocatableRegisters() {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    return NumRegisters() - kNumReservedRegisters;
-  } else {
-    return 1;
-  }
+  return NumRegisters() - kNumReservedRegisters;
 }
...
@@ -59,7 +59,6 @@ class CpuFeatures : public AllStatic {
   static bool IsSupported(CpuFeature f) {
     ASSERT(initialized_);
     if (f == VFP3 && !FLAG_enable_vfp3) return false;
-    if (f == VFP2 && !FLAG_enable_vfp2) return false;
     if (f == SUDIV && !FLAG_enable_sudiv) return false;
     if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
       return false;
@@ -117,7 +116,6 @@ struct Register {
   static const int kNumRegisters = 16;
   static const int kMaxNumAllocatableRegisters = 8;
   static const int kSizeInBytes = 4;
-  static const int kGPRsPerNonVFP2Double = 2;

   inline static int NumAllocatableRegisters();
@@ -370,9 +368,6 @@ const DwVfpRegister d29 = { 29 };
 const DwVfpRegister d30 = { 30 };
 const DwVfpRegister d31 = { 31 };

-const Register sfpd_lo = { kRegister_r6_Code };
-const Register sfpd_hi = { kRegister_r7_Code };
-
 // Aliases for double registers. Defined using #define instead of
 // "static const DwVfpRegister&" because Clang complains otherwise when a
 // compilation unit that includes this header doesn't use the variables.
...
@@ -61,9 +61,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
 class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
-      : save_doubles_(save_fp) {
-    ASSERT(CpuFeatures::IsSafeForSnapshot(VFP2) || save_fp == kDontSaveFPRegs);
-  }
+      : save_doubles_(save_fp) {}

   void Generate(MacroAssembler* masm);
@@ -473,7 +471,6 @@ class RecordWriteStub: public PlatformCodeStub {
     if (mode == kSaveFPRegs) {
       // Number of d-regs not known at snapshot time.
       ASSERT(!Serializer::enabled());
-      CpuFeatureScope scope(masm, VFP2);
       masm->sub(sp,
                 sp,
                 Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
@@ -491,7 +488,6 @@ class RecordWriteStub: public PlatformCodeStub {
     if (mode == kSaveFPRegs) {
       // Number of d-regs not known at snapshot time.
       ASSERT(!Serializer::enabled());
-      CpuFeatureScope scope(masm, VFP2);
       // Restore all VFP registers except d0.
       // TODO(hans): We should probably restore d0 too. And maybe use vldm.
       for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
...
@@ -62,7 +62,6 @@ double fast_exp_simulator(double x) {

 UnaryMathFunction CreateExpFunction() {
-  if (!CpuFeatures::IsSupported(VFP2)) return &exp;
   if (!FLAG_fast_math) return &exp;
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
@@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() {
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

   {
-    CpuFeatureScope use_vfp(&masm, VFP2);
     DwVfpRegister input = d0;
     DwVfpRegister result = d1;
     DwVfpRegister double_scratch1 = d2;
@@ -185,7 +183,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   //  -- r4    : scratch (elements)
   // -----------------------------------
   Label loop, entry, convert_hole, gc_required, only_change_map, done;
-  bool vfp2_supported = CpuFeatures::IsSupported(VFP2);

   if (mode == TRACK_ALLOCATION_SITE) {
     __ TestJSArrayForAllocationSiteInfo(r2, r4);
@@ -248,7 +245,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   // r5: kHoleNanUpper32
   // r6: end of destination FixedDoubleArray, not tagged
   // r7: begin of FixedDoubleArray element fields, not tagged
-  if (!vfp2_supported) __ Push(r1, r0);
   __ b(&entry);
@@ -276,23 +272,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);

   // Normal smi, convert to double and store.
-  if (vfp2_supported) {
-    CpuFeatureScope scope(masm, VFP2);
-    __ vmov(s0, r9);
-    __ vcvt_f64_s32(d0, s0);
-    __ vstr(d0, r7, 0);
-    __ add(r7, r7, Operand(8));
-  } else {
-    FloatingPointHelper::ConvertIntToDouble(masm,
-                                            r9,
-                                            FloatingPointHelper::kCoreRegisters,
-                                            d0,
-                                            r0,
-                                            r1,
-                                            lr,
-                                            s0);
-    __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
-  }
+  __ vmov(s0, r9);
+  __ vcvt_f64_s32(d0, s0);
+  __ vstr(d0, r7, 0);
+  __ add(r7, r7, Operand(8));
   __ b(&entry);

   // Hole found, store the-hole NaN.
@@ -310,7 +293,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   __ cmp(r7, r6);
   __ b(lt, &loop);

-  if (!vfp2_supported) __ Pop(r1, r0);
   __ pop(lr);
   __ bind(&done);
 }
...
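The hunk above drops the software path that built the double in a core register pair (FloatingPointHelper::ConvertIntToDouble with kCoreRegisters) and keeps only the vmov/vcvt_f64_s32 sequence. As a rough illustration of what that removed helper had to do by hand, here is a standalone sketch of producing the raw bits of a double from an int32; it is not V8 code, just the bit-level equivalent of vcvt_f64_s32:

#include <cstdint>
#include <cstring>

// Build the IEEE-754 bit pattern for static_cast<double>(value) manually,
// the way a core-register (no-VFP) code path has to.
uint64_t Int32ToDoubleBits(int32_t value) {
  if (value == 0) return 0;  // +0.0
  uint64_t sign = value < 0 ? (uint64_t{1} << 63) : 0;
  uint32_t magnitude = value < 0 ? 0u - static_cast<uint32_t>(value)
                                 : static_cast<uint32_t>(value);
  // Position of the leading one bit (0..31).
  int shift = 31;
  while (((magnitude >> shift) & 1) == 0) shift--;
  uint64_t exponent = static_cast<uint64_t>(1023 + shift) << 52;
  // Drop the implicit leading one and left-align the rest into the 52-bit mantissa.
  uint64_t mantissa =
      (static_cast<uint64_t>(magnitude) & ~(uint64_t{1} << shift)) << (52 - shift);
  return sign | exponent | mantissa;
}

// Sanity check against the compiler's own conversion.
bool MatchesHardware(int32_t value) {
  double expected = static_cast<double>(value);
  uint64_t expected_bits;
  std::memcpy(&expected_bits, &expected, sizeof expected_bits);
  return expected_bits == Int32ToDoubleBits(value);
}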
@@ -594,8 +594,6 @@ void Deoptimizer::EntryGenerator::Generate() {
   const int kDoubleRegsSize =
       kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;

-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm(), VFP2);
   // Save all allocatable VFP registers before messing with them.
   ASSERT(kDoubleRegZero.code() == 14);
   ASSERT(kScratchDoubleReg.code() == 15);
@@ -608,9 +606,6 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ vstm(db_w, sp, d16, d31, ne);
   __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
   __ vstm(db_w, sp, d0, d13);
-  } else {
-    __ sub(sp, sp, Operand(kDoubleRegsSize));
-  }

   // Push all 16 registers (needed to populate FrameDescription::registers_).
   // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -669,8 +664,6 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ str(r2, MemOperand(r1, offset));
   }

-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm(), VFP2);
   // Copy VFP registers to
   // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
   int double_regs_offset = FrameDescription::double_registers_offset();
@@ -680,7 +673,6 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ vldr(d0, sp, src_offset);
     __ vstr(d0, r1, dst_offset);
   }
-  }

   // Remove the bailout id, eventually return address, and the saved registers
   // from the stack.
@@ -749,8 +741,6 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ cmp(r4, r1);
   __ b(lt, &outer_push_loop);

-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm(), VFP2);
   // Check CPU flags for number of registers, setting the Z condition flag.
   __ CheckFor32DRegs(ip);
@@ -764,7 +754,6 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
     src_offset += kDoubleSize;
   }
-  }

   // Push state, pc, and continuation from the last output frame.
   if (type() != OSR) {
...
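With the guards gone, the deoptimizer entry always saves the allocatable VFP registers with vstm (d0-d13, plus d16-d31 when 32 D-registers are present) and later copies each saved slot into FrameDescription::double_registers_. A small standalone sketch of that copy loop's offset arithmetic; the names and layout here are illustrative, not the real FrameDescription:

#include <cstdint>
#include <cstring>

constexpr int kDoubleSize = 8;

// Copy doubles that were spilled contiguously at stack_area into the
// per-register slots of a deopt frame description.
void CopySavedDoubles(const uint8_t* stack_area, uint8_t* frame_description,
                      int double_regs_offset, int num_saved_doubles) {
  for (int i = 0; i < num_saved_doubles; ++i) {
    int src_offset = i * kDoubleSize;                       // slot written by vstm
    int dst_offset = double_regs_offset + i * kDoubleSize;  // double_registers_[i]
    std::memcpy(frame_description + dst_offset, stack_area + src_offset,
                kDoubleSize);
  }
}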
@@ -3027,14 +3027,12 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
   // Convert 32 random bits in r0 to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
-  if (CpuFeatures::IsSupported(VFP2)) {
   __ PrepareCallCFunction(1, r0);
   __ ldr(r0,
          ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
   __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-    CpuFeatureScope scope(masm(), VFP2);
   // 0x41300000 is the top half of 1.0 x 2^20 as a double.
   // Create this constant using mov/orr to avoid PC relative load.
   __ mov(r1, Operand(0x41000000));
@@ -3049,15 +3047,6 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
   __ sub(r0, r4, Operand(kHeapObjectTag));
   __ vstr(d7, r0, HeapNumber::kValueOffset);
   __ mov(r0, r4);
-  } else {
-    __ PrepareCallCFunction(2, r0);
-    __ ldr(r1,
-           ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
-    __ mov(r0, Operand(r4));
-    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
-    __ CallCFunction(
-        ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
-  }
   context()->Plug(r0);
 }
@@ -3194,12 +3183,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  if (CpuFeatures::IsSupported(VFP2)) {
-    MathPowStub stub(MathPowStub::ON_STACK);
-    __ CallStub(&stub);
-  } else {
-    __ CallRuntime(Runtime::kMath_pow, 2);
-  }
+  MathPowStub stub(MathPowStub::ON_STACK);
+  __ CallStub(&stub);
   context()->Plug(r0);
 }
...
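The surviving VFP path in EmitRandomHeapNumber implements the trick described in its comment: 32 random bits become a double in [0, 1) by planting them in the low mantissa word of 1.0 x 2^20 (high word 0x41300000, built with mov/orr to avoid a PC-relative load) and then subtracting 1.0 x 2^20. A self-contained C++ sketch of the same bit trick, for illustration only:

#include <cstdint>
#include <cstring>

// Turn 32 random bits into a double in [0, 1) without an integer division.
double RandomBitsToDouble(uint32_t random_bits) {
  // 0x41300000'00000000 is the bit pattern of 1.0 * 2^20; the low 32 mantissa
  // bits are free, so the random bits land there with weight 2^-32.
  uint64_t bits = (uint64_t{0x41300000} << 32) | random_bits;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 1048576.0;  // subtract 1.0 * 2^20, leaving random_bits * 2^-32
}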
@@ -2133,16 +2133,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
         (instr->representation().IsDouble() &&
          ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
           (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-    // float->double conversion on non-VFP2 requires an extra scratch
-    // register. For convenience, just mark the elements register as "UseTemp"
-    // so that it can be used as a temp during the float->double conversion
-    // after it's no longer needed after the float load.
-    bool needs_temp =
-        !CpuFeatures::IsSupported(VFP2) &&
-        (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
-    LOperand* external_pointer = needs_temp
-        ? UseTempRegister(instr->elements())
-        : UseRegister(instr->elements());
+    LOperand* external_pointer = UseRegister(instr->elements());
     result = new(zone()) LLoadKeyed(external_pointer, key);
   }
...
@@ -171,10 +171,8 @@ void LGapResolver::BreakCycle(int index) {
   } else if (source->IsStackSlot()) {
     __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
-    CpuFeatureScope scope(cgen_->masm(), VFP2);
     __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
   } else if (source->IsDoubleStackSlot()) {
-    CpuFeatureScope scope(cgen_->masm(), VFP2);
     __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
   } else {
     UNREACHABLE();
@@ -194,10 +192,8 @@ void LGapResolver::RestoreValue() {
   } else if (saved_destination_->IsStackSlot()) {
     __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
   } else if (saved_destination_->IsDoubleRegister()) {
-    CpuFeatureScope scope(cgen_->masm(), VFP2);
     __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
   } else if (saved_destination_->IsDoubleStackSlot()) {
-    CpuFeatureScope scope(cgen_->masm(), VFP2);
     __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
   } else {
     UNREACHABLE();
@@ -233,7 +229,6 @@ void LGapResolver::EmitMove(int index) {
     MemOperand destination_operand = cgen_->ToMemOperand(destination);
     if (in_cycle_) {
       if (!destination_operand.OffsetIsUint12Encodable()) {
-        CpuFeatureScope scope(cgen_->masm(), VFP2);
         // ip is overwritten while saving the value to the destination.
         // Therefore we can't use ip.  It is OK if the read from the source
         // destroys ip, since that happens before the value is read.
@@ -272,7 +267,6 @@ void LGapResolver::EmitMove(int index) {
     }

   } else if (source->IsDoubleRegister()) {
-    CpuFeatureScope scope(cgen_->masm(), VFP2);
     DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
     if (destination->IsDoubleRegister()) {
       __ vmov(cgen_->ToDoubleRegister(destination), source_register);
@@ -282,7 +276,6 @@ void LGapResolver::EmitMove(int index) {
     }

   } else if (source->IsDoubleStackSlot()) {
-    CpuFeatureScope scope(cgen_->masm(), VFP2);
     MemOperand source_operand = cgen_->ToMemOperand(source);
     if (destination->IsDoubleRegister()) {
       __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
...
@@ -291,8 +291,6 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {

 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
-  ASSERT(CpuFeatures::IsSupported(VFP2));
-  CpuFeatureScope scope(this, VFP2);
   if (!dst.is(src)) {
     vmov(dst, src);
   }
@@ -811,7 +809,6 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
 void MacroAssembler::Vmov(const DwVfpRegister dst,
                           const double imm,
                           const Register scratch) {
-  ASSERT(IsEnabled(VFP2));
   static const DoubleRepresentation minus_zero(-0.0);
   static const DoubleRepresentation zero(0.0);
   DoubleRepresentation value(imm);
@@ -873,7 +870,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   // Optionally save all double registers.
   if (save_doubles) {
-    CpuFeatureScope scope(this, VFP2);
     // Check CPU flags for number of registers, setting the Z condition flag.
     CheckFor32DRegs(ip);
@@ -938,7 +934,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                     Register argument_count) {
   // Optionally restore all double registers.
   if (save_doubles) {
-    CpuFeatureScope scope(this, VFP2);
     // Calculate the stack location of the saved doubles and restore them.
     const int offset = 2 * kPointerSize;
     sub(r3, fp,
@@ -975,7 +970,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
 }

 void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
-  ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     Move(dst, d0);
   } else {
@@ -2046,11 +2040,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
   // scratch1 is now effective address of the double element

   FloatingPointHelper::Destination destination;
-  if (CpuFeatures::IsSupported(VFP2)) {
-    destination = FloatingPointHelper::kVFPRegisters;
-  } else {
-    destination = FloatingPointHelper::kCoreRegisters;
-  }
+  destination = FloatingPointHelper::kVFPRegisters;

   Register untagged_value = elements_reg;
   SmiUntag(untagged_value, value_reg);
@@ -2063,7 +2053,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                      scratch4,
                                      s2);
   if (destination == FloatingPointHelper::kVFPRegisters) {
-    CpuFeatureScope scope(this, VFP2);
     vstr(d0, scratch1, 0);
   } else {
     str(mantissa_reg, MemOperand(scratch1, 0));
@@ -2423,9 +2412,6 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
                                        DwVfpRegister double_scratch) {
   ASSERT(!double_input.is(double_scratch));
-  ASSERT(CpuFeatures::IsSupported(VFP2));
-  CpuFeatureScope scope(this, VFP2);
   vcvt_s32_f64(double_scratch.low(), double_input);
   vcvt_f64_s32(double_scratch, double_scratch.low());
   VFPCompareAndSetFlags(double_input, double_scratch);
@@ -2436,9 +2422,6 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                            DwVfpRegister double_input,
                                            DwVfpRegister double_scratch) {
   ASSERT(!double_input.is(double_scratch));
-  ASSERT(CpuFeatures::IsSupported(VFP2));
-  CpuFeatureScope scope(this, VFP2);
   vcvt_s32_f64(double_scratch.low(), double_input);
   vmov(result, double_scratch.low());
   vcvt_f64_s32(double_scratch, double_scratch.low());
@@ -2454,8 +2437,6 @@ void MacroAssembler::TryInt32Floor(Register result,
                                    Label* exact) {
   ASSERT(!result.is(input_high));
   ASSERT(!double_input.is(double_scratch));
-  ASSERT(CpuFeatures::IsSupported(VFP2));
-  CpuFeatureScope scope(this, VFP2);
   Label negative, exception;

   // Test for NaN and infinities.
@@ -2500,26 +2481,18 @@ void MacroAssembler::ECMAConvertNumberToInt32(Register source,
                                               Register scratch,
                                               DwVfpRegister double_scratch1,
                                               DwVfpRegister double_scratch2) {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(this, VFP2);
-    vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
-    ECMAToInt32VFP(result, double_scratch1, double_scratch2,
-                   scratch, input_high, input_low);
-  } else {
-    Ldrd(input_low, input_high,
-         FieldMemOperand(source, HeapNumber::kValueOffset));
-    ECMAToInt32NoVFP(result, scratch, input_high, input_low);
-  }
+  vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
+  ECMAToInt32(result, double_scratch1, double_scratch2,
+              scratch, input_high, input_low);
 }

-void MacroAssembler::ECMAToInt32VFP(Register result,
-                                    DwVfpRegister double_input,
-                                    DwVfpRegister double_scratch,
-                                    Register scratch,
-                                    Register input_high,
-                                    Register input_low) {
-  CpuFeatureScope scope(this, VFP2);
+void MacroAssembler::ECMAToInt32(Register result,
+                                 DwVfpRegister double_input,
+                                 DwVfpRegister double_scratch,
+                                 Register scratch,
+                                 Register input_high,
+                                 Register input_low) {
   ASSERT(!input_high.is(result));
   ASSERT(!input_low.is(result));
   ASSERT(!input_low.is(input_high));
@@ -2559,58 +2532,6 @@ void MacroAssembler::ECMAToInt32VFP(Register result,
 }

-void MacroAssembler::ECMAToInt32NoVFP(Register result,
-                                      Register scratch,
-                                      Register input_high,
-                                      Register input_low) {
-  ASSERT(!result.is(scratch));
-  ASSERT(!result.is(input_high));
-  ASSERT(!result.is(input_low));
-  ASSERT(!scratch.is(input_high));
-  ASSERT(!scratch.is(input_low));
-  ASSERT(!input_high.is(input_low));
-
-  Label both, out_of_range, negate, done;
-
-  Ubfx(scratch, input_high,
-       HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-  // Load scratch with exponent.
-  sub(scratch, scratch, Operand(HeapNumber::kExponentBias));
-  // If exponent is negative, 0 < input < 1, the result is 0.
-  // If exponent is greater than or equal to 84, the 32 less significant
-  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
-  // the result is 0.
-  // This test also catch Nan and infinities which also return 0.
-  cmp(scratch, Operand(84));
-  // We do an unsigned comparison so negative numbers are treated as big
-  // positive number and the two tests above are done in one test.
-  b(hs, &out_of_range);
-
-  // Load scratch with 20 - exponent.
-  rsb(scratch, scratch, Operand(20), SetCC);
-  b(mi, &both);
-
-  // Test 0 and -0.
-  bic(result, input_high, Operand(HeapNumber::kSignMask));
-  orr(result, result, Operand(input_low), SetCC);
-  b(eq, &done);
-  // 0 <= exponent <= 20, shift only input_high.
-  // Scratch contains: 20 - exponent.
-  Ubfx(result, input_high,
-       0, HeapNumber::kMantissaBitsInTopWord);
-  // Set the implicit 1 before the mantissa part in input_high.
-  orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
-  mov(result, Operand(result, LSR, scratch));
-  b(&negate);
-
-  bind(&both);
-  // Restore scratch to exponent - 1 to be consistent with ECMAToInt32VFP.
-  rsb(scratch, scratch, Operand(19));
-  ECMAToInt32Tail(result, scratch, input_high, input_low,
-                  &out_of_range, &negate, &done);
-}
-
 void MacroAssembler::ECMAToInt32Tail(Register result,
                                      Register scratch,
                                      Register input_high,
@@ -2713,10 +2634,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   const Runtime::Function* function = Runtime::FunctionForId(id);
   mov(r0, Operand(function->nargs));
   mov(r1, Operand(ExternalReference(function, isolate())));
-  SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
-      ? kSaveFPRegs
-      : kDontSaveFPRegs;
-  CEntryStub stub(1, mode);
+  CEntryStub stub(1, kSaveFPRegs);
   CallStub(&stub);
 }
@@ -3461,7 +3379,6 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,

 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
-  ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     Move(d0, dreg);
   } else {
@@ -3472,7 +3389,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {

 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
                                              DwVfpRegister dreg2) {
-  ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     if (dreg2.is(d0)) {
       ASSERT(!dreg1.is(d1));
@@ -3491,7 +3407,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,

 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
                                              Register reg) {
-  ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     Move(d0, dreg);
     Move(r0, reg);
...
@@ -969,21 +969,13 @@ class MacroAssembler: public Assembler {
   // Performs a truncating conversion of a floating point number as used by
   // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
   // Exits with 'result' holding the answer and all other registers clobbered.
-  void ECMAToInt32VFP(Register result,
-                      DwVfpRegister double_input,
-                      DwVfpRegister double_scratch,
-                      Register scratch,
-                      Register input_high,
-                      Register input_low);
-
-  // Performs a truncating conversion of a floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
-  // Exits with 'result' holding the answer.
-  void ECMAToInt32NoVFP(Register result,
-                        Register scratch,
-                        Register input_high,
-                        Register input_low);
+  void ECMAToInt32(Register result,
+                   DwVfpRegister double_input,
+                   DwVfpRegister double_scratch,
+                   Register scratch,
+                   Register input_high,
+                   Register input_low);

   // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
   // instruction. On pre-ARM5 hardware this routine gives the wrong answer
   // for 0 (31 instead of 32). Source and scratch can be the same in which case
...
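ECMAToInt32NoVFP, removed above along with its declaration, performed the ECMA-262 9.5 ToInt32 truncation purely with integer shifts on the exponent and mantissa words; the surviving ECMAToInt32 (renamed from ECMAToInt32VFP) takes its input in a VFP register and shares ECMAToInt32Tail for the out-of-range cases. For reference, a standalone C++ sketch of the integer-only technique, an illustration rather than the removed V8 routine:

#include <cstdint>
#include <cstring>

// ECMA-262 ToInt32: truncate toward zero, reduce modulo 2^32, reinterpret as
// signed. Works directly on the IEEE-754 bits, as a no-FPU path must.
int32_t EcmaToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  // Exponent < 0 means |input| < 1 (including +/-0 and denormals), result 0.
  // Exponent >= 84 means the low 32 bits of the integer part are all zero;
  // this also covers NaN and the infinities (biased exponent 0x7FF).
  if (exponent < 0 || exponent >= 84) return 0;
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t low32;
  if (exponent >= 52) {
    low32 = static_cast<uint32_t>(mantissa << (exponent - 52));
  } else {
    low32 = static_cast<uint32_t>(mantissa >> (52 - exponent));
  }
  if (bits >> 63) low32 = 0u - low32;  // negate modulo 2^32
  int32_t result;
  std::memcpy(&result, &low32, sizeof result);
  return result;
}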
@@ -191,11 +191,9 @@ CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
   uint64_t mask = static_cast<uint64_t>(1) << f;
   // TODO(svenpanne) This special case below doesn't belong here!
 #if V8_TARGET_ARCH_ARM
-  // VFP2 and ARMv7 are implied by VFP3.
+  // ARMv7 is implied by VFP3.
   if (f == VFP3) {
-    mask |=
-        static_cast<uint64_t>(1) << VFP2 |
-        static_cast<uint64_t>(1) << ARMv7;
+    mask |= static_cast<uint64_t>(1) << ARMv7;
   }
 #endif
   assembler_->set_enabled_cpu_features(old_enabled_ | mask);
...
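With VFP2 gone, the scope constructor above folds only the ARMv7 bit into the mask when VFP3 is requested. A self-contained sketch of that bitmask bookkeeping; simplified, since the real CpuFeatureScope also restores the previous mask in its destructor:

#include <cstdint>

enum CpuFeature { VFP3 = 1, ARMv7 = 2, SUDIV = 3 };  // values as in the new enum

// Compute the set of feature bits a CpuFeatureScope(f) enables, including
// features implied by f. Mirrors the #if V8_TARGET_ARCH_ARM special case above.
uint64_t EnabledMaskFor(CpuFeature f, uint64_t already_enabled) {
  uint64_t mask = uint64_t{1} << f;
  if (f == VFP3) mask |= uint64_t{1} << ARMv7;  // ARMv7 is implied by VFP3
  return already_enabled | mask;
}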
@@ -770,7 +770,7 @@ class BinaryOpStub: public PlatformCodeStub {
  private:
   Token::Value op_;
   OverwriteMode mode_;
-  bool platform_specific_bit_;  // Indicates SSE3 on IA32, VFP2 on ARM.
+  bool platform_specific_bit_;  // Indicates SSE3 on IA32.

   // Operand type information determined at runtime.
   BinaryOpIC::TypeInfo left_type_;
...
@@ -309,10 +309,7 @@ DEFINE_bool(enable_rdtsc, true,
 DEFINE_bool(enable_sahf, true,
             "enable use of SAHF instruction if available (X64 only)")
 DEFINE_bool(enable_vfp3, true,
-            "enable use of VFP3 instructions if available - this implies "
-            "enabling ARMv7 and VFP2 instructions (ARM only)")
-DEFINE_bool(enable_vfp2, true,
-            "enable use of VFP2 instructions if available")
+            "enable use of VFP3 instructions if available")
 DEFINE_bool(enable_armv7, true,
             "enable use of ARMv7 instructions if available (ARM only)")
 DEFINE_bool(enable_sudiv, true,
...
@@ -2008,7 +2008,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
         (instr->representation().IsDouble() &&
          ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
           (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-    // float->double conversion on non-VFP2 requires an extra scratch
+    // float->double conversion on soft float requires an extra scratch
     // register. For convenience, just mark the elements register as "UseTemp"
     // so that it can be used as a temp during the float->double conversion
     // after it's no longer needed after the float load.
...
@@ -146,9 +146,6 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
   // facility is universally available on the ARM architectures,
   // so it's up to individual OSes to provide such.
   switch (feature) {
-    case VFP2:
-      search_string = "vfp";
-      break;
     case VFP3:
       search_string = "vfpv3";
       break;
...
@@ -433,11 +433,10 @@ enum CpuFeature { SSE4_1 = 32 + 19,  // x86
                   CPUID = 10,   // x86
                   VFP3 = 1,     // ARM
                   ARMv7 = 2,    // ARM
-                  VFP2 = 3,     // ARM
-                  SUDIV = 4,    // ARM
-                  UNALIGNED_ACCESSES = 5,  // ARM
-                  MOVW_MOVT_IMMEDIATE_LOADS = 6,  // ARM
-                  VFP32DREGS = 7,  // ARM
+                  SUDIV = 3,    // ARM
+                  UNALIGNED_ACCESSES = 4,  // ARM
+                  MOVW_MOVT_IMMEDIATE_LOADS = 5,  // ARM
+                  VFP32DREGS = 6,  // ARM
                   SAHF = 0,     // x86
                   FPU = 1};     // MIPS
...
@@ -654,9 +654,6 @@ TEST(8) {
   // single precision values around in memory.
   Assembler assm(isolate, NULL, 0);

-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(&assm, VFP2);
   __ mov(ip, Operand(sp));
   __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
   __ sub(fp, ip, Operand(4));
@@ -728,7 +725,6 @@ TEST(8) {
   CHECK_EQ(4.0, f.f);
   CHECK_EQ(5.0, f.g);
   CHECK_EQ(6.0, f.h);
-  }
 }
@@ -766,9 +762,6 @@ TEST(9) {
   // single precision values around in memory.
   Assembler assm(isolate, NULL, 0);

-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(&assm, VFP2);
   __ mov(ip, Operand(sp));
   __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
   __ sub(fp, ip, Operand(4));
@@ -844,7 +837,6 @@ TEST(9) {
   CHECK_EQ(4.0, f.f);
   CHECK_EQ(5.0, f.g);
   CHECK_EQ(6.0, f.h);
-  }
 }
@@ -882,9 +874,6 @@ TEST(10) {
   // single precision values around in memory.
   Assembler assm(isolate, NULL, 0);

-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(&assm, VFP2);
   __ mov(ip, Operand(sp));
   __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
   __ sub(fp, ip, Operand(4));
@@ -956,7 +945,6 @@ TEST(10) {
   CHECK_EQ(4.0, f.f);
   CHECK_EQ(5.0, f.g);
   CHECK_EQ(6.0, f.h);
-  }
 }
...