Commit a7a6abbd authored by yangguo@chromium.org

Require CMOV support for the ia32 port.

R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/275253004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21279 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e7d8fb96
...@@ -359,8 +359,6 @@ DEFINE_bool(enable_sse3, true, ...@@ -359,8 +359,6 @@ DEFINE_bool(enable_sse3, true,
"enable use of SSE3 instructions if available") "enable use of SSE3 instructions if available")
DEFINE_bool(enable_sse4_1, true, DEFINE_bool(enable_sse4_1, true,
"enable use of SSE4.1 instructions if available") "enable use of SSE4.1 instructions if available")
DEFINE_bool(enable_cmov, true,
"enable use of CMOV instruction if available")
DEFINE_bool(enable_sahf, true, DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)") "enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT, DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
......
...@@ -98,10 +98,7 @@ void CpuFeatures::Probe(bool serializer_enabled) { ...@@ -98,10 +98,7 @@ void CpuFeatures::Probe(bool serializer_enabled) {
} }
CHECK(cpu.has_sse2()); // SSE2 support is mandatory. CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
CHECK(cpu.has_cmov()); // CMOV support is mandatory.
if (cpu.has_cmov()) {
probed_features |= static_cast<uint64_t>(1) << CMOV;
}
// SAHF must be available in compat/legacy mode. // SAHF must be available in compat/legacy mode.
ASSERT(cpu.has_sahf()); ASSERT(cpu.has_sahf());
...@@ -636,7 +633,6 @@ void Assembler::movzx_w(Register dst, const Operand& src) { ...@@ -636,7 +633,6 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
void Assembler::cmov(Condition cc, Register dst, const Operand& src) { void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
ASSERT(IsEnabled(CMOV));
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r. // Opcode: 0f 40 + cc /r.
EMIT(0x0F); EMIT(0x0F);
......
...@@ -481,9 +481,9 @@ class Displacement BASE_EMBEDDED { ...@@ -481,9 +481,9 @@ class Displacement BASE_EMBEDDED {
// CpuFeatures keeps track of which features are supported by the target CPU. // CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use. // Supported features must be enabled by a CpuFeatureScope before use.
// Example: // Example:
// if (assembler->IsSupported(CMOV)) { // if (assembler->IsSupported(SSE3)) {
// CpuFeatureScope fscope(assembler, CMOV); // CpuFeatureScope fscope(assembler, SSE3);
// // Generate code containing cmov. // // Generate code containing SSE3 instructions.
// } else { // } else {
// // Generate alternative code. // // Generate alternative code.
// } // }
...@@ -499,7 +499,6 @@ class CpuFeatures : public AllStatic { ...@@ -499,7 +499,6 @@ class CpuFeatures : public AllStatic {
if (Check(f, cross_compile_)) return true; if (Check(f, cross_compile_)) return true;
if (f == SSE3 && !FLAG_enable_sse3) return false; if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false; if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
return Check(f, supported_); return Check(f, supported_);
} }
......
...@@ -629,15 +629,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { ...@@ -629,15 +629,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ shrd(result_reg, scratch1); __ shrd(result_reg, scratch1);
__ shr_cl(result_reg); __ shr_cl(result_reg);
__ test(ecx, Immediate(32)); __ test(ecx, Immediate(32));
if (CpuFeatures::IsSupported(CMOV)) { __ cmov(not_equal, scratch1, result_reg);
CpuFeatureScope use_cmov(masm, CMOV);
__ cmov(not_equal, scratch1, result_reg);
} else {
Label skip_mov;
__ j(equal, &skip_mov, Label::kNear);
__ mov(scratch1, result_reg);
__ bind(&skip_mov);
}
} }
// If the double was negative, negate the integer result. // If the double was negative, negate the integer result.
...@@ -649,15 +641,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { ...@@ -649,15 +641,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
} else { } else {
__ cmp(exponent_operand, Immediate(0)); __ cmp(exponent_operand, Immediate(0));
} }
if (CpuFeatures::IsSupported(CMOV)) {
CpuFeatureScope use_cmov(masm, CMOV);
__ cmov(greater, result_reg, scratch1); __ cmov(greater, result_reg, scratch1);
} else {
Label skip_mov;
__ j(less_equal, &skip_mov, Label::kNear);
__ mov(result_reg, scratch1);
__ bind(&skip_mov);
}
// Restore registers // Restore registers
__ bind(&done); __ bind(&done);
...@@ -2068,32 +2052,12 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { ...@@ -2068,32 +2052,12 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Don't base result on EFLAGS when a NaN is involved. // Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, Label::kNear); __ j(parity_even, &unordered, Label::kNear);
if (CpuFeatures::IsSupported(CMOV)) { __ mov(eax, 0); // equal
CpuFeatureScope use_cmov(masm, CMOV); __ mov(ecx, Immediate(Smi::FromInt(1)));
// Return a result of -1, 0, or 1, based on EFLAGS. __ cmov(above, eax, ecx);
__ mov(eax, 0); // equal __ mov(ecx, Immediate(Smi::FromInt(-1)));
__ mov(ecx, Immediate(Smi::FromInt(1))); __ cmov(below, eax, ecx);
__ cmov(above, eax, ecx); __ ret(0);
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
} else {
Label below_label, above_label;
// Return a result of -1, 0, or 1, based on EFLAGS.
__ j(below, &below_label, Label::kNear);
__ j(above, &above_label, Label::kNear);
__ Move(eax, Immediate(0));
__ ret(0);
__ bind(&below_label);
__ mov(eax, Immediate(Smi::FromInt(-1)));
__ ret(0);
__ bind(&above_label);
__ mov(eax, Immediate(Smi::FromInt(1)));
__ ret(0);
}
// If one of the numbers was NaN, then the result is always false. // If one of the numbers was NaN, then the result is always false.
// The cc is never not-equal. // The cc is never not-equal.
...@@ -3776,63 +3740,46 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { ...@@ -3776,63 +3740,46 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ JumpIfNotSmi(eax, &miss); __ JumpIfNotSmi(eax, &miss);
} }
// Inlining the double comparison and falling back to the general compare // Load left and right operand.
// stub if NaN is involved or SSE2 or CMOV is unsupported. Label done, left, left_smi, right_smi;
if (CpuFeatures::IsSupported(CMOV)) { __ JumpIfSmi(eax, &right_smi, Label::kNear);
CpuFeatureScope scope2(masm, CMOV); __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
// Load left and right operand. __ j(not_equal, &maybe_undefined1, Label::kNear);
Label done, left, left_smi, right_smi; __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ JumpIfSmi(eax, &right_smi, Label::kNear); __ jmp(&left, Label::kNear);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset), __ bind(&right_smi);
isolate()->factory()->heap_number_map()); __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
__ j(not_equal, &maybe_undefined1, Label::kNear); __ SmiUntag(ecx);
__ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Cvtsi2sd(xmm1, ecx);
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ mov(ecx, eax); // Can't clobber eax because we can still jump away.
__ SmiUntag(ecx);
__ Cvtsi2sd(xmm1, ecx);
__ bind(&left);
__ JumpIfSmi(edx, &left_smi, Label::kNear);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&left_smi);
__ mov(ecx, edx); // Can't clobber edx because we can still jump away.
__ SmiUntag(ecx);
__ Cvtsi2sd(xmm0, ecx);
__ bind(&done); __ bind(&left);
// Compare operands. __ JumpIfSmi(edx, &left_smi, Label::kNear);
__ ucomisd(xmm0, xmm1); __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
// Don't base result on EFLAGS when a NaN is involved. __ j(not_equal, &maybe_undefined2, Label::kNear);
__ j(parity_even, &unordered, Label::kNear); __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ jmp(&done);
// Return a result of -1, 0, or 1, based on EFLAGS. __ bind(&left_smi);
// Performing mov, because xor would destroy the flag register. __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
__ mov(eax, 0); // equal __ SmiUntag(ecx);
__ mov(ecx, Immediate(Smi::FromInt(1))); __ Cvtsi2sd(xmm0, ecx);
__ cmov(above, eax, ecx);
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
} else {
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &generic_stub, Label::kNear);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset), __ bind(&done);
isolate()->factory()->heap_number_map()); // Compare operands.
__ j(not_equal, &maybe_undefined1, Label::kNear); __ ucomisd(xmm0, xmm1);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map()); // Don't base result on EFLAGS when a NaN is involved.
__ j(not_equal, &maybe_undefined2, Label::kNear); __ j(parity_even, &unordered, Label::kNear);
}
// Return a result of -1, 0, or 1, based on EFLAGS.
// Performing mov, because xor would destroy the flag register.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
__ cmov(above, eax, ecx);
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
__ bind(&unordered); __ bind(&unordered);
__ bind(&generic_stub); __ bind(&generic_stub);
......
...@@ -827,16 +827,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map, ...@@ -827,16 +827,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
void MacroAssembler::FCmp() { void MacroAssembler::FCmp() {
if (CpuFeatures::IsSupported(CMOV)) { fucomip();
fucomip(); fstp(0);
fstp(0);
} else {
fucompp();
push(eax);
fnstsw_ax();
sahf();
pop(eax);
}
} }
......
...@@ -54,14 +54,7 @@ static const pthread_t kNoThread = (pthread_t) 0; ...@@ -54,14 +54,7 @@ static const pthread_t kNoThread = (pthread_t) 0;
uint64_t OS::CpuFeaturesImpliedByPlatform() { uint64_t OS::CpuFeaturesImpliedByPlatform() {
#if V8_OS_MACOSX return 0; // Nothing special.
// Mac OS X requires CMOV to install so we can assume it is present.
// These constants are defined by the CPUid instructions.
const uint64_t one = 1;
return one << CMOV;
#else
return 0; // Nothing special about the other systems.
#endif
} }
......
...@@ -268,8 +268,7 @@ class OS { ...@@ -268,8 +268,7 @@ class OS {
static void SignalCodeMovingGC(); static void SignalCodeMovingGC();
// The return value indicates the CPU features we are sure of because of the // The return value indicates the CPU features we are sure of because of the
// OS. For example MacOSX doesn't run on any x86 CPUs that don't have CMOV // OS.
// instructions.
// This is a little messy because the interpretation is subject to the cross // This is a little messy because the interpretation is subject to the cross
// of the CPU and the OS. The bits in the answer correspond to the bit // of the CPU and the OS. The bits in the answer correspond to the bit
// positions indicated by the members of the CpuFeature enum from globals.h // positions indicated by the members of the CpuFeature enum from globals.h
......
...@@ -403,7 +403,6 @@ enum StateTag { ...@@ -403,7 +403,6 @@ enum StateTag {
// On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX. // On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX.
enum CpuFeature { SSE4_1 = 32 + 19, // x86 enum CpuFeature { SSE4_1 = 32 + 19, // x86
SSE3 = 32 + 0, // x86 SSE3 = 32 + 0, // x86
CMOV = 15, // x86
VFP3 = 1, // ARM VFP3 = 1, // ARM
ARMv7 = 2, // ARM ARMv7 = 2, // ARM
SUDIV = 3, // ARM SUDIV = 3, // ARM
......
...@@ -19,7 +19,7 @@ namespace internal { ...@@ -19,7 +19,7 @@ namespace internal {
#ifdef DEBUG #ifdef DEBUG
bool CpuFeatures::initialized_ = false; bool CpuFeatures::initialized_ = false;
#endif #endif
uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures; uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0; uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
uint64_t CpuFeatures::cross_compile_ = 0; uint64_t CpuFeatures::cross_compile_ = 0;
...@@ -30,11 +30,11 @@ ExternalReference ExternalReference::cpu_features() { ...@@ -30,11 +30,11 @@ ExternalReference ExternalReference::cpu_features() {
void CpuFeatures::Probe(bool serializer_enabled) { void CpuFeatures::Probe(bool serializer_enabled) {
ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures); ASSERT(supported_ == 0);
#ifdef DEBUG #ifdef DEBUG
initialized_ = true; initialized_ = true;
#endif #endif
supported_ = kDefaultCpuFeatures; supported_ = 0;
if (serializer_enabled) { if (serializer_enabled) {
supported_ |= OS::CpuFeaturesImpliedByPlatform(); supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize. return; // No features if we might serialize.
...@@ -54,7 +54,6 @@ void CpuFeatures::Probe(bool serializer_enabled) { ...@@ -54,7 +54,6 @@ void CpuFeatures::Probe(bool serializer_enabled) {
// CMOV must be available on every x64 CPU. // CMOV must be available on every x64 CPU.
ASSERT(cpu.has_cmov()); ASSERT(cpu.has_cmov());
probed_features |= static_cast<uint64_t>(1) << CMOV;
// SAHF is not generally available in long mode. // SAHF is not generally available in long mode.
if (cpu.has_sahf()) { if (cpu.has_sahf()) {
...@@ -63,8 +62,7 @@ void CpuFeatures::Probe(bool serializer_enabled) { ...@@ -63,8 +62,7 @@ void CpuFeatures::Probe(bool serializer_enabled) {
uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform(); uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
supported_ = probed_features | platform_features; supported_ = probed_features | platform_features;
found_by_runtime_probing_only_ found_by_runtime_probing_only_ = probed_features & ~platform_features;
= probed_features & ~kDefaultCpuFeatures & ~platform_features;
} }
......
...@@ -458,7 +458,6 @@ class CpuFeatures : public AllStatic { ...@@ -458,7 +458,6 @@ class CpuFeatures : public AllStatic {
ASSERT(initialized_); ASSERT(initialized_);
if (f == SSE3 && !FLAG_enable_sse3) return false; if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false; if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == SAHF && !FLAG_enable_sahf) return false; if (f == SAHF && !FLAG_enable_sahf) return false;
return Check(f, supported_); return Check(f, supported_);
} }
...@@ -491,12 +490,6 @@ class CpuFeatures : public AllStatic { ...@@ -491,12 +490,6 @@ class CpuFeatures : public AllStatic {
return static_cast<uint64_t>(1) << f; return static_cast<uint64_t>(1) << f;
} }
// Safe defaults include CMOV for X64. It is always available, if
// anyone checks, but they shouldn't need to check.
// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
static const uint64_t kDefaultCpuFeatures = (1 << CMOV);
#ifdef DEBUG #ifdef DEBUG
static bool initialized_; static bool initialized_;
#endif #endif
......
...@@ -414,25 +414,22 @@ TEST(DisasmIa320) { ...@@ -414,25 +414,22 @@ TEST(DisasmIa320) {
// cmov. // cmov.
{ {
if (CpuFeatures::IsSupported(CMOV)) { __ cmov(overflow, eax, Operand(eax, 0));
CpuFeatureScope use_cmov(&assm, CMOV); __ cmov(no_overflow, eax, Operand(eax, 1));
__ cmov(overflow, eax, Operand(eax, 0)); __ cmov(below, eax, Operand(eax, 2));
__ cmov(no_overflow, eax, Operand(eax, 1)); __ cmov(above_equal, eax, Operand(eax, 3));
__ cmov(below, eax, Operand(eax, 2)); __ cmov(equal, eax, Operand(ebx, 0));
__ cmov(above_equal, eax, Operand(eax, 3)); __ cmov(not_equal, eax, Operand(ebx, 1));
__ cmov(equal, eax, Operand(ebx, 0)); __ cmov(below_equal, eax, Operand(ebx, 2));
__ cmov(not_equal, eax, Operand(ebx, 1)); __ cmov(above, eax, Operand(ebx, 3));
__ cmov(below_equal, eax, Operand(ebx, 2)); __ cmov(sign, eax, Operand(ecx, 0));
__ cmov(above, eax, Operand(ebx, 3)); __ cmov(not_sign, eax, Operand(ecx, 1));
__ cmov(sign, eax, Operand(ecx, 0)); __ cmov(parity_even, eax, Operand(ecx, 2));
__ cmov(not_sign, eax, Operand(ecx, 1)); __ cmov(parity_odd, eax, Operand(ecx, 3));
__ cmov(parity_even, eax, Operand(ecx, 2)); __ cmov(less, eax, Operand(edx, 0));
__ cmov(parity_odd, eax, Operand(ecx, 3)); __ cmov(greater_equal, eax, Operand(edx, 1));
__ cmov(less, eax, Operand(edx, 0)); __ cmov(less_equal, eax, Operand(edx, 2));
__ cmov(greater_equal, eax, Operand(edx, 1)); __ cmov(greater, eax, Operand(edx, 3));
__ cmov(less_equal, eax, Operand(edx, 2));
__ cmov(greater, eax, Operand(edx, 3));
}
} }
{ {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment