Commit a7a6abbd authored by yangguo@chromium.org

Require CMOV support for the ia32 port.

R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/275253004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21279 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e7d8fb96
@@ -359,8 +359,6 @@ DEFINE_bool(enable_sse3, true,
             "enable use of SSE3 instructions if available")
 DEFINE_bool(enable_sse4_1, true,
             "enable use of SSE4.1 instructions if available")
-DEFINE_bool(enable_cmov, true,
-            "enable use of CMOV instruction if available")
 DEFINE_bool(enable_sahf, true,
             "enable use of SAHF instruction if available (X64 only)")
 DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
......
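For context, the DEFINE_bool entries in this hunk expand to plain bool globals that the feature-gating code later tests as FLAG_enable_sse3 and friends. A minimal sketch of that expansion, assuming a simplified single-mode macro (the real flag-definitions.h is re-included in several modes, so this is not V8's actual definition):

    // Sketch only: a stand-in for V8's flag macro machinery.
    #define DEFINE_bool(nam, def, cmt) bool FLAG_##nam = def;

    DEFINE_bool(enable_sse3, true,
                "enable use of SSE3 instructions if available")
    // After this change there is no FLAG_enable_cmov on ia32; CMOV is assumed.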
@@ -98,10 +98,7 @@ void CpuFeatures::Probe(bool serializer_enabled) {
   }
   CHECK(cpu.has_sse2());  // SSE2 support is mandatory.
-  if (cpu.has_cmov()) {
-    probed_features |= static_cast<uint64_t>(1) << CMOV;
-  }
+  CHECK(cpu.has_cmov());  // CMOV support is mandatory.
   // SAHF must be available in compat/legacy mode.
   ASSERT(cpu.has_sahf());
@@ -636,7 +633,6 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
 void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
-  ASSERT(IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
   // Opcode: 0f 40 + cc /r.
   EMIT(0x0F);
......
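The "0f 40 + cc /r" comment above is the standard x86 encoding of CMOVcc: a 0F escape byte, then 0x40 plus the 4-bit condition code (0=overflow, 2=below, 4=equal, 5=not_equal, 7=above, ...), then a ModRM byte. A free-standing sketch of the register-register form, not V8's Assembler (whose EMIT/Operand machinery is elided here):

    #include <cstdint>
    #include <vector>

    // Emit CMOVcc dst, src for two 32-bit registers (numbered 0..7).
    void EmitCmovRegReg(std::vector<uint8_t>& code, int cc, int dst, int src) {
      code.push_back(0x0F);                             // two-byte opcode escape
      code.push_back(static_cast<uint8_t>(0x40 + cc));  // 0F 40+cc = CMOVcc
      code.push_back(static_cast<uint8_t>(0xC0 | (dst << 3) | src));  // ModRM, mod=11
    }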
@@ -481,9 +481,9 @@ class Displacement BASE_EMBEDDED {
 // CpuFeatures keeps track of which features are supported by the target CPU.
 // Supported features must be enabled by a CpuFeatureScope before use.
 // Example:
-//   if (assembler->IsSupported(CMOV)) {
-//     CpuFeatureScope fscope(assembler, CMOV);
-//     // Generate code containing cmov.
+//   if (assembler->IsSupported(SSE3)) {
+//     CpuFeatureScope fscope(assembler, SSE3);
+//     // Generate code containing SSE3 instructions.
 //   } else {
 //     // Generate alternative code.
 //   }
@@ -499,7 +499,6 @@ class CpuFeatures : public AllStatic {
     if (Check(f, cross_compile_)) return true;
     if (f == SSE3 && !FLAG_enable_sse3) return false;
     if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
-    if (f == CMOV && !FLAG_enable_cmov) return false;
     return Check(f, supported_);
   }
......
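After this hunk, the ia32 IsSupported no longer consults a CMOV flag; the gating reduces to the remaining command-line flags plus the probed bitmask. A self-contained sketch of that logic (names mirror the diff, but this is not the actual class):

    #include <cstdint>

    enum CpuFeature { SSE3 = 32 + 0, SSE4_1 = 32 + 19 };

    bool FLAG_enable_sse3 = true;
    bool FLAG_enable_sse4_1 = true;
    uint64_t supported_ = 0;  // filled in by Probe() at startup

    static bool Check(CpuFeature f, uint64_t set) {
      return (set & (static_cast<uint64_t>(1) << f)) != 0;
    }

    bool IsSupported(CpuFeature f) {
      if (f == SSE3 && !FLAG_enable_sse3) return false;
      if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
      return Check(f, supported_);
    }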
@@ -629,15 +629,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
     __ shrd(result_reg, scratch1);
     __ shr_cl(result_reg);
     __ test(ecx, Immediate(32));
-    if (CpuFeatures::IsSupported(CMOV)) {
-      CpuFeatureScope use_cmov(masm, CMOV);
-      __ cmov(not_equal, scratch1, result_reg);
-    } else {
-      Label skip_mov;
-      __ j(equal, &skip_mov, Label::kNear);
-      __ mov(scratch1, result_reg);
-      __ bind(&skip_mov);
-    }
+    __ cmov(not_equal, scratch1, result_reg);
   }

   // If the double was negative, negate the integer result.
@@ -649,15 +641,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   } else {
     __ cmp(exponent_operand, Immediate(0));
   }
-  if (CpuFeatures::IsSupported(CMOV)) {
-    CpuFeatureScope use_cmov(masm, CMOV);
-    __ cmov(greater, result_reg, scratch1);
-  } else {
-    Label skip_mov;
-    __ j(less_equal, &skip_mov, Label::kNear);
-    __ mov(result_reg, scratch1);
-    __ bind(&skip_mov);
-  }
+  __ cmov(greater, result_reg, scratch1);
   // Restore registers
   __ bind(&done);
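The deleted fallback and the surviving cmov are equivalent: both conditionally overwrite one register with another without a branch disturbing the surrounding control flow. In C terms (a sketch, not the stub itself):

    #include <cstdint>

    // Branchy fallback (j + mov + bind) vs. the single cmov: same data flow.
    // Compilers typically lower this ternary to a cmov on ia32/x64.
    uint32_t ConditionalMove(bool condition, uint32_t dst, uint32_t src) {
      return condition ? src : dst;
    }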
@@ -2068,32 +2052,12 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   // Don't base result on EFLAGS when a NaN is involved.
   __ j(parity_even, &unordered, Label::kNear);
-  if (CpuFeatures::IsSupported(CMOV)) {
-    CpuFeatureScope use_cmov(masm, CMOV);
-    // Return a result of -1, 0, or 1, based on EFLAGS.
-    __ mov(eax, 0);  // equal
-    __ mov(ecx, Immediate(Smi::FromInt(1)));
-    __ cmov(above, eax, ecx);
-    __ mov(ecx, Immediate(Smi::FromInt(-1)));
-    __ cmov(below, eax, ecx);
-    __ ret(0);
-  } else {
-    Label below_label, above_label;
-    // Return a result of -1, 0, or 1, based on EFLAGS.
-    __ j(below, &below_label, Label::kNear);
-    __ j(above, &above_label, Label::kNear);
-    __ Move(eax, Immediate(0));
-    __ ret(0);
-    __ bind(&below_label);
-    __ mov(eax, Immediate(Smi::FromInt(-1)));
-    __ ret(0);
-    __ bind(&above_label);
-    __ mov(eax, Immediate(Smi::FromInt(1)));
-    __ ret(0);
-  }
+  __ mov(eax, 0);  // equal
+  __ mov(ecx, Immediate(Smi::FromInt(1)));
+  __ cmov(above, eax, ecx);
+  __ mov(ecx, Immediate(Smi::FromInt(-1)));
+  __ cmov(below, eax, ecx);
+  __ ret(0);

   // If one of the numbers was NaN, then the result is always false.
   // The cc is never not-equal.
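The surviving sequence is a branchless three-way compare: eax starts at 0 (equal) and two cmovs overwrite it with a tagged 1 or -1 depending on the ucomisd flags, after NaN has been excluded via the parity flag. The same logic in plain C++ (a sketch; Smi tagging omitted):

    #include <cmath>

    int ThreeWayCompare(double lhs, double rhs, bool* unordered) {
      *unordered = std::isunordered(lhs, rhs);  // ucomisd sets PF in this case
      if (*unordered) return 0;                 // the stub jumps to &unordered instead
      int result = 0;                           // mov eax, 0  (equal)
      if (lhs > rhs) result = 1;                // cmov(above, eax, ecx)
      if (lhs < rhs) result = -1;               // cmov(below, eax, ecx)
      return result;
    }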
@@ -3776,63 +3740,46 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
     __ JumpIfNotSmi(eax, &miss);
   }

   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or SSE2 or CMOV is unsupported.
-  if (CpuFeatures::IsSupported(CMOV)) {
-    CpuFeatureScope scope2(masm, CMOV);
-
-    // Load left and right operand.
-    Label done, left, left_smi, right_smi;
-    __ JumpIfSmi(eax, &right_smi, Label::kNear);
-    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-           isolate()->factory()->heap_number_map());
-    __ j(not_equal, &maybe_undefined1, Label::kNear);
-    __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
-    __ jmp(&left, Label::kNear);
-    __ bind(&right_smi);
-    __ mov(ecx, eax);  // Can't clobber eax because we can still jump away.
-    __ SmiUntag(ecx);
-    __ Cvtsi2sd(xmm1, ecx);
-
-    __ bind(&left);
-    __ JumpIfSmi(edx, &left_smi, Label::kNear);
-    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-           isolate()->factory()->heap_number_map());
-    __ j(not_equal, &maybe_undefined2, Label::kNear);
-    __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-    __ jmp(&done);
-    __ bind(&left_smi);
-    __ mov(ecx, edx);  // Can't clobber edx because we can still jump away.
-    __ SmiUntag(ecx);
-    __ Cvtsi2sd(xmm0, ecx);
-
-    __ bind(&done);
-    // Compare operands.
-    __ ucomisd(xmm0, xmm1);
-
-    // Don't base result on EFLAGS when a NaN is involved.
-    __ j(parity_even, &unordered, Label::kNear);
-
-    // Return a result of -1, 0, or 1, based on EFLAGS.
-    // Performing mov, because xor would destroy the flag register.
-    __ mov(eax, 0);  // equal
-    __ mov(ecx, Immediate(Smi::FromInt(1)));
-    __ cmov(above, eax, ecx);
-    __ mov(ecx, Immediate(Smi::FromInt(-1)));
-    __ cmov(below, eax, ecx);
-    __ ret(0);
-  } else {
-    __ mov(ecx, edx);
-    __ and_(ecx, eax);
-    __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
-
-    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-           isolate()->factory()->heap_number_map());
-    __ j(not_equal, &maybe_undefined1, Label::kNear);
-    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-           isolate()->factory()->heap_number_map());
-    __ j(not_equal, &maybe_undefined2, Label::kNear);
-  }
+  // Load left and right operand.
+  Label done, left, left_smi, right_smi;
+  __ JumpIfSmi(eax, &right_smi, Label::kNear);
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         isolate()->factory()->heap_number_map());
+  __ j(not_equal, &maybe_undefined1, Label::kNear);
+  __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ jmp(&left, Label::kNear);
+  __ bind(&right_smi);
+  __ mov(ecx, eax);  // Can't clobber eax because we can still jump away.
+  __ SmiUntag(ecx);
+  __ Cvtsi2sd(xmm1, ecx);
+
+  __ bind(&left);
+  __ JumpIfSmi(edx, &left_smi, Label::kNear);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         isolate()->factory()->heap_number_map());
+  __ j(not_equal, &maybe_undefined2, Label::kNear);
+  __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  __ jmp(&done);
+  __ bind(&left_smi);
+  __ mov(ecx, edx);  // Can't clobber edx because we can still jump away.
+  __ SmiUntag(ecx);
+  __ Cvtsi2sd(xmm0, ecx);
+
+  __ bind(&done);
+  // Compare operands.
+  __ ucomisd(xmm0, xmm1);
+
+  // Don't base result on EFLAGS when a NaN is involved.
+  __ j(parity_even, &unordered, Label::kNear);
+
+  // Return a result of -1, 0, or 1, based on EFLAGS.
+  // Performing mov, because xor would destroy the flag register.
+  __ mov(eax, 0);  // equal
+  __ mov(ecx, Immediate(Smi::FromInt(1)));
+  __ cmov(above, eax, ecx);
+  __ mov(ecx, Immediate(Smi::FromInt(-1)));
+  __ cmov(below, eax, ecx);
+  __ ret(0);

   __ bind(&unordered);
   __ bind(&generic_stub);
......
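The SmiUntag and Smi::FromInt operations above rely on ia32's pointer-tagging scheme: a small integer is stored shifted left by one bit, and a zero low bit marks the word as a Smi rather than a heap pointer. A sketch of the arithmetic being generated (constants per the 32-bit scheme; not the actual Smi class):

    #include <cstdint>

    constexpr int32_t kSmiTagSize = 1;

    constexpr int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
    constexpr int32_t SmiToInt(int32_t smi)     { return smi >> kSmiTagSize; }  // SmiUntag
    constexpr bool    IsSmi(int32_t word)       { return (word & 1) == 0; }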
@@ -827,16 +827,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
 void MacroAssembler::FCmp() {
-  if (CpuFeatures::IsSupported(CMOV)) {
-    fucomip();
-    fstp(0);
-  } else {
-    fucompp();
-    push(eax);
-    fnstsw_ax();
-    sahf();
-    pop(eax);
-  }
+  fucomip();
+  fstp(0);
 }
......
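fucomip compares ST(0) with ST(1), writes ZF/PF/CF in EFLAGS directly, and pops once; the following fstp(0) discards the second operand. The removed branch emulated this on pre-P6 parts by copying the FPU status word through AX into EFLAGS with sahf. The flag outcomes fucomip produces, expressed as code (per the Intel SDM; a sketch, not V8 code):

    struct X87CompareFlags { bool zf, pf, cf; };

    X87CompareFlags FucomipFlags(double st0, double src) {
      if (st0 != st0 || src != src) return {true, true, true};   // unordered (NaN)
      if (st0 > src)                return {false, false, false};
      if (st0 < src)                return {false, false, true};
      return {true, false, false};                               // equal
    }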
@@ -54,14 +54,7 @@ static const pthread_t kNoThread = (pthread_t) 0;
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if V8_OS_MACOSX
-  // Mac OS X requires CMOV to install so we can assume it is present.
-  // These constants are defined by the CPUid instructions.
-  const uint64_t one = 1;
-  return one << CMOV;
-#else
-  return 0;  // Nothing special about the other systems.
-#endif
+  return 0;  // Nothing special.
 }
......
@@ -268,8 +268,7 @@ class OS {
   static void SignalCodeMovingGC();

   // The return value indicates the CPU features we are sure of because of the
-  // OS. For example MacOSX doesn't run on any x86 CPUs that don't have CMOV
-  // instructions.
+  // OS.
   // This is a little messy because the interpretation is subject to the cross
   // of the CPU and the OS. The bits in the answer correspond to the bit
   // positions indicated by the members of the CpuFeature enum from globals.h
......
@@ -403,7 +403,6 @@ enum StateTag {
 // On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX.
 enum CpuFeature { SSE4_1 = 32 + 19,  // x86
                   SSE3 = 32 + 0,     // x86
-                  CMOV = 15,         // x86
                   VFP3 = 1,          // ARM
                   ARMv7 = 2,         // ARM
                   SUDIV = 3,         // ARM
......
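These enum values encode CPUID.01H bit positions directly: bits 0..31 come from EDX and bits 32..63 from ECX, so SSE3 = 32 + 0 is ECX bit 0, SSE4_1 = 32 + 19 is ECX bit 19, and the removed CMOV = 15 was EDX bit 15. A sketch of probing that packed bitmask with GCC/Clang's <cpuid.h> (not V8's CPU class):

    #include <cpuid.h>
    #include <cstdint>

    uint64_t ProbeX86Features() {
      unsigned int eax, ebx, ecx, edx;
      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 0;
      return (static_cast<uint64_t>(ecx) << 32) | edx;  // ECX in the high half
    }

    bool HasCmov() { return (ProbeX86Features() >> 15) & 1; }  // EDX bit 15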
@@ -19,7 +19,7 @@ namespace internal {
 #ifdef DEBUG
 bool CpuFeatures::initialized_ = false;
 #endif
-uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
+uint64_t CpuFeatures::supported_ = 0;
 uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
 uint64_t CpuFeatures::cross_compile_ = 0;
@@ -30,11 +30,11 @@ ExternalReference ExternalReference::cpu_features() {
 void CpuFeatures::Probe(bool serializer_enabled) {
-  ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
+  ASSERT(supported_ == 0);
 #ifdef DEBUG
   initialized_ = true;
 #endif
-  supported_ = kDefaultCpuFeatures;
+  supported_ = 0;
   if (serializer_enabled) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
@@ -54,7 +54,6 @@ void CpuFeatures::Probe(bool serializer_enabled) {
   // CMOV must be available on every x64 CPU.
   ASSERT(cpu.has_cmov());
-  probed_features |= static_cast<uint64_t>(1) << CMOV;

   // SAHF is not generally available in long mode.
   if (cpu.has_sahf()) {
@@ -63,8 +62,7 @@ void CpuFeatures::Probe(bool serializer_enabled) {
   uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
   supported_ = probed_features | platform_features;
-  found_by_runtime_probing_only_
-      = probed_features & ~kDefaultCpuFeatures & ~platform_features;
+  found_by_runtime_probing_only_ = probed_features & ~platform_features;
 }
......
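With kDefaultCpuFeatures gone, the bookkeeping reduces to plain set operations on two bitmasks. As a sketch:

    #include <cstdint>

    // Features the serializer may not rely on: probed at runtime but not
    // guaranteed by the OS. Previously kDefaultCpuFeatures was masked out too.
    uint64_t FoundByRuntimeProbingOnly(uint64_t probed, uint64_t platform) {
      return probed & ~platform;
    }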
@@ -458,7 +458,6 @@ class CpuFeatures : public AllStatic {
     ASSERT(initialized_);
     if (f == SSE3 && !FLAG_enable_sse3) return false;
     if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
-    if (f == CMOV && !FLAG_enable_cmov) return false;
     if (f == SAHF && !FLAG_enable_sahf) return false;
     return Check(f, supported_);
   }
@@ -491,12 +490,6 @@ class CpuFeatures : public AllStatic {
     return static_cast<uint64_t>(1) << f;
   }

-  // Safe defaults include CMOV for X64. It is always available, if
-  // anyone checks, but they shouldn't need to check.
-  // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
-  //   fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
-  static const uint64_t kDefaultCpuFeatures = (1 << CMOV);
-
 #ifdef DEBUG
   static bool initialized_;
 #endif
......
@@ -414,25 +414,22 @@ TEST(DisasmIa320) {
   // cmov.
   {
-    if (CpuFeatures::IsSupported(CMOV)) {
-      CpuFeatureScope use_cmov(&assm, CMOV);
-      __ cmov(overflow, eax, Operand(eax, 0));
-      __ cmov(no_overflow, eax, Operand(eax, 1));
-      __ cmov(below, eax, Operand(eax, 2));
-      __ cmov(above_equal, eax, Operand(eax, 3));
-      __ cmov(equal, eax, Operand(ebx, 0));
-      __ cmov(not_equal, eax, Operand(ebx, 1));
-      __ cmov(below_equal, eax, Operand(ebx, 2));
-      __ cmov(above, eax, Operand(ebx, 3));
-      __ cmov(sign, eax, Operand(ecx, 0));
-      __ cmov(not_sign, eax, Operand(ecx, 1));
-      __ cmov(parity_even, eax, Operand(ecx, 2));
-      __ cmov(parity_odd, eax, Operand(ecx, 3));
-      __ cmov(less, eax, Operand(edx, 0));
-      __ cmov(greater_equal, eax, Operand(edx, 1));
-      __ cmov(less_equal, eax, Operand(edx, 2));
-      __ cmov(greater, eax, Operand(edx, 3));
-    }
+    __ cmov(overflow, eax, Operand(eax, 0));
+    __ cmov(no_overflow, eax, Operand(eax, 1));
+    __ cmov(below, eax, Operand(eax, 2));
+    __ cmov(above_equal, eax, Operand(eax, 3));
+    __ cmov(equal, eax, Operand(ebx, 0));
+    __ cmov(not_equal, eax, Operand(ebx, 1));
+    __ cmov(below_equal, eax, Operand(ebx, 2));
+    __ cmov(above, eax, Operand(ebx, 3));
+    __ cmov(sign, eax, Operand(ecx, 0));
+    __ cmov(not_sign, eax, Operand(ecx, 1));
+    __ cmov(parity_even, eax, Operand(ecx, 2));
+    __ cmov(parity_odd, eax, Operand(ecx, 3));
+    __ cmov(less, eax, Operand(edx, 0));
+    __ cmov(greater_equal, eax, Operand(edx, 1));
+    __ cmov(less_equal, eax, Operand(edx, 2));
+    __ cmov(greater, eax, Operand(edx, 3));
   }
   {
......