Commit 30a2c00d authored by yangguo@chromium.org

Tweaks on Math.pow (ia32 and x64).

Review URL: http://codereview.chromium.org/8831008

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10203 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 0fd73505
@@ -1115,19 +1115,7 @@ double power_double_int(double x, int y) {
 double power_double_double(double x, double y) {
   // The checks for special cases can be dropped in ia32 because it has already
   // been done in generated code before bailing out here.
-#if !defined(V8_TARGET_ARCH_IA32)
-  int y_int = static_cast<int>(y);
-  if (y == y_int) {
-    return power_double_int(x, y_int);  // Returns 1.0 for exponent 0.
-  }
-  if (!isinf(x)) {
-    if (y == 0.5) return sqrt(x + 0.0);  // -0 must be converted to +0.
-    if (y == -0.5) return 1.0 / sqrt(x + 0.0);
-  }
-#endif
-  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
-    return OS::nan_value();
-  }
+  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
   return pow(x, y);
 }
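A note on the guard that survives the simplification above: it exists because C99 pow() and ECMAScript Math.pow() disagree on a handful of inputs, and those are exactly the cases it covers. A standalone sketch of the disagreement (not part of the commit):

// C99 (Annex F) defines pow(1, y) == 1 for ANY y, even NaN, and
// pow(-1, +/-infinity) == 1. ES5.1 15.8.2.13 requires NaN for all of
// these, which is why the code returns OS::nan_value() before pow() runs.
#include <cmath>
#include <cstdio>

int main() {
  std::printf("%g\n", std::pow(1.0, std::nan("")));  // 1, but Math.pow: NaN
  std::printf("%g\n", std::pow(1.0, INFINITY));      // 1, but Math.pow: NaN
  std::printf("%g\n", std::pow(-1.0, INFINITY));     // 1, but Math.pow: NaN
  return 0;
}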
...
@@ -2948,8 +2948,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   const XMMRegister double_exponent = xmm1;
   const XMMRegister double_scratch = xmm4;
 
-  Label double_int_runtime, generic_runtime, done;
-  Label exponent_not_smi, int_exponent;
+  Label call_runtime, done, exponent_not_smi, int_exponent;
 
   // Save 1 in double_result - we need this several times later on.
   __ mov(scratch, Immediate(1));
@@ -2966,7 +2965,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ JumpIfSmi(base, &base_is_smi, Label::kNear);
     __ cmp(FieldOperand(base, HeapObject::kMapOffset),
            factory->heap_number_map());
-    __ j(not_equal, &generic_runtime);
+    __ j(not_equal, &call_runtime);
     __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
     __ jmp(&unpack_exponent, Label::kNear);
@@ -2983,7 +2982,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ bind(&exponent_not_smi);
     __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
            factory->heap_number_map());
-    __ j(not_equal, &generic_runtime);
+    __ j(not_equal, &call_runtime);
     __ movdbl(double_exponent,
               FieldOperand(exponent, HeapNumber::kValueOffset));
   } else if (exponent_type_ == TAGGED) {
@@ -3002,7 +3001,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ cvttsd2si(exponent, Operand(double_exponent));
     // Skip to runtime if possibly NaN (indicated by the indefinite integer).
     __ cmp(exponent, Immediate(0x80000000u));
-    __ j(equal, &generic_runtime);
+    __ j(equal, &call_runtime);
     __ cvtsi2sd(double_scratch, exponent);
     // Already ruled out NaNs for exponent.
     __ ucomisd(double_exponent, double_scratch);
@@ -3119,33 +3118,35 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ bind(&fast_power_failed);
     __ fninit();
     __ add(esp, Immediate(kDoubleSize));
-    __ jmp(&generic_runtime);
+    __ jmp(&call_runtime);
   }
 
   // Calculate power with integer exponent.
   __ bind(&int_exponent);
   const XMMRegister double_scratch2 = double_exponent;
   __ mov(scratch, exponent);  // Back up exponent.
   __ movsd(double_scratch, double_base);  // Back up base.
   __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
 
   // Get absolute value of exponent.
-  Label while_true, no_multiply;
-  const uint32_t kClearSignBitMask = 0x7FFFFFFF;
-  __ and_(exponent, Immediate(kClearSignBitMask));
+  Label no_neg, while_true, no_multiply;
+  __ test(scratch, scratch);
+  __ j(positive, &no_neg, Label::kNear);
+  __ neg(scratch);
+  __ bind(&no_neg);
 
   __ bind(&while_true);
-  __ shr(exponent, 1);
+  __ shr(scratch, 1);
   __ j(not_carry, &no_multiply, Label::kNear);
-  __ mulsd(double_result, double_base);
+  __ mulsd(double_result, double_scratch);
   __ bind(&no_multiply);
-  __ mulsd(double_base, double_base);
+  __ mulsd(double_scratch, double_scratch);
  __ j(not_zero, &while_true);
 
   // scratch has the original value of the exponent - if the exponent is
   // negative, return 1/result.
-  __ test(scratch, scratch);
+  __ test(exponent, exponent);
   __ j(positive, &done);
   __ divsd(double_scratch2, double_result);
   __ movsd(double_result, double_scratch2);
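The loop above is classic exponentiation by squaring, rewritten here to run on the copies (scratch, double_scratch) so that the original exponent and base registers survive for the sign test and the bailout path. A C++ sketch of the same control flow (an illustration, not the stub code):

#include <cstdio>

// `result` starts at 1 (the stub preloads double_result with 1); the
// exponent is shifted right once per iteration, and the bit shifted out
// (the carry flag in the assembly) decides whether to multiply.
double PowIntSketch(double base, int exponent) {
  unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  double result = 1.0;   // double_result
  double square = base;  // double_scratch, holds base^(2^k)
  while (e != 0) {                // j(not_zero, &while_true)
    if (e & 1) result *= square;  // skipped via j(not_carry, &no_multiply)
    square *= square;             // mulsd(double_scratch, double_scratch)
    e >>= 1;                      // shr(scratch, 1)
  }
  // If the original exponent was negative, return 1/result (the stub's
  // divsd of double_scratch2, which still holds 1.0, by double_result).
  return exponent < 0 ? 1.0 / result : result;
}

int main() {
  std::printf("%g\n", PowIntSketch(2.0, 10));  // 1024
  std::printf("%g\n", PowIntSketch(2.0, -2));  // 0.25
  return 0;
}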
@@ -3153,47 +3154,36 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
   __ xorps(double_scratch2, double_scratch2);
   __ ucomisd(double_scratch2, double_result);  // Result cannot be NaN.
-  __ j(equal, &double_int_runtime);
+  // double_exponent aliased as double_scratch2 has already been overwritten
+  // and may not have contained the exponent value in the first place when the
+  // exponent is a smi. We reset it with exponent value before bailing out.
+  __ j(not_equal, &done);
+  __ cvtsi2sd(double_exponent, exponent);
 
   // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
   if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
     // The stub is called from non-optimized code, which expects the result
     // as heap number in exponent.
     __ bind(&done);
-    __ AllocateHeapNumber(exponent, scratch, base, &generic_runtime);
-    __ movdbl(FieldOperand(exponent, HeapNumber::kValueOffset), double_result);
+    __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
+    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+    __ IncrementCounter(counters->math_pow(), 1);
     __ ret(2 * kPointerSize);
-
-    // The arguments are still on the stack.
-    __ bind(&generic_runtime);
-    __ bind(&double_int_runtime);
-    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
   } else {
-    __ jmp(&done);
-
-    Label return_from_runtime;
-    __ bind(&generic_runtime);
+    __ bind(&call_runtime);
     {
       AllowExternalCallThatCantCauseGC scope(masm);
-      __ PrepareCallCFunction(4, exponent);
+      __ PrepareCallCFunction(4, scratch);
       __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
       __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
       __ CallCFunction(
           ExternalReference::power_double_double_function(masm->isolate()), 4);
     }
-    __ jmp(&return_from_runtime, Label::kNear);
-
-    __ bind(&double_int_runtime);
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ PrepareCallCFunction(4, exponent);
-      __ movdbl(Operand(esp, 0 * kDoubleSize), double_scratch);
-      __ mov(Operand(esp, 1 * kDoubleSize), scratch);
-      __ CallCFunction(
-          ExternalReference::power_double_int_function(masm->isolate()), 4);
-    }
-    __ bind(&return_from_runtime);
     // Return value is in st(0) on ia32.
     // Store it into the (fixed) result register.
     __ sub(esp, Immediate(kDoubleSize));
@@ -3202,6 +3192,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ add(esp, Immediate(kDoubleSize));
 
     __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1);
     __ ret(0);
   }
 }
...
@@ -7398,7 +7398,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
   return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
 }
 
-
+// Slow version of Math.pow. We check for fast paths for special cases.
+// Used if SSE2/VFP3 is not available.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -7414,25 +7415,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
   }
 
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
-
-  // Returning a smi would not confuse crankshaft as this part of code is only
-  // run if SSE2 was not available, in which case crankshaft is disabled.
-  if (y == 0) return Smi::FromInt(1);  // Returns 1 if exponent is 0.
-  return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
+  int y_int = static_cast<int>(y);
+  double result;
+  if (y == y_int) {
+    result = power_double_int(x, y_int);  // Returns 1 if exponent is 0.
+  } else if (y == 0.5) {
+    result = (isinf(x)) ? V8_INFINITY : sqrt(x + 0.0);  // Convert -0 to +0.
+  } else if (y == -0.5) {
+    result = (isinf(x)) ? 0 : 1.0 / sqrt(x + 0.0);  // Convert -0 to +0.
+  } else {
+    result = power_double_double(x, y);
+  }
+  if (isnan(result)) return isolate->heap()->nan_value();
+  return isolate->heap()->AllocateHeapNumber(result);
 }
-// Fast version of Math.pow if we know that y is not an integer and
-// y is not -0.5 or 0.5. Used as slowcase from codegen.
+// Fast version of Math.pow if we know that y is not an integer and y is not
+// -0.5 or 0.5. Used as slow case from fullcodegen.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
+  isolate->counters()->math_pow()->Increment();
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   if (y == 0) {
     return Smi::FromInt(1);
+  } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+    return isolate->heap()->nan_value();
   } else {
-    return isolate->heap()->AllocateHeapNumber(pow(x, y));
+    double result = power_double_double(x, y);
+    if (isnan(result)) return isolate->heap()->nan_value();
+    return isolate->heap()->AllocateHeapNumber(result);
   }
 }
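Each branch in the rewritten runtime functions above corresponds to a spot where plain C library behavior and ECMAScript semantics diverge. A small standalone check of the corner cases involved (not commit code):

#include <cmath>
#include <cstdio>

int main() {
  // Why y == 0 is tested before anything else: both C99 and ECMAScript
  // define pow(x, +/-0) == 1 for EVERY x, including NaN.
  std::printf("%g\n", std::pow(std::nan(""), 0.0));  // 1

  // Why sqrt(x + 0.0) rather than sqrt(x): IEEE sqrt(-0.0) is -0.0, but
  // Math.pow(-0, 0.5) must be +0; adding +0.0 turns -0 into +0.
  std::printf("%g\n", std::sqrt(-0.0));        // -0
  std::printf("%g\n", std::sqrt(-0.0 + 0.0));  // 0

  // Why the isinf(x) guards: C sqrt(-inf) is NaN, but ECMAScript wants
  // Math.pow(-Infinity, 0.5) == Infinity and Math.pow(-Infinity, -0.5) == 0.
  std::printf("%g\n", std::sqrt(-INFINITY));   // NaN
  return 0;
}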
...
@@ -2004,8 +2004,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   const XMMRegister double_exponent = xmm1;
   const XMMRegister double_scratch = xmm4;
 
-  Label double_int_runtime, generic_runtime, done;
-  Label exponent_not_smi, int_exponent;
+  Label call_runtime, done, exponent_not_smi, int_exponent;
 
   // Save 1 in double_result - we need this several times later on.
   __ movq(scratch, Immediate(1));
@@ -2021,7 +2020,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ JumpIfSmi(base, &base_is_smi, Label::kNear);
     __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &generic_runtime);
+    __ j(not_equal, &call_runtime);
     __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
     __ jmp(&unpack_exponent, Label::kNear);
@@ -2038,7 +2037,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ bind(&exponent_not_smi);
     __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &generic_runtime);
+    __ j(not_equal, &call_runtime);
     __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
   } else if (exponent_type_ == TAGGED) {
     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2055,7 +2054,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ cvttsd2si(exponent, double_exponent);
     // Skip to runtime if possibly NaN (indicated by the indefinite integer).
     __ cmpl(exponent, Immediate(0x80000000u));
-    __ j(equal, &generic_runtime);
+    __ j(equal, &call_runtime);
     __ cvtlsi2sd(double_scratch, exponent);
     // Already ruled out NaNs for exponent.
     __ ucomisd(double_exponent, double_scratch);
@@ -2169,7 +2168,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ bind(&fast_power_failed);
     __ fninit();
     __ addq(rsp, Immediate(kDoubleSize));
-    __ jmp(&generic_runtime);
+    __ jmp(&call_runtime);
   }
 
   // Calculate power with integer exponent.
@@ -2181,9 +2180,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
 
   // Get absolute value of exponent.
-  Label while_true, no_multiply;
-  const uint32_t kClearSignBitMask = 0x7FFFFFFF;
-  __ andl(scratch, Immediate(kClearSignBitMask));
+  Label no_neg, while_true, no_multiply;
+  __ testl(scratch, scratch);
+  __ j(positive, &no_neg, Label::kNear);
+  __ negl(scratch);
+  __ bind(&no_neg);
 
   __ bind(&while_true);
   __ shrl(scratch, Immediate(1));
@@ -2194,8 +2195,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ mulsd(double_scratch, double_scratch);
   __ j(not_zero, &while_true);
 
-  // scratch has the original value of the exponent - if the exponent is
-  // negative, return 1/result.
+  // If the exponent is negative, return 1/result.
   __ testl(exponent, exponent);
   __ j(greater, &done);
   __ divsd(double_scratch2, double_result);
@@ -2204,27 +2204,28 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
   __ xorps(double_scratch2, double_scratch2);
   __ ucomisd(double_scratch2, double_result);
-  __ j(equal, &double_int_runtime);
+  // double_exponent aliased as double_scratch2 has already been overwritten
+  // and may not have contained the exponent value in the first place when the
+  // input was a smi. We reset it with exponent value before bailing out.
+  __ j(not_equal, &done);
+  __ cvtlsi2sd(double_exponent, exponent);
 
   // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
   if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
     // The stub is called from non-optimized code, which expects the result
     // as heap number in eax.
     __ bind(&done);
-    __ AllocateHeapNumber(rax, rcx, &generic_runtime);
+    __ AllocateHeapNumber(rax, rcx, &call_runtime);
     __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
+    __ IncrementCounter(counters->math_pow(), 1);
     __ ret(2 * kPointerSize);
-
-    // The arguments are still on the stack.
-    __ bind(&generic_runtime);
-    __ bind(&double_int_runtime);
-    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
   } else {
-    __ jmp(&done);
-
-    Label return_from_runtime;
-    StubRuntimeCallHelper callhelper;
-    __ bind(&generic_runtime);
+    __ bind(&call_runtime);
     // Move base to the correct argument register. Exponent is already in xmm1.
     __ movsd(xmm0, double_base);
     ASSERT(double_exponent.is(xmm1));
@@ -2234,27 +2235,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
       __ CallCFunction(
           ExternalReference::power_double_double_function(masm->isolate()), 2);
     }
-    __ jmp(&return_from_runtime, Label::kNear);
-
-    __ bind(&double_int_runtime);
-    // Move base to the correct argument register.
-    __ movsd(xmm0, double_base);
-    // Exponent is already in the correct argument register:
-    // edi (not rdi) on Linux and edx on Windows.
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ PrepareCallCFunction(2);
-      __ CallCFunction(
-          ExternalReference::power_double_int_function(masm->isolate()), 2);
-    }
-    __ bind(&return_from_runtime);
     // Return value is in xmm0.
     __ movsd(double_result, xmm0);
     // Restore context register.
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 
     __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1);
     __ ret(0);
   }
 }
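Worth noting for the x64 deletion above: with the power_double_int bailout gone, the only remaining C fallback takes two double arguments, and both x64 calling conventions (System V and Windows) pass the first two floating-point arguments in xmm0 and xmm1, which are exactly the registers the stub already uses for the base (after the movsd) and the exponent. That is why the old edi/edx comment and the second PrepareCallCFunction block can disappear entirely; on ia32, by contrast, the arguments still travel on the stack and the result comes back in st(0), as the retained code shows.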
...