Commit 05638b9d authored by bjaideep, committed by Commit bot

PPC/s390: [turbofan] Introduce Float64Pow and NumberPow operators.

Port e607e12e

Original commit message:
    Introduce a new machine operator Float64Pow that for now is backed by
    the existing MathPowStub to start the unification of Math.pow, and at
    the same time address the main performance issue that TurboFan still has
    with the imaging-darkroom benchmark in Kraken.

    Also migrate the Math.pow builtin itself to a TurboFan builtin and
    remove a few hundred lines of hand-written platform code for special
    handling of the fullcodegen Math.pow version.

R=bmeurer@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com, mbrandy@us.ibm.com

BUG=v8:3599,v8:5086,v8:5157
LOG=N

Review-Url: https://codereview.chromium.org/2106883002
Cr-Commit-Position: refs/heads/master@{#37344}
parent ab7234a4
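
Most of the removed platform code below is the ON_STACK path of MathPowStub, including a hand-written square-root fast path for exponents of +/-0.5. The special cases that path guards (its comments cite ECMA spec 15.8.2.13) are: Math.pow(-Infinity, 0.5) is +Infinity, Math.pow(-Infinity, -0.5) is +0, and Math.pow(-0, 0.5) is +0 even though sqrt(-0) is -0, which is why the code adds +0 before taking the square root. A minimal standalone sketch of just those semantics (not V8 code; the helper name is made up):

#include <cmath>
#include <cstdio>

// Hypothetical helper mirroring the removed sqrt-based fast path for
// exponent == 0.5 (the exponent == -0.5 path divides 1.0 by this value).
static double PowHalf(double base) {
  // Math.pow(-Infinity, 0.5) is +Infinity, but sqrt(-Infinity) would be NaN.
  if (base == -INFINITY) return INFINITY;
  // Adding +0 turns a -0 base into +0, so sqrt returns +0 rather than -0.
  return std::sqrt(base + 0.0);
}

int main() {
  std::printf("%g %g %g\n",
              PowHalf(-INFINITY),    // inf
              PowHalf(-0.0),         // 0 (not -0)
              1.0 / PowHalf(16.0));  // 0.25, i.e. pow(16, -0.5)
  return 0;
}
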
@@ -1301,6 +1301,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
      __ CallStub(&stub);
      __ Move(d1, d3);
      break;
    }
    case kPPC_Neg:
      __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
      break;
...
@@ -1285,6 +1285,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
      __ CallStub(&stub);
      __ Move(d1, d3);
      break;
    }
    case kS390_Neg:
      __ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
      break;
...
@@ -734,11 +734,8 @@ void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = r4;
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(r5));
  const Register heapnumbermap = r8;
  const Register heapnumber = r3;
  const DoubleRegister double_base = d1;
  const DoubleRegister double_exponent = d2;
  const DoubleRegister double_result = d3;
@@ -747,36 +744,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
  const Register scratch2 = r10;
  Label call_runtime, done, int_exponent;
  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
    __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ bne(&call_runtime);
    __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ b(&unpack_exponent);
    __ bind(&base_is_smi);
    __ ConvertIntToDouble(scratch, double_base);
    __ bind(&unpack_exponent);
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
    __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ bne(&call_runtime);
    __ lfd(double_exponent,
           FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -790,53 +758,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
                             double_scratch);
    __ beq(&int_exponent);
    if (exponent_type() == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half, not_minus_inf1, not_minus_inf2;
      // Test for 0.5.
      __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
      __ fcmpu(double_exponent, double_scratch);
      __ bne(&not_plus_half);
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
      __ fcmpu(double_base, double_scratch);
      __ bne(&not_minus_inf1);
      __ fneg(double_result, double_scratch);
      __ b(&done);
      __ bind(&not_minus_inf1);
      // Add +0 to convert -0 to +0.
      __ fadd(double_scratch, double_base, kDoubleRegZero);
      __ fsqrt(double_result, double_scratch);
      __ b(&done);
      __ bind(&not_plus_half);
      __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
      __ fcmpu(double_exponent, double_scratch);
      __ bne(&call_runtime);
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
      __ fcmpu(double_base, double_scratch);
      __ bne(&not_minus_inf2);
      __ fmr(double_result, kDoubleRegZero);
      __ b(&done);
      __ bind(&not_minus_inf2);
      // Add +0 to convert -0 to +0.
      __ fadd(double_scratch, double_base, kDoubleRegZero);
      __ LoadDoubleLiteral(double_result, 1.0, scratch);
      __ fsqrt(double_scratch, double_scratch);
      __ fdiv(double_result, double_result, double_scratch);
      __ b(&done);
    }
    __ mflr(r0);
    __ push(r0);
    {
@@ -905,37 +826,21 @@ void MathPowStub::Generate(MacroAssembler* masm) {
  __ ConvertIntToDouble(exponent, double_exponent);
  // Returning or bailing out.
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT);
    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
                          &call_runtime);
    __ stfd(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    DCHECK(heapnumber.is(r3));
    __ Ret(2);
  } else {
    __ mflr(r0);
    __ push(r0);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 0, 2);
    }
    __ pop(r0);
    __ mtlr(r0);
    __ MovFromFloatResult(double_result);
    __ bind(&done);
    __ Ret();
  __ mflr(r0);
  __ push(r0);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 0, 2);
  }
  __ pop(r0);
  __ mtlr(r0);
  __ MovFromFloatResult(double_result);
  __ bind(&done);
  __ Ret();
}
...
@@ -711,11 +711,8 @@ void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
}
void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = r3;
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(r4));
  const Register heapnumbermap = r7;
  const Register heapnumber = r2;
  const DoubleRegister double_base = d1;
  const DoubleRegister double_exponent = d2;
  const DoubleRegister double_result = d3;
@@ -724,36 +721,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
  const Register scratch2 = r9;
  Label call_runtime, done, int_exponent;
  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
    __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ CmpP(scratch, heapnumbermap);
    __ bne(&call_runtime);
    __ LoadDouble(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ b(&unpack_exponent, Label::kNear);
    __ bind(&base_is_smi);
    __ ConvertIntToDouble(scratch, double_base);
    __ bind(&unpack_exponent);
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
    __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ CmpP(scratch, heapnumbermap);
    __ bne(&call_runtime);
    __ LoadDouble(double_exponent,
                  FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -767,57 +735,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
                              double_scratch);
    __ beq(&int_exponent, Label::kNear);
    if (exponent_type() == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half, not_minus_inf1, not_minus_inf2;
      // Test for 0.5.
      __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
      __ cdbr(double_exponent, double_scratch);
      __ bne(&not_plus_half, Label::kNear);
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
      __ cdbr(double_base, double_scratch);
      __ bne(&not_minus_inf1, Label::kNear);
      __ lcdbr(double_result, double_scratch);
      __ b(&done);
      __ bind(&not_minus_inf1);
      // Add +0 to convert -0 to +0.
      __ ldr(double_scratch, double_base);
      __ lzdr(kDoubleRegZero);
      __ adbr(double_scratch, kDoubleRegZero);
      __ sqdbr(double_result, double_scratch);
      __ b(&done);
      __ bind(&not_plus_half);
      __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
      __ cdbr(double_exponent, double_scratch);
      __ bne(&call_runtime);
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
      __ cdbr(double_base, double_scratch);
      __ bne(&not_minus_inf2, Label::kNear);
      __ ldr(double_result, kDoubleRegZero);
      __ b(&done);
      __ bind(&not_minus_inf2);
      // Add +0 to convert -0 to +0.
      __ ldr(double_scratch, double_base);
      __ lzdr(kDoubleRegZero);
      __ adbr(double_scratch, kDoubleRegZero);
      __ LoadDoubleLiteral(double_result, 1.0, scratch);
      __ sqdbr(double_scratch, double_scratch);
      __ ddbr(double_result, double_scratch);
      __ b(&done);
    }
    __ push(r14);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
@@ -884,35 +801,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
  __ ConvertIntToDouble(exponent, double_exponent);
  // Returning or bailing out.
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT);
    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
                          &call_runtime);
    __ StoreDouble(double_result,
                   FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    DCHECK(heapnumber.is(r2));
    __ Ret(2);
  } else {
    __ push(r14);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 0, 2);
    }
    __ pop(r14);
    __ MovFromFloatResult(double_result);
    __ bind(&done);
    __ Ret();
  __ push(r14);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 0, 2);
  }
  __ pop(r14);
  __ MovFromFloatResult(double_result);
  __ bind(&done);
  __ Ret();
}
bool CEntryStub::NeedsImmovableCode() { return true; }
...