Commit 5b5feaa9 authored by verwaest@chromium.org

Remove the special MathFloor / MathAbs call handlers.

BUG=
R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/141733002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18669 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 431dcc93
@@ -1941,177 +1941,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into r0.
__ ldr(r0, MemOperand(sp, 0 * kPointerSize));
// If the argument is a smi, just return.
__ SmiTst(r0);
__ Drop(argc + 1, eq);
__ Ret(eq);
__ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
Label smi_check, just_return;
// Load the HeapNumber value.
// We will need access to the value in the core registers, so we load it
// with ldrd and move it to the fpu. It also spares a sub instruction for
// updating the HeapNumber value address, as vldr expects a multiple
// of 4 offset.
__ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ vmov(d1, r4, r5);
// Check for NaN, Infinities and -0.
// They are invariant through a Math.Floor call, so just
// return the original argument.
__ Sbfx(r3, r5, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
__ cmp(r3, Operand(-1));
__ b(eq, &just_return);
__ eor(r3, r5, Operand(0x80000000u));
__ orr(r3, r3, r4, SetCC);
__ b(eq, &just_return);
// Test for values that can be exactly represented as a
// signed 32-bit integer.
__ TryDoubleToInt32Exact(r0, d1, d2);
// If exact, branch to the smi check.
__ b(eq, &smi_check);
__ cmp(r5, Operand(0));
// If input is in ]+0, +inf[, the cmp has cleared overflow and negative
// (V=0 and N=0), the two following instructions won't execute and
// we fall through smi_check to check if the result can fit into a smi.
// If input is in ]-inf, -0[, subtract one and go to slow if we have
// an overflow. Otherwise we fall through to the smi check.
// Hint: if x is a negative, non-integer number,
// floor(x) <=> round_to_zero(x) - 1.
__ sub(r0, r0, Operand(1), SetCC, mi);
__ b(vs, &slow);
__ bind(&smi_check);
// Check if the result can fit into a smi. If we had an overflow,
// the result is either 0x80000000 or 0x7FFFFFFF and won't fit into a smi.
// If the result doesn't fit into a smi, branch to slow.
__ SmiTag(r0, SetCC);
__ b(vs, &slow);
__ bind(&just_return);
__ Drop(argc + 1);
__ Ret();
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
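// A standalone sketch of the identity the negative path above relies on:
// floor(x) == round_to_zero(x) - 1 for negative non-integers. The helper
// name FloorViaTruncate is illustrative, not V8 code.
#include <cassert>
#include <cmath>

static double FloorViaTruncate(double x) {
  double t = std::trunc(x);                  // round toward zero
  return (x < 0.0 && t != x) ? t - 1.0 : t;  // adjust negative non-integers
}

int main() {
  assert(FloorViaTruncate(-2.5) == -3.0);  // negative non-integer: -1
  assert(FloorViaTruncate(-2.0) == -2.0);  // negative integer: unchanged
  assert(FloorViaTruncate(2.5) == 2.0);    // positive: plain truncation
  return 0;
}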
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into r0.
__ ldr(r0, MemOperand(sp, 0 * kPointerSize));
// Check if the argument is a smi.
Label not_smi;
__ JumpIfNotSmi(r0, &not_smi);
// Do bitwise not or do nothing depending on the sign of the
// argument.
__ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
// Add 1 or do nothing depending on the sign of the argument.
__ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
Label slow;
__ b(mi, &slow);
// Smi case done.
__ Drop(argc + 1);
__ Ret();
// Check if the argument is a heap number and load its exponent and
// sign.
__ bind(&not_smi);
__ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
__ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
Label negative_sign;
__ tst(r1, Operand(HeapNumber::kSignMask));
__ b(ne, &negative_sign);
__ Drop(argc + 1);
__ Ret();
// If the argument is negative, clear the sign, and return a new
// number.
__ bind(&negative_sign);
__ eor(r1, r1, Operand(HeapNumber::kSignMask));
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, r4, r5, r6, &slow);
__ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ Drop(argc + 1);
__ Ret();
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
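// A standalone sketch of the branchless abs used in the smi fast path above:
// xor with the sign-extended sign bit, then subtract it. BranchlessAbs is an
// illustrative name; the unsigned detour avoids signed-overflow UB.
#include <cassert>
#include <cstdint>

static int32_t BranchlessAbs(int32_t v) {
  uint32_t mask = static_cast<uint32_t>(v >> 31);  // all ones iff negative
  uint32_t u = static_cast<uint32_t>(v);
  return static_cast<int32_t>((u ^ mask) - mask);  // flip bits, add one
}

int main() {
  assert(BranchlessAbs(-7) == 7);
  assert(BranchlessAbs(7) == 7);
  // The most negative value stays negative, which is why the stub branches
  // to the slow case when the sign flag is still set afterwards.
  assert(BranchlessAbs(INT32_MIN) == INT32_MIN);
  return 0;
}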
Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
...
@@ -2035,203 +2035,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
if (!CpuFeatures::IsSupported(SSE2)) {
return Handle<Code>::null();
}
CpuFeatureScope use_sse2(masm(), SSE2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) {
return Handle<Code>::null();
}
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into eax.
__ mov(eax, Operand(esp, 1 * kPointerSize));
// Check if the argument is a smi.
Label smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(eax, &smi);
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
__ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
__ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
// discards NaN.
__ xorpd(xmm1, xmm1);
__ ucomisd(xmm0, xmm1);
__ j(below_equal, &slow);
// Do a truncating conversion.
__ cvttsd2si(eax, Operand(xmm0));
// Check if the result fits into a smi. Note this also checks for
// 0x80000000 which signals a failed conversion.
Label wont_fit_into_smi;
__ test(eax, Immediate(0xc0000000));
__ j(not_zero, &wont_fit_into_smi);
// Smi tag and return.
__ SmiTag(eax);
__ bind(&smi);
__ ret(2 * kPointerSize);
// Check if the argument is < 2^kMantissaBits.
Label already_round;
__ bind(&wont_fit_into_smi);
__ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
__ ucomisd(xmm0, xmm1);
__ j(above_equal, &already_round);
// Save a copy of the argument.
__ movaps(xmm2, xmm0);
// Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
__ addsd(xmm0, xmm1);
__ subsd(xmm0, xmm1);
// Compare the argument and the tentative result to get the right mask:
// if xmm2 < xmm0:
// xmm2 = 1...1
// else:
// xmm2 = 0...0
__ cmpltsd(xmm2, xmm0);
// Subtract 1 if the argument was less than the tentative result.
__ LoadPowerOf2(xmm1, ebx, 0);
__ andpd(xmm1, xmm2);
__ subsd(xmm0, xmm1);
// Return a new heap number.
__ AllocateHeapNumber(eax, ebx, edx, &slow);
__ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(2 * kPointerSize);
// Return the argument (when it's an already round heap number).
__ bind(&already_round);
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ ret(2 * kPointerSize);
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
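// A scalar sketch of the rounding trick above: adding and then subtracting
// 2^kMantissaBits (2^52) forces the fraction bits out of the mantissa, so
// the add rounds to an integer in the default round-to-nearest mode, and the
// cmpltsd mask subtracts 1 whenever that rounded upward. FloorViaMantissa is
// an illustrative name; like the stub, it assumes x is strictly positive,
// and `volatile` keeps the compiler from folding the add/sub away.
#include <cassert>

static double FloorViaMantissa(double x) {
  const double kTwo52 = 4503599627370496.0;  // 2^52
  if (x >= kTwo52) return x;                 // already round
  volatile double t = x + kTwo52;            // rounds to nearest integer
  double rounded = t - kTwo52;
  return (x < rounded) ? rounded - 1.0 : rounded;  // undo upward rounding
}

int main() {
  assert(FloorViaMantissa(2.5) == 2.0);
  assert(FloorViaMantissa(0.7) == 0.0);
  assert(FloorViaMantissa(3.0) == 3.0);
  return 0;
}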
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) {
return Handle<Code>::null();
}
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into eax.
__ mov(eax, Operand(esp, 1 * kPointerSize));
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(eax, &not_smi);
// Branchless abs implementation; see
// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
__ mov(ebx, eax);
__ sar(ebx, kBitsPerInt - 1);
// Do bitwise not or do nothing depending on ebx.
__ xor_(eax, ebx);
// Add 1 or do nothing depending on ebx.
__ sub(eax, ebx);
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
Label slow;
__ j(negative, &slow);
// Smi case done.
__ ret(2 * kPointerSize);
// Check if the argument is a heap number and load its exponent and
// sign into ebx.
__ bind(&not_smi);
__ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
__ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
Label negative_sign;
__ test(ebx, Immediate(HeapNumber::kSignMask));
__ j(not_zero, &negative_sign);
__ ret(2 * kPointerSize);
// If the argument is negative, clear the sign, and return a new
// number.
__ bind(&negative_sign);
__ and_(ebx, ~HeapNumber::kSignMask);
__ mov(ecx, FieldOperand(eax, HeapNumber::kMantissaOffset));
__ AllocateHeapNumber(eax, edi, edx, &slow);
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ebx);
__ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
__ ret(2 * kPointerSize);
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
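// A standalone sketch of the heap-number path above: Math.abs of a double
// needs no FPU work, only clearing the IEEE 754 sign bit before boxing a
// fresh number. DoubleAbsBits is an illustrative name; memcpy is the
// portable way to reinterpret the bits.
#include <cassert>
#include <cstdint>
#include <cstring>

static double DoubleAbsBits(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits &= ~(UINT64_C(1) << 63);  // clear the sign bit (64-bit analogue of kSignMask)
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

int main() {
  assert(DoubleAbsBits(-1.5) == 1.5);
  assert(DoubleAbsBits(1.5) == 1.5);
  assert(DoubleAbsBits(-0.0) == 0.0);
  return 0;
}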
Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
...
@@ -1923,194 +1923,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into v0.
__ lw(v0, MemOperand(sp, 0 * kPointerSize));
// If the argument is a smi, just return.
STATIC_ASSERT(kSmiTag == 0);
__ SmiTst(v0, t0);
__ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
__ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
// Use the FPU floor instruction.
// Load the HeapNumber value.
__ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
// Backup FCSR.
__ cfc1(a3, FCSR);
// Clearing FCSR clears the exception mask with no side-effects.
__ ctc1(zero_reg, FCSR);
// Convert the argument to an integer.
__ floor_w_d(f0, f0);
// Start checking for special cases.
// Get the argument exponent and clear the sign bit.
__ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
__ And(t2, t1, Operand(~HeapNumber::kSignMask));
__ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
// Retrieve FCSR and check for fpu errors.
__ cfc1(t5, FCSR);
__ And(t5, t5, Operand(kFCSRExceptionFlagMask));
__ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
// Check for NaN, Infinity, and -Infinity.
// They are invariant through a Math.Floor call, so just
// return the original argument.
__ Subu(t3, t2, Operand(HeapNumber::kExponentMask
>> HeapNumber::kMantissaBitsInTopWord));
__ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
// We had an overflow or underflow in the conversion. Check if we
// have a big exponent.
// If greater or equal, the argument is already round and in v0.
__ Branch(&restore_fcsr_and_return, ge, t3,
Operand(HeapNumber::kMantissaBits));
__ Branch(&wont_fit_smi);
__ bind(&no_fpu_error);
// Move the result back to v0.
__ mfc1(v0, f0);
// Check if the result fits into a smi.
__ Addu(a1, v0, Operand(0x40000000));
__ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
// Tag the result.
STATIC_ASSERT(kSmiTag == 0);
__ sll(v0, v0, kSmiTagSize);
// Check for -0.
__ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
// t1 already holds the HeapNumber exponent.
__ And(t0, t1, Operand(HeapNumber::kSignMask));
// If our HeapNumber is negative it was -0, so load its address and return.
// Else v0 is loaded with 0, so we can also just return.
__ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
__ lw(v0, MemOperand(sp, 0 * kPointerSize));
__ bind(&restore_fcsr_and_return);
// Restore FCSR and return.
__ ctc1(a3, FCSR);
__ DropAndRet(argc + 1);
__ bind(&wont_fit_smi);
// Restore FCSR and fall to slow case.
__ ctc1(a3, FCSR);
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
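// A portable sketch of the FCSR dance above: clear the FPU exception flags,
// convert, then read the flags back to detect a failed conversion. This uses
// standard <cfenv>; FloorToInt32 is an illustrative name, and reliable flag
// testing may require FENV_ACCESS support, which varies by compiler.
#include <cassert>
#include <cfenv>
#include <cmath>
#include <cstdint>

static bool FloorToInt32(double x, int32_t* out) {
  std::feclearexcept(FE_ALL_EXCEPT);       // like ctc1 zero_reg, FCSR
  long r = std::lrint(std::floor(x));      // like floor_w_d
  if (std::fetestexcept(FE_INVALID)) return false;   // NaN or out of range
  if (r < INT32_MIN || r > INT32_MAX) return false;  // like wont_fit_smi
  *out = static_cast<int32_t>(r);
  return true;
}

int main() {
  int32_t v = 0;
  assert(FloorToInt32(2.7, &v) && v == 2);
  assert(!FloorToInt32(1e300, &v));  // overflow raises FE_INVALID
  return 0;
}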
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into v0.
__ lw(v0, MemOperand(sp, 0 * kPointerSize));
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(v0, &not_smi);
// Do bitwise not or do nothing depending on the sign of the
// argument.
__ sra(t0, v0, kBitsPerInt - 1);
__ Xor(a1, v0, t0);
// Add 1 or do nothing depending on the sign of the argument.
__ Subu(v0, a1, t0);
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
Label slow;
__ Branch(&slow, lt, v0, Operand(zero_reg));
// Smi case done.
__ DropAndRet(argc + 1);
// Check if the argument is a heap number and load its exponent and
// sign.
__ bind(&not_smi);
__ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
__ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
Label negative_sign;
__ And(t0, a1, Operand(HeapNumber::kSignMask));
__ Branch(&negative_sign, ne, t0, Operand(zero_reg));
__ DropAndRet(argc + 1);
// If the argument is negative, clear the sign, and return a new
// number.
__ bind(&negative_sign);
__ Xor(a1, a1, Operand(HeapNumber::kSignMask));
__ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(v0, t0, t1, t2, &slow);
__ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ DropAndRet(argc + 1);
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
...
@@ -903,9 +903,7 @@ class KeyedStoreStubCompiler: public StoreStubCompiler {
#define CUSTOM_CALL_IC_GENERATORS(V) \
V(ArrayPush) \
V(ArrayPop) \
V(StringFromCharCode) \
V(MathFloor) \
V(MathAbs)
V(StringFromCharCode)
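// CUSTOM_CALL_IC_GENERATORS is an X-macro list: each consumer defines V to
// stamp out one declaration, case, or table entry per name, so deleting the
// MathFloor/MathAbs lines removes every generated artifact at once. A
// minimal sketch of the pattern, with an illustrative list (not V8's):
#include <cstdio>

#define COLOR_LIST(V) \
  V(Red)              \
  V(Green)            \
  V(Blue)

enum Color {
#define DEFINE_ENUM(name) k##name,
  COLOR_LIST(DEFINE_ENUM)
#undef DEFINE_ENUM
};

static const char* ColorName(Color c) {
  switch (c) {
#define DEFINE_CASE(name) case k##name: return #name;
    COLOR_LIST(DEFINE_CASE)
#undef DEFINE_CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", ColorName(kGreen)); return 0; }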
class CallStubCompiler: public StubCompiler {
...
@@ -1956,196 +1956,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
const int argc = arguments().immediate();
StackArgumentsAccessor args(rsp, argc);
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) {
return Handle<Code>::null();
}
Label miss, slow;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into rax.
__ movq(rax, args.GetArgumentOperand(1));
// Check if the argument is a smi.
Label smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(rax, &smi);
// Check if the argument is a heap number and load its value into xmm0.
__ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
__ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also discards NaN.
__ xorpd(xmm1, xmm1);
__ ucomisd(xmm0, xmm1);
__ j(below_equal, &slow);
// Do a truncating conversion.
__ cvttsd2si(rax, xmm0);
// Check for 0x80000000, which signals a failed conversion.
Label conversion_failure;
__ cmpl(rax, Immediate(0x80000000));
__ j(equal, &conversion_failure);
// Smi tag and return.
__ Integer32ToSmi(rax, rax);
__ bind(&smi);
__ ret(2 * kPointerSize);
// Check if the argument is < 2^kMantissaBits.
Label already_round;
__ bind(&conversion_failure);
int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
__ movq(rbx, kTwoMantissaBits);
__ movq(xmm1, rbx);
__ ucomisd(xmm0, xmm1);
__ j(above_equal, &already_round);
// Save a copy of the argument.
__ movaps(xmm2, xmm0);
// Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
__ addsd(xmm0, xmm1);
__ subsd(xmm0, xmm1);
// Compare the argument and the tentative result to get the right mask:
// if xmm2 < xmm0:
// xmm2 = 1...1
// else:
// xmm2 = 0...0
__ cmpltsd(xmm2, xmm0);
// Subtract 1 if the argument was less than the tentative result.
int64_t kOne = V8_INT64_C(0x3ff0000000000000);
__ movq(rbx, kOne);
__ movq(xmm1, rbx);
__ andpd(xmm1, xmm2);
__ subsd(xmm0, xmm1);
// Return a new heap number.
__ AllocateHeapNumber(rax, rbx, &slow);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ ret(2 * kPointerSize);
// Return the argument (when it's an already round heap number).
__ bind(&already_round);
__ movq(rax, args.GetArgumentOperand(1));
__ ret(2 * kPointerSize);
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
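// A sketch of the smi handling above: on x64, the smi payload lives in the
// upper 32 bits, so Integer32ToSmi is a single shift, every int32 fits, and
// only cvttsd2si's 0x80000000 failure sentinel has to be filtered out first.
// TagSmi/UntagSmi are illustrative names; the 32-bit shift matches how x64
// V8 smis are commonly documented (assumption, not taken from this diff).
#include <cassert>
#include <cstdint>

static int64_t TagSmi(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // tag bits stay zero
}

static int32_t UntagSmi(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);    // arithmetic shift restores sign
}

int main() {
  assert(UntagSmi(TagSmi(-42)) == -42);
  assert(UntagSmi(TagSmi(0x7fffffff)) == 0x7fffffff);
  assert(TagSmi(0) == 0);  // smi zero is all zero bits
  return 0;
}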
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
StackArgumentsAccessor args(rsp, argc);
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into rax.
__ movq(rax, args.GetArgumentOperand(1));
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(rax, &not_smi);
// Branchless abs implementation; see
// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
// Set rbx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
__ movq(rbx, rax);
__ sar(rbx, Immediate(kBitsPerPointer - 1));
// Do bitwise not or do nothing depending on rbx.
__ xor_(rax, rbx);
// Add 1 or do nothing depending on rbx.
__ subq(rax, rbx);
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
Label slow;
__ j(negative, &slow);
__ ret(2 * kPointerSize);
// Check if the argument is a heap number and load its value.
__ bind(&not_smi);
__ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
__ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
Label negative_sign;
const int sign_mask_shift =
(HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
__ Set(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift);
__ testq(rbx, rdi);
__ j(not_zero, &negative_sign);
__ ret(2 * kPointerSize);
// If the argument is negative, clear the sign, and return a new
// number. We still have the sign mask in rdi.
__ bind(&negative_sign);
__ xor_(rbx, rdi);
__ AllocateHeapNumber(rax, rdx, &slow);
__ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
__ ret(2 * kPointerSize);
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
...