Commit cb9b8010 authored by bmeurer, committed by Commit bot

[builtins] Make Math.max and Math.min fast by default.

The previous versions of Math.max and Math.min were hard to optimize
(that's why we already have custom code for them in Crankshaft), and
since there is no good way to deal with their variable number of
arguments, we would probably have needed special code in TurboFan as
well; so inlining those builtins is off the table, and there's no real
advantage in keeping them around as "not quite JS" with extra work in
the optimizing compilers just to make them somewhat fast in the cases
where we cannot inline them (there's also a tricky deopt loop in
Crankshaft related to Math.min and Math.max, but that will be dealt
with later).

To sum up: instead of trying to make Math.max and Math.min semi-fast
in the optimizing compilers via awkward work-arounds for %_Arguments
and %_ArgumentsLength, we provide the optimal code as native builtins
and call it a day (which also gives a nice performance boost on some
benchmarks).
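
For reference, the behaviour these builtins have to implement can be
modelled in plain JavaScript; this is only an illustrative sketch of
the accumulator loop, not the actual implementation:

  function mathMaxMin(isMin, args) {
    var acc = isMin ? Infinity : -Infinity;  // default result for zero arguments
    for (var i = 0; i < args.length; i++) {
      var n = +args[i];                      // ToNumber; may call back into JS (valueOf)
      if (n !== n) {
        acc = NaN;                           // NaN is sticky, but later args are still converted
      } else if (isMin ? n < acc : n > acc) {
        acc = n;
      } else if (n === acc && acc === 0) {
        // Tie between -0 and +0: min prefers -0, max prefers +0.
        if (isMin ? (1 / n === -Infinity) : (1 / acc === -Infinity)) acc = n;
      }
    }
    return acc;
  }

The per-architecture code below follows this shape, walking the tagged
arguments on the stack and keeping the accumulator both as a tagged
value and as a double.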

R=jarin@chromium.org

Review URL: https://codereview.chromium.org/1641083003

Cr-Commit-Position: refs/heads/master@{#33582}
parent d51398fe
......@@ -136,6 +136,108 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
// -- sp[argc * 4] : receiver
// -----------------------------------
Condition const cc_done = (kind == MathMaxMinKind::kMin) ? mi : gt;
Condition const cc_swap = (kind == MathMaxMinKind::kMin) ? gt : mi;
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
// Load the accumulator with the default return value (either -Infinity or
// +Infinity), with the tagged value in r1 and the double value in d1.
__ LoadRoot(r1, root_index);
__ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ mov(r4, r0);
Label done_loop, loop;
__ bind(&loop);
{
// Check if all parameters done.
__ sub(r0, r0, Operand(1), SetCC);
__ b(lt, &done_loop);
// Load the next parameter tagged value into r2.
__ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Load the double value of the parameter into d2, maybe converting the
// parameter to a number first using the ToNumberStub if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(r2, &convert_smi);
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ JumpIfRoot(r3, Heap::kHeapNumberMapRootIndex, &convert_number);
{
// Parameter is not a Number, use the ToNumberStub to convert it.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r0, r0);
__ SmiTag(r4, r4);
__ Push(r0, r1, r4);
__ mov(r0, r2);
ToNumberStub stub(masm->isolate());
__ CallStub(&stub);
__ mov(r2, r0);
__ Pop(r0, r1, r4);
{
// Restore the double accumulator value (d1).
Label restore_smi, done_restore;
__ JumpIfSmi(r1, &restore_smi);
__ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ b(&done_restore);
__ bind(&restore_smi);
__ SmiToDouble(d1, r1);
__ bind(&done_restore);
}
__ SmiUntag(r4);
__ SmiUntag(r0);
}
__ b(&convert);
__ bind(&convert_number);
__ vldr(d2, FieldMemOperand(r2, HeapNumber::kValueOffset));
__ b(&done_convert);
__ bind(&convert_smi);
__ SmiToDouble(d2, r2);
__ bind(&done_convert);
// Perform the actual comparison with the accumulator value on the left hand
// side (d1) and the next parameter value on the right hand side (d2).
Label compare_nan, compare_swap;
__ VFPCompareAndSetFlags(d1, d2);
__ b(cc_done, &loop);
__ b(cc_swap, &compare_swap);
__ b(vs, &compare_nan);
// Left and right hand side are equal, check for -0 vs. +0.
__ VmovHigh(ip, reg);
__ cmp(ip, Operand(0x80000000));
__ b(ne, &loop);
// Result is on the right hand side.
__ bind(&compare_swap);
__ vmov(d1, d2);
__ mov(r1, r2);
__ b(&loop);
// At least one side is NaN, which means that the result will be NaN too.
__ bind(&compare_nan);
__ LoadRoot(r1, Heap::kNanValueRootIndex);
__ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ b(&loop);
}
__ bind(&done_loop);
__ mov(r0, r1);
__ Drop(r4);
__ Ret(1);
}
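The equal-values path only has to tell -0 and +0 apart: the two compare
equal but differ in the sign bit, which is what the VmovHigh/0x80000000
check above tests. The observable results (also exercised by the
mjsunit tests further down) are:

  1 / Math.max(-0, -0);   // -Infinity: the accumulator stays -0
  1 / Math.max(-0, +0);   // +Infinity: +0 wins for max
  1 / Math.min(+0, -0);   // -Infinity: -0 wins for min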
// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -185,6 +185,9 @@ void MacroAssembler::Drop(int count, Condition cond) {
}
}
void MacroAssembler::Drop(Register count, Condition cond) {
add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
}
void MacroAssembler::Ret(int drop, Condition cond) {
Drop(drop, cond);
......
......@@ -128,6 +128,7 @@ class MacroAssembler: public Assembler {
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count, Condition cond = al);
void Drop(Register count, Condition cond = al);
void Ret(int drop, Condition cond = al);
......
......@@ -137,6 +137,110 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- lr : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
// -- sp[argc * 8] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_MathMaxMin");
Condition const cc_done = (kind == MathMaxMinKind::kMin) ? mi : gt;
Condition const cc_swap = (kind == MathMaxMinKind::kMin) ? gt : mi;
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
// Load the accumulator with the default return value (either -Infinity or
// +Infinity), with the tagged value in x1 and the double value in d1.
__ LoadRoot(x1, root_index);
__ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
__ Mov(x4, x0);
Label done_loop, loop;
__ Bind(&loop);
{
// Check if all parameters done.
__ Subs(x0, x0, 1);
__ B(lt, &done_loop);
// Load the next parameter tagged value into x2.
__ Peek(x2, Operand(x0, LSL, kPointerSizeLog2));
// Load the double value of the parameter into d2, maybe converting the
// parameter to a number first using the ToNumberStub if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ Bind(&convert);
__ JumpIfSmi(x2, &convert_smi);
__ Ldr(x3, FieldMemOperand(x2, HeapObject::kMapOffset));
__ JumpIfRoot(x3, Heap::kHeapNumberMapRootIndex, &convert_number);
{
// Parameter is not a Number, use the ToNumberStub to convert it.
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(x0);
__ SmiTag(x4);
__ Push(x0, x1, x4);
__ Mov(x0, x2);
ToNumberStub stub(masm->isolate());
__ CallStub(&stub);
__ Mov(x2, x0);
__ Pop(x4, x1, x0);
{
// Restore the double accumulator value (d1).
Label restore_smi, done_restore;
__ JumpIfSmi(x1, &restore_smi);
__ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
__ B(&done_restore);
__ Bind(&restore_smi);
__ SmiUntagToDouble(d1, x1);
__ Bind(&done_restore);
}
__ SmiUntag(x4);
__ SmiUntag(x0);
}
__ B(&convert);
__ Bind(&convert_number);
__ Ldr(d2, FieldMemOperand(x2, HeapNumber::kValueOffset));
__ B(&done_convert);
__ Bind(&convert_smi);
__ SmiUntagToDouble(d2, x2);
__ Bind(&done_convert);
// Perform the actual comparison with the accumulator value on the left hand
// side (d1) and the next parameter value on the right hand side (d2).
Label compare_nan, compare_swap;
__ Fcmp(d1, d2);
__ B(cc_done, &loop);
__ B(cc_swap, &compare_swap);
__ B(vs, &compare_nan);
// Left and right hand side are equal, check for -0 vs. +0.
__ Fmov(x3, reg);
__ TestAndBranchIfAllClear(x3, V8_INT64_C(0x8000000000000000), &loop);
// Result is on the right hand side.
__ Bind(&compare_swap);
__ Fmov(d1, d2);
__ Mov(x1, x2);
__ B(&loop);
// At least one side is NaN, which means that the result will be NaN too.
__ Bind(&compare_nan);
__ LoadRoot(x1, Heap::kNanValueRootIndex);
__ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
__ B(&loop);
}
__ Bind(&done_loop);
__ Mov(x0, x1);
__ Drop(x4);
__ Drop(1);
__ Ret();
}
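The ToNumberStub call can run arbitrary JavaScript (e.g. a valueOf
method), which is why it sits inside an internal frame with x0, x1 and
x4 saved (the two counters Smi-tagged so they are valid tagged values
on the stack) and why the double accumulator d1 is reloaded from the
tagged accumulator x1 afterwards. A plain JavaScript illustration of
such a conversion:

  var calls = [];
  var o = { valueOf: function() { calls.push("o"); return 42; } };
  Math.max(1, o, 2);   // 42; o.valueOf() runs in the middle of the builtin
  calls;               // ["o"]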
// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -1517,9 +1517,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
cons->shared()->set_instance_class_name(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
DCHECK(json_object->IsJSObject());
JSObject::AddProperty(global, name, json_object, DONT_ENUM);
Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
DCHECK(math->IsJSObject());
JSObject::AddProperty(global, name, math, DONT_ENUM);
SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
}
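The 2 passed to SimpleInstallFunction becomes the functions' declared
length; as before, any number of arguments is accepted:

  Math.max.length;       // 2
  Math.min.length;       // 2
  Math.max(1, 2, 3, 4);  // 4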
{ // -- A r r a y B u f f e r
......
......@@ -264,6 +264,9 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(MathMax, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(MathMin, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(NumberConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(NumberConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
......@@ -546,6 +549,17 @@ class Builtins {
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
enum class MathMaxMinKind { kMax, kMin };
static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
// ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )
static void Generate_MathMax(MacroAssembler* masm) {
Generate_MathMaxMin(masm, MathMaxMinKind::kMax);
}
// ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values )
static void Generate_MathMin(MacroAssembler* masm) {
Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
}
// ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
static void Generate_NumberConstructor(MacroAssembler* masm);
// ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
......
......@@ -1415,6 +1415,122 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
XMMRegister const reg = (kind == MathMaxMinKind::kMin) ? xmm1 : xmm0;
// Load the accumulator with the default return value (either -Infinity or
// +Infinity), with the tagged value in edx and the double value in xmm0.
__ LoadRoot(edx, root_index);
__ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ Move(ecx, eax);
Label done_loop, loop;
__ bind(&loop);
{
// Check if all parameters done.
__ test(ecx, ecx);
__ j(zero, &done_loop);
// Load the next parameter tagged value into ebx.
__ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
// Load the double value of the parameter into xmm1, maybe converting the
// parameter to a number first using the ToNumberStub if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(ebx, &convert_smi);
__ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex, &convert_number);
{
// Parameter is not a Number, use the ToNumberStub to convert it.
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(eax);
__ SmiTag(ecx);
__ Push(eax);
__ Push(ecx);
__ Push(edx);
__ mov(eax, ebx);
ToNumberStub stub(masm->isolate());
__ CallStub(&stub);
__ mov(ebx, eax);
__ Pop(edx);
__ Pop(ecx);
__ Pop(eax);
{
// Restore the double accumulator value (xmm0).
Label restore_smi, done_restore;
__ JumpIfSmi(edx, &restore_smi, Label::kNear);
__ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ jmp(&done_restore, Label::kNear);
__ bind(&restore_smi);
__ SmiUntag(edx);
__ Cvtsi2sd(xmm0, edx);
__ SmiTag(edx);
__ bind(&done_restore);
}
__ SmiUntag(ecx);
__ SmiUntag(eax);
}
__ jmp(&convert);
__ bind(&convert_number);
__ movsd(xmm1, FieldOperand(ebx, HeapNumber::kValueOffset));
__ jmp(&done_convert, Label::kNear);
__ bind(&convert_smi);
__ SmiUntag(ebx);
__ Cvtsi2sd(xmm1, ebx);
__ SmiTag(ebx);
__ bind(&done_convert);
// Perform the actual comparison with the accumulator value on the left hand
// side (xmm0) and the next parameter value on the right hand side (xmm1).
Label compare_equal, compare_nan, compare_swap, done_compare;
__ ucomisd(xmm0, xmm1);
__ j(parity_even, &compare_nan, Label::kNear);
__ j(cc, &done_compare, Label::kNear);
__ j(equal, &compare_equal, Label::kNear);
// Result is on the right hand side.
__ bind(&compare_swap);
__ movaps(xmm0, xmm1);
__ mov(edx, ebx);
__ jmp(&done_compare, Label::kNear);
// At least one side is NaN, which means that the result will be NaN too.
__ bind(&compare_nan);
__ LoadRoot(edx, Heap::kNanValueRootIndex);
__ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ jmp(&done_compare, Label::kNear);
// Left and right hand side are equal, check for -0 vs. +0.
__ bind(&compare_equal);
__ movmskpd(edi, reg);
__ test(edi, Immediate(1));
__ j(not_zero, &compare_swap);
__ bind(&done_compare);
__ dec(ecx);
__ jmp(&loop);
}
__ bind(&done_loop);
__ PopReturnAddressTo(ecx);
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(ecx);
__ mov(eax, edx);
__ Ret();
}
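A few consequences of the accumulator scheme that hold on every
architecture (ordinary JavaScript, shown here for reference):

  Math.max();            // -Infinity (the accumulator's initial value)
  Math.min();            // +Infinity
  Math.max(1, NaN, 3);   // NaN (the compare_nan path makes NaN sticky)
  Math.min("2", 1);      // 1   (every argument goes through ToNumber)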
// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -75,60 +75,6 @@ function MathLog(x) {
return %_MathLogRT(TO_NUMBER(x));
}
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
arg1 = TO_NUMBER(arg1);
arg2 = TO_NUMBER(arg2);
if (arg2 > arg1) return arg2;
if (arg1 > arg2) return arg1;
if (arg1 == arg2) {
// Make sure -0 is considered less than +0.
return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg2 : arg1;
}
// All comparisons failed, one of the arguments must be NaN.
return NaN;
}
var r = -INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
n = TO_NUMBER(n);
// Make sure +0 is considered greater than -0.
if (NUMBER_IS_NAN(n) || n > r || (r === 0 && n === 0 && %_IsMinusZero(r))) {
r = n;
}
}
return r;
}
// ECMA 262 - 15.8.2.12
function MathMin(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
arg1 = TO_NUMBER(arg1);
arg2 = TO_NUMBER(arg2);
if (arg2 > arg1) return arg1;
if (arg1 > arg2) return arg2;
if (arg1 == arg2) {
// Make sure -0 is considered less than +0.
return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg1 : arg2;
}
// All comparisons failed, one of the arguments must be NaN.
return NaN;
}
var r = INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
n = TO_NUMBER(n);
// Make sure -0 is considered less than +0.
if (NUMBER_IS_NAN(n) || n < r || (r === 0 && n === 0 && %_IsMinusZero(n))) {
r = n;
}
}
return r;
}
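For context, the removed general-case loops rely on NaN being sticky:
once r is NaN, every later comparison against it is false, so only the
NUMBER_IS_NAN(n) clause could fire again and r can never go back to a
finite value. A short trace of the old MathMax for Math.max(1, NaN, 5):

  // i = 0: n = 1,   1 > -INFINITY is true       -> r = 1
  // i = 1: n = NaN, NUMBER_IS_NAN(NaN) is true  -> r = NaN
  // i = 2: n = 5,   5 > NaN is false            -> r stays NaN
  // Result: NaN, the same behaviour the new builtins reproduce.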
// ECMA 262 - 15.8.2.13
function MathPowJS(x, y) {
return %_MathPow(TO_NUMBER(x), TO_NUMBER(y));
......@@ -314,8 +260,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"sqrt", MathSqrtJS,
"atan2", MathAtan2JS,
"pow", MathPowJS,
"max", MathMax,
"min", MathMin,
"imul", MathImul,
"sign", MathSign,
"trunc", MathTrunc,
......@@ -349,8 +293,6 @@ utils.Export(function(to) {
to.MathExp = MathExp;
to.MathFloor = MathFloorJS;
to.IntRandom = MathRandomRaw;
to.MathMax = MathMax;
to.MathMin = MathMin;
});
})
......@@ -179,8 +179,6 @@ function PostNatives(utils) {
"MapEntries",
"MapIterator",
"MapIteratorNext",
"MathMax",
"MathMin",
"MaxSimple",
"MinSimple",
"ObjectDefineProperty",
......
......@@ -17,8 +17,8 @@ var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var MakeRangeError;
var MakeTypeError;
var MathMax;
var MathMin;
var MaxSimple;
var MinSimple;
var matchSymbol = utils.ImportNow("match_symbol");
var RegExpExecNoTests;
var replaceSymbol = utils.ImportNow("replace_symbol");
......@@ -30,8 +30,8 @@ utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
MakeRangeError = from.MakeRangeError;
MakeTypeError = from.MakeTypeError;
MathMax = from.MathMax;
MathMin = from.MathMin;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
RegExpExecNoTests = from.RegExpExecNoTests;
});
......@@ -735,7 +735,7 @@ function StringStartsWith(searchString /* position */) { // length == 1
}
var s_len = s.length;
var start = MathMin(MathMax(pos, 0), s_len);
var start = MinSimple(MaxSimple(pos, 0), s_len);
var ss_len = ss.length;
if (ss_len + start > s_len) {
return false;
......@@ -765,7 +765,7 @@ function StringEndsWith(searchString /* position */) { // length == 1
}
}
var end = MathMin(MathMax(pos, 0), s_len);
var end = MinSimple(MaxSimple(pos, 0), s_len);
var ss_len = ss.length;
var start = end - ss_len;
if (start < 0) {
......
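MaxSimple and MinSimple are the plain two-argument helpers that remain
available to the natives, and for clamping an integer position they
behave exactly like the removed MathMax/MathMin, so the observable
String.prototype behaviour is unchanged:

  // start = MinSimple(MaxSimple(pos, 0), s_len) clamps pos into [0, s_len]:
  "abc".startsWith("a", -5);  // true (-5 clamps to 0)
  "abc".endsWith("c", 10);    // true (10 clamps to 3, the string length)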
......@@ -141,6 +141,109 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- ra : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
// -- sp[argc * 4] : receiver
// -----------------------------------
Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
// Load the accumulator with the default return value (either -Infinity or
// +Infinity), with the tagged value in a1 and the double value in f0.
__ LoadRoot(a1, root_index);
__ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
__ mov(a3, a0);
Label done_loop, loop;
__ bind(&loop);
{
// Check if all parameters done.
__ Subu(a0, a0, Operand(1));
__ Branch(USE_DELAY_SLOT, &done_loop, lt, a0, Operand(zero_reg));
// Load the next parameter tagged value into a2.
__ sll(at, a0, kPointerSizeLog2); // In delay slot
__ Addu(at, at, sp);
__ lw(a2, MemOperand(at));
// Load the double value of the parameter into f2, maybe converting the
// parameter to a number first using the ToNumberStub if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(a2, &convert_smi);
__ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
__ JumpIfRoot(t0, Heap::kHeapNumberMapRootIndex, &convert_number);
{
// Parameter is not a Number, use the ToNumberStub to convert it.
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(a0);
__ SmiTag(a3);
__ Push(a0, a1, a3);
__ mov(a0, a2);
ToNumberStub stub(masm->isolate());
__ CallStub(&stub);
__ mov(a2, v0);
__ Pop(a0, a1, a3);
{
// Restore the double accumulator value (f0).
Label restore_smi, done_restore;
__ JumpIfSmi(a1, &restore_smi);
__ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
__ jmp(&done_restore);
__ bind(&restore_smi);
__ SmiToDoubleFPURegister(a1, f0, t0);
__ bind(&done_restore);
}
__ SmiUntag(a3);
__ SmiUntag(a0);
}
__ jmp(&convert);
__ bind(&convert_number);
__ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
__ jmp(&done_convert);
__ bind(&convert_smi);
__ SmiToDoubleFPURegister(a2, f2, t0);
__ bind(&done_convert);
// Perform the actual comparison with the accumulator value on the left hand
// side (f0) and the next parameter value on the right hand side (f2).
Label compare_equal, compare_nan, compare_swap;
__ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
__ BranchF(&compare_swap, nullptr, cc, f0, f2);
__ Branch(&loop);
// Left and right hand side are equal, check for -0 vs. +0.
__ bind(&compare_equal);
__ FmoveHigh(t0, reg);
__ Branch(&loop, ne, t0, Operand(0x80000000));
// Result is on the right hand side.
__ bind(&compare_swap);
__ mov_d(f0, f2);
__ mov(a1, a2);
__ jmp(&loop);
// At least one side is NaN, which means that the result will be NaN too.
__ bind(&compare_nan);
__ LoadRoot(a1, Heap::kNanValueRootIndex);
__ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
__ jmp(&loop);
}
__ bind(&done_loop);
__ sll(a3, a3, kPointerSizeLog2);
__ addu(sp, sp, a3);
__ mov(v0, a1);
__ DropAndRet(1);
}
// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -140,6 +140,109 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- ra : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
// -- sp[argc * 8] : receiver
// -----------------------------------
Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
// Load the accumulator with the default return value (either -Infinity or
// +Infinity), with the tagged value in a1 and the double value in f0.
__ LoadRoot(a1, root_index);
__ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
__ mov(a3, a0);
Label done_loop, loop;
__ bind(&loop);
{
// Check if all parameters done.
__ Dsubu(a0, a0, Operand(1));
__ Branch(USE_DELAY_SLOT, &done_loop, lt, a0, Operand(zero_reg));
// Load the next parameter tagged value into a2.
__ dsll(at, a0, kPointerSizeLog2); // In delay slot
__ Daddu(at, at, sp);
__ ld(a2, MemOperand(at));
// Load the double value of the parameter into f2, maybe converting the
// parameter to a number first using the ToNumberStub if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(a2, &convert_smi);
__ ld(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
__ JumpIfRoot(t0, Heap::kHeapNumberMapRootIndex, &convert_number);
{
// Parameter is not a Number, use the ToNumberStub to convert it.
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(a0);
__ SmiTag(a3);
__ Push(a0, a1, a3);
__ mov(a0, a2);
ToNumberStub stub(masm->isolate());
__ CallStub(&stub);
__ mov(a2, v0);
__ Pop(a0, a1, a3);
{
// Restore the double accumulator value (f0).
Label restore_smi, done_restore;
__ JumpIfSmi(a1, &restore_smi);
__ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
__ jmp(&done_restore);
__ bind(&restore_smi);
__ SmiToDoubleFPURegister(a1, f0, t0);
__ bind(&done_restore);
}
__ SmiUntag(a3);
__ SmiUntag(a0);
}
__ jmp(&convert);
__ bind(&convert_number);
__ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
__ jmp(&done_convert);
__ bind(&convert_smi);
__ SmiToDoubleFPURegister(a2, f2, t0);
__ bind(&done_convert);
// Perform the actual comparison with the accumulator value on the left hand
// side (f0) and the next parameter value on the right hand side (f2).
Label compare_equal, compare_nan, compare_swap;
__ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
__ BranchF(&compare_swap, nullptr, cc, f0, f2);
__ Branch(&loop);
// Left and right hand side are equal, check for -0 vs. +0.
__ bind(&compare_equal);
__ FmoveHigh(t0, reg);
__ Branch(&loop, ne, t0, Operand(0x80000000));
// Result is on the right hand side.
__ bind(&compare_swap);
__ mov_d(f0, f2);
__ mov(a1, a2);
__ jmp(&loop);
// At least one side is NaN, which means that the result will be NaN too.
__ bind(&compare_nan);
__ LoadRoot(a1, Heap::kNanValueRootIndex);
__ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
__ jmp(&loop);
}
__ bind(&done_loop);
__ dsll(a3, a3, kPointerSizeLog2);
__ Daddu(sp, sp, a3);
__ mov(v0, a1);
__ DropAndRet(1);
}
// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -1479,6 +1479,118 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- rax : number of arguments
// -- rsp[0] : return address
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
XMMRegister const reg = (kind == MathMaxMinKind::kMin) ? xmm1 : xmm0;
// Load the accumulator with the default return value (either -Infinity or
// +Infinity), with the tagged value in rdx and the double value in xmm0.
__ LoadRoot(rdx, root_index);
__ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ Move(rcx, rax);
Label done_loop, loop;
__ bind(&loop);
{
// Check if all parameters done.
__ testp(rcx, rcx);
__ j(zero, &done_loop);
// Load the next parameter tagged value into rbx.
__ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
// Load the double value of the parameter into xmm1, maybe converting the
// parameter to a number first using the ToNumberStub if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(rbx, &convert_smi);
__ JumpIfRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex, &convert_number);
{
// Parameter is not a Number, use the ToNumberStub to convert it.
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
__ Integer32ToSmi(rcx, rcx);
__ Push(rax);
__ Push(rcx);
__ Push(rdx);
__ movp(rax, rbx);
ToNumberStub stub(masm->isolate());
__ CallStub(&stub);
__ movp(rbx, rax);
__ Pop(rdx);
__ Pop(rcx);
__ Pop(rax);
{
// Restore the double accumulator value (xmm0).
Label restore_smi, done_restore;
__ JumpIfSmi(rdx, &restore_smi, Label::kNear);
__ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ jmp(&done_restore, Label::kNear);
__ bind(&restore_smi);
__ SmiToDouble(xmm0, rdx);
__ bind(&done_restore);
}
__ SmiToInteger32(rcx, rcx);
__ SmiToInteger32(rax, rax);
}
__ jmp(&convert);
__ bind(&convert_number);
__ Movsd(xmm1, FieldOperand(rbx, HeapNumber::kValueOffset));
__ jmp(&done_convert, Label::kNear);
__ bind(&convert_smi);
__ SmiToDouble(xmm1, rbx);
__ bind(&done_convert);
// Perform the actual comparison with the accumulator value on the left hand
// side (xmm0) and the next parameter value on the right hand side (xmm1).
Label compare_equal, compare_nan, compare_swap, done_compare;
__ Ucomisd(xmm0, xmm1);
__ j(parity_even, &compare_nan, Label::kNear);
__ j(cc, &done_compare, Label::kNear);
__ j(equal, &compare_equal, Label::kNear);
// Result is on the right hand side.
__ bind(&compare_swap);
__ Movaps(xmm0, xmm1);
__ Move(rdx, rbx);
__ jmp(&done_compare, Label::kNear);
// At least one side is NaN, which means that the result will be NaN too.
__ bind(&compare_nan);
__ LoadRoot(rdx, Heap::kNanValueRootIndex);
__ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ jmp(&done_compare, Label::kNear);
// Left and right hand side are equal, check for -0 vs. +0.
__ bind(&compare_equal);
__ Movmskpd(kScratchRegister, reg);
__ testl(kScratchRegister, Immediate(1));
__ j(not_zero, &compare_swap);
__ bind(&done_compare);
__ decp(rcx);
__ jmp(&loop);
}
__ bind(&done_loop);
__ PopReturnAddressTo(rcx);
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
__ movp(rax, rdx);
__ Ret();
}
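The epilogue pops the return address, drops the rax argument slots plus
the receiver slot, and returns the tagged accumulator in rax; the
receiver is never read, so the builtins behave the same for any
receiver:

  Math.max.call(null, 1, 2);          // 2
  Math.min.apply(undefined, [3, 1]);  // 1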
// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -427,6 +427,12 @@ class MacroAssembler: public Assembler {
void SmiToInteger64(Register dst, Register src);
void SmiToInteger64(Register dst, const Operand& src);
// Convert smi to double.
void SmiToDouble(XMMRegister dst, Register src) {
SmiToInteger32(kScratchRegister, src);
Cvtlsi2sd(dst, kScratchRegister);
}
// Multiply a positive smi's integer value by a power of two.
// Provides result as 64-bit integer value.
void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
......
......@@ -527,6 +527,7 @@
'test-compiler/OptimizedCodeSharing2': [SKIP],
'test-compiler/OptimizedCodeSharing3': [SKIP],
'test-decls/CrossScriptDynamicLookup': [SKIP],
'test-decls/CrossScriptReferences_Simple2': [SKIP],
'test-decls/Regress425510': [SKIP],
'test-feedback-vector/VectorCallICStates': [SKIP],
'test-heap/CanonicalSharedFunctionInfo': [SKIP],
......@@ -546,6 +547,8 @@
'test-heap/TestCodeFlushingPreAged': [SKIP],
'test-heap/TestCodeFlushing': [SKIP],
'test-heap/WeakFunctionInConstructor': [SKIP],
'test-lockers/IsolateLockingStress': [SKIP],
'test-lockers/SeparateIsolatesLocksNonexclusive': [SKIP],
'test-log-stack-tracer/CFromJSStackTrace': [SKIP],
'test-log-stack-tracer/PureJSStackTrace': [SKIP],
'test-parsing/DestructuringNegativeTests': [SKIP],
......
......@@ -33,6 +33,5 @@ function f(x) { return 1 / Math.min(1, x); }
for (var i = 0; i < 5; ++i) f(1);
%OptimizeFunctionOnNextCall(f);
%OptimizeFunctionOnNextCall(Math.min);
assertEquals(-Infinity, f(-0));
......@@ -29,7 +29,6 @@
// Test Math.max with negative zero as input.
for (var i = 0; i < 5; i++) Math.max(0, 0);
%OptimizeFunctionOnNextCall(Math.max);
Math.max(0, 0);
var r = Math.max(-0, -0);
......