Commit a971a64d authored by Peter Marshall, committed by Commit Bot

[runtime] Port SpreadCall code to CSA.

We can remove a lot of native code and rely on CallOrConstructVarargs
to do the stack manipulation for us.

This will also take advantage of the fast path for double arrays in
CallOrConstructDoubleVarargs.

We can also remove Runtime_SpreadIterableFixed because it isn't used
anymore. We just call directly into spread_iterable from CSA.

Bug: v8:6488, chromium:704966
Change-Id: I81a18281f062619851134fff7ce88471566ee3b5
Reviewed-on: https://chromium-review.googlesource.com/535615
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46038}
parent 2854ea7b
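In outline, the dispatch that replaces the old per-architecture CheckSpreadAndPushToStack works like this: if the spread is a plain fast-elements JSArray whose array and array-iterator protectors are intact, its elements feed CallOrConstructVarargs directly; double-element arrays take the CallOrConstructDoubleVarargs fast path; everything else falls back to spread_iterable. Below is a minimal standalone C++ model of that decision with simplified kind names; it is an illustration, not code from this CL.

#include <cstdio>

// Simplified elements kinds. The real CSA code also checks the array's
// map, its prototype chain, and the protector cells before trusting the
// backing store.
enum class Kind { PackedSmi, Packed, HoleySmi, Holey, PackedDouble, HoleyDouble, Other };
enum class Path { Varargs, DoubleVarargs, IterateSpread };

Path Dispatch(Kind kind, bool protectors_intact) {
  // An invalidated protector forces the generic spread_iterable path.
  if (!protectors_intact) return Path::IterateSpread;
  switch (kind) {
    case Kind::PackedSmi:
    case Kind::Packed:
    case Kind::HoleySmi:
    case Kind::Holey:
      return Path::Varargs;        // elements pushed as-is
    case Kind::PackedDouble:
    case Kind::HoleyDouble:
      return Path::DoubleVarargs;  // the new fast path for unboxed doubles
    default:
      return Path::IterateSpread;
  }
}

int main() {
  // A packed double array no longer needs a runtime call.
  std::printf("%d\n", Dispatch(Kind::PackedDouble, true) == Path::DoubleVarargs);
  return 0;
}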
@@ -176,6 +176,15 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
// r2 : the object to spread
Register registers[] = {r1, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r1 : the target to call
@@ -205,6 +214,16 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
// r3 : the new target
// r2 : the object to spread
Register registers[] = {r1, r3, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r1 : the target to call
...
@@ -195,6 +195,15 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
// x2 : the object to spread
Register registers[] = {x1, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 : the target to call
@@ -224,6 +233,16 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
// x3 : the new target
// x2 : the object to spread
Register registers[] = {x1, x3, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 : the target to call
...
@@ -1321,6 +1321,11 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments. r2, r4, r5 will be modified.
Generate_InterpreterPushArgs(masm, r3, r2, r4, r5);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(r2); // Pass the spread in a register
__ sub(r0, r0, Operand(1)); // Subtract one for spread
}
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
@@ -1364,7 +1369,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Push the arguments. r5, r4, r6 will be modified.
Generate_InterpreterPushArgs(masm, r0, r4, r5, r6);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(r2); // Pass the spread in a register
__ sub(r0, r0, Operand(1)); // Subtract one for spread
} else {
__ AssertUndefinedOrAllocationSite(r2, r5);
}
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r1);
@@ -2619,161 +2630,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Register argc = r0;
Register constructor = r1;
Register new_target = r3;
Register scratch = r2;
Register scratch2 = r6;
Register spread = r4;
Register spread_map = r5;
Register spread_len = r5;
Label runtime_call, push_args;
__ ldr(spread, MemOperand(sp, 0));
__ JumpIfSmi(spread, &runtime_call);
__ ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
// Check that the spread is an array.
__ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
__ b(ne, &runtime_call);
// Check that we have the original ArrayPrototype.
__ ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
__ ldr(scratch2, NativeContextMemOperand());
__ ldr(scratch2,
ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ cmp(scratch, scratch2);
__ b(ne, &runtime_call);
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
__ b(ne, &runtime_call);
// Check that the map of the initial array iterator hasn't changed.
__ ldr(scratch2, NativeContextMemOperand());
__ ldr(scratch,
ContextMemOperand(scratch2,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ ldr(scratch2,
ContextMemOperand(
scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ cmp(scratch, scratch2);
__ b(ne, &runtime_call);
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ cmp(scratch, Operand(FAST_HOLEY_ELEMENTS));
__ b(hi, &runtime_call);
// For non-FastHoley kinds, we can skip the protector check.
__ cmp(scratch, Operand(FAST_SMI_ELEMENTS));
__ b(eq, &no_protector_check);
__ cmp(scratch, Operand(FAST_ELEMENTS));
__ b(eq, &no_protector_check);
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
__ b(ne, &runtime_call);
__ bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ ldr(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
__ SmiUntag(spread_len);
__ ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
__ b(&push_args);
__ bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
__ Push(constructor);
__ Push(new_target);
__ Push(argc);
__ Push(spread);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ mov(spread, r0);
__ Pop(argc);
__ Pop(new_target);
__ Pop(constructor);
__ SmiUntag(argc);
}
{
// Calculate the new nargs including the result of the spread.
__ ldr(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
__ SmiUntag(spread_len);
__ bind(&push_args);
// argc += spread_len - 1. Subtract 1 for the spread itself.
__ add(argc, argc, spread_len);
__ sub(argc, argc, Operand(1));
// Pop the spread argument off the stack.
__ Pop(scratch);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be
// overflowed here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
__ b(gt, &done); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
__ mov(scratch, Operand(0));
Label done, push, loop;
__ bind(&loop);
__ cmp(scratch, spread_len);
__ b(eq, &done);
__ add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
__ ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
__ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(scratch2);
__ add(scratch, scratch, Operand(1));
__ b(&loop);
__ bind(&done);
}
}
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the constructor to call (can be any Object)
// -----------------------------------
// CheckSpreadAndPushToStack will push r3 to save it.
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2888,19 +2744,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the constructor to call (can be any Object)
// -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
...
@@ -1351,6 +1351,11 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments. x2, x4, x5, x6 will be modified.
Generate_InterpreterPushArgs(masm, x3, x2, x4, x5, x6);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(x2); // Pass the spread in a register
__ Sub(x0, x0, 1); // Subtract one for spread
}
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
@@ -1393,7 +1398,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Push the arguments. x5, x4, x6, x7 will be modified.
Generate_InterpreterPushArgs(masm, x0, x4, x5, x6, x7);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(x2); // Pass the spread in a register
__ Sub(x0, x0, 1); // Subtract one for spread
} else {
__ AssertUndefinedOrAllocationSite(x2, x6);
}
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(x1);
@@ -2736,155 +2747,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Register argc = x0;
Register constructor = x1;
Register new_target = x3;
Register scratch = x2;
Register scratch2 = x6;
Register spread = x4;
Register spread_map = x5;
Register spread_len = x5;
Label runtime_call, push_args;
__ Peek(spread, 0);
__ JumpIfSmi(spread, &runtime_call);
__ Ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
// Check that the spread is an array.
__ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
__ B(ne, &runtime_call);
// Check that we have the original ArrayPrototype.
__ Ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
__ Ldr(scratch2, NativeContextMemOperand());
__ Ldr(scratch2,
ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ Cmp(scratch, scratch2);
__ B(ne, &runtime_call);
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
__ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
__ B(ne, &runtime_call);
// Check that the map of the initial array iterator hasn't changed.
__ Ldr(scratch2, NativeContextMemOperand());
__ Ldr(scratch,
ContextMemOperand(scratch2,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ Ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ Ldr(scratch2,
ContextMemOperand(
scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ Cmp(scratch, scratch2);
__ B(ne, &runtime_call);
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ Ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ Cmp(scratch, FAST_HOLEY_ELEMENTS);
__ B(hi, &runtime_call);
// For non-FastHoley kinds, we can skip the protector check.
__ Cmp(scratch, FAST_SMI_ELEMENTS);
__ B(eq, &no_protector_check);
__ Cmp(scratch, FAST_ELEMENTS);
__ B(eq, &no_protector_check);
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
__ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
__ B(ne, &runtime_call);
__ Bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ Ldrsw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
__ Ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
__ B(&push_args);
__ Bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
__ Push(constructor, new_target, argc, spread);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ Mov(spread, x0);
__ Pop(argc, new_target, constructor);
__ SmiUntag(argc);
}
{
// Calculate the new nargs including the result of the spread.
__ Ldrsw(spread_len,
UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
__ Bind(&push_args);
// argc += spread_len - 1. Subtract 1 for the spread itself.
__ Add(argc, argc, spread_len);
__ Sub(argc, argc, 1);
// Pop the spread argument off the stack.
__ Pop(scratch);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be
// overflowed here which will cause scratch to become negative.
__ Sub(scratch, masm->StackPointer(), scratch);
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
__ Mov(scratch, 0);
Label done, push, loop;
__ Bind(&loop);
__ Cmp(scratch, spread_len);
__ B(eq, &done);
__ Add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
__ Ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
__ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(scratch2);
__ Add(scratch, scratch, Operand(1));
__ B(&loop);
__ Bind(&done);
}
}
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the constructor to call (can be any Object)
// -----------------------------------
// CheckSpreadAndPushToStack will push x3 to save it.
__ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3005,19 +2867,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the constructor to call (can be any Object)
// -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
...
This diff is collapsed.
@@ -17,6 +17,12 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
void CallOrConstructWithArrayLike(Node* target, Node* new_target,
Node* arguments_list, Node* context);
void CallOrConstructDoubleVarargs(Node* target, Node* new_target,
Node* elements, Node* length,
Node* args_count, Node* context,
Node* kind);
void CallOrConstructWithSpread(Node* target, Node* new_target, Node* spread,
Node* args_count, Node* context);
};
} // namespace internal
...
@@ -42,6 +42,15 @@ TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
Node* target = Parameter(ConstructWithSpreadDescriptor::kTarget);
Node* new_target = Parameter(ConstructWithSpreadDescriptor::kNewTarget);
Node* spread = Parameter(ConstructWithSpreadDescriptor::kSpread);
Node* args_count = Parameter(ConstructWithSpreadDescriptor::kArgumentsCount);
Node* context = Parameter(ConstructWithSpreadDescriptor::kContext);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
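The Call counterpart of this builtin sits in the collapsed builtins-call-gen.cc diff above, so it is not visible here. Judging from the CallWithSpreadDescriptor comments (target, argument count, spread), it presumably mirrors the Construct variant minus the new target; a hedged sketch with inferred, not quoted, parameter names:

TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
  // Sketch only: names inferred from the visible ConstructWithSpread pattern.
  Node* target = Parameter(CallWithSpreadDescriptor::kTarget);
  Node* spread = Parameter(CallWithSpreadDescriptor::kSpread);
  Node* args_count = Parameter(CallWithSpreadDescriptor::kArgumentsCount);
  Node* context = Parameter(CallWithSpreadDescriptor::kContext);
  // A plain call has no new target; passing nullptr presumably selects the
  // Call rather than the Construct path inside CallOrConstructWithSpread.
  CallOrConstructWithSpread(target, nullptr, spread, args_count, context);
}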
typedef compiler::Node Node;
Node* ConstructorBuiltinsAssembler::CopyFixedArrayBase(Node* fixed_array) {
...
@@ -76,7 +76,7 @@ namespace internal {
ASM(TailCall_ReceiverIsNotNullOrUndefined) \
ASM(TailCall_ReceiverIsAny) \
ASM(CallVarargs) \
- ASM(CallWithSpread) \
+ TFC(CallWithSpread, CallWithSpread, 1) \
TFC(CallWithArrayLike, CallWithArrayLike, 1) \
ASM(CallForwardVarargs) \
ASM(CallFunctionForwardVarargs) \
@@ -92,7 +92,7 @@ namespace internal {
/* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
ASM(Construct) \
ASM(ConstructVarargs) \
- ASM(ConstructWithSpread) \
+ TFC(ConstructWithSpread, ConstructWithSpread, 1) \
TFC(ConstructWithArrayLike, ConstructWithArrayLike, 1) \
ASM(ConstructForwardVarargs) \
ASM(ConstructFunctionForwardVarargs) \
...
@@ -1009,6 +1009,11 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ add(ecx, ebx);
Generate_InterpreterPushArgs(masm, ecx, ebx);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(ebx); // Pass the spread in a register
__ sub(eax, Immediate(1)); // Subtract one for spread
}
// Call the target.
__ Push(edx); // Re-push return address.
@@ -1152,7 +1157,15 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Pop(edx);
__ Pop(edi);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ PopReturnAddressTo(ecx);
__ Pop(ebx); // Pass the spread in a register
__ PushReturnAddressFrom(ecx);
__ sub(eax, Immediate(1)); // Subtract one for spread
} else {
__ AssertUndefinedOrAllocationSite(ebx);
}
if (mode == InterpreterPushArgsMode::kJSFunction) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2814,178 +2827,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
// Free up some registers.
__ movd(xmm0, edx);
__ movd(xmm1, edi);
Register argc = eax;
Register scratch = ecx;
Register scratch2 = edi;
Register spread = ebx;
Register spread_map = edx;
Register spread_len = edx;
Label runtime_call, push_args;
__ mov(spread, Operand(esp, kPointerSize));
__ JumpIfSmi(spread, &runtime_call);
__ mov(spread_map, FieldOperand(spread, HeapObject::kMapOffset));
// Check that the spread is an array.
__ CmpInstanceType(spread_map, JS_ARRAY_TYPE);
__ j(not_equal, &runtime_call);
// Check that we have the original ArrayPrototype.
__ mov(scratch, FieldOperand(spread_map, Map::kPrototypeOffset));
__ mov(scratch2, NativeContextOperand());
__ cmp(scratch,
ContextOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ j(not_equal, &runtime_call);
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
__ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
Immediate(Smi::FromInt(Isolate::kProtectorValid)));
__ j(not_equal, &runtime_call);
// Check that the map of the initial array iterator hasn't changed.
__ mov(scratch2, NativeContextOperand());
__ mov(scratch,
ContextOperand(scratch2,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ cmp(scratch,
ContextOperand(scratch2,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ j(not_equal, &runtime_call);
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ mov(scratch, FieldOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ cmp(scratch, Immediate(FAST_HOLEY_ELEMENTS));
__ j(above, &runtime_call);
// For non-FastHoley kinds, we can skip the protector check.
__ cmp(scratch, Immediate(FAST_SMI_ELEMENTS));
__ j(equal, &no_protector_check);
__ cmp(scratch, Immediate(FAST_ELEMENTS));
__ j(equal, &no_protector_check);
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
__ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
Immediate(Smi::FromInt(Isolate::kProtectorValid)));
__ j(not_equal, &runtime_call);
__ bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ mov(spread_len, FieldOperand(spread, JSArray::kLengthOffset));
__ SmiUntag(spread_len);
__ mov(spread, FieldOperand(spread, JSArray::kElementsOffset));
__ jmp(&push_args);
__ bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameScope scope(masm, StackFrame::INTERNAL);
// Need to save these on the stack.
__ movd(edi, xmm1);
__ movd(edx, xmm0);
__ Push(edi);
__ Push(edx);
__ SmiTag(argc);
__ Push(argc);
__ Push(spread);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ mov(spread, eax);
__ Pop(argc);
__ SmiUntag(argc);
__ Pop(edx);
__ Pop(edi);
// Free up some registers.
__ movd(xmm0, edx);
__ movd(xmm1, edi);
}
{
// Calculate the new nargs including the result of the spread.
__ mov(spread_len, FieldOperand(spread, FixedArray::kLengthOffset));
__ SmiUntag(spread_len);
__ bind(&push_args);
// argc += spread_len - 1. Subtract 1 for the spread itself.
__ lea(argc, Operand(argc, spread_len, times_1, -1));
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be
// overflowed here which will cause scratch to become negative.
__ neg(scratch);
__ add(scratch, esp);
__ sar(scratch, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
__ cmp(scratch, spread_len);
__ j(greater, &done, Label::kNear); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
Register return_address = edi;
// Pop the return address and spread argument.
__ PopReturnAddressTo(return_address);
__ Pop(scratch);
Register scratch2 = esi;
__ movd(xmm2, esi);
__ mov(scratch, Immediate(0));
Label done, push, loop;
__ bind(&loop);
__ cmp(scratch, spread_len);
__ j(equal, &done, Label::kNear);
__ mov(scratch2, FieldOperand(spread, scratch, times_pointer_size,
FixedArray::kHeaderSize));
__ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &push, Label::kNear);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(scratch2);
__ inc(scratch);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(return_address);
__ movd(esi, xmm2);
__ movd(edi, xmm1);
__ movd(edx, xmm0);
}
}
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object)
// -----------------------------------
// CheckSpreadAndPushToStack will push edx to save it.
__ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3108,19 +2949,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
...
@@ -1312,6 +1312,11 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// This function modifies a2, t4 and t1.
Generate_InterpreterPushArgs(masm, t0, a2, t4, t1);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(a2); // Pass the spread in a register
__ Subu(a0, a0, Operand(1)); // Subtract one for spread
}
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
@@ -1354,7 +1359,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// This function modifies t4, t1 and t0.
Generate_InterpreterPushArgs(masm, a0, t4, t1, t0);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(a2); // Pass the spread in a register
__ Subu(a0, a0, Operand(1)); // Subtract one for spread
} else {
__ AssertUndefinedOrAllocationSite(a2, t0);
}
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(a1);
@@ -2621,151 +2632,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Register argc = a0;
Register constructor = a1;
Register new_target = a3;
Register scratch = t0;
Register scratch2 = t1;
Register spread = a2;
Register spread_map = t3;
Register spread_len = t3;
Register native_context = t4;
Label runtime_call, push_args;
__ lw(spread, MemOperand(sp, 0));
__ JumpIfSmi(spread, &runtime_call);
__ lw(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
__ lw(native_context, NativeContextMemOperand());
// Check that the spread is an array.
__ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
__ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
// Check that we have the original ArrayPrototype.
__ lw(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
__ lw(scratch2, ContextMemOperand(native_context,
Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ Branch(&runtime_call, ne, scratch, Operand(scratch2));
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
__ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Branch(&runtime_call, ne, scratch,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
// Check that the map of the initial array iterator hasn't changed.
__ lw(scratch,
ContextMemOperand(native_context,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ lw(scratch2,
ContextMemOperand(native_context,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ Branch(&runtime_call, ne, scratch, Operand(scratch2));
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
// For non-FastHoley kinds, we can skip the protector check.
__ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
__ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
__ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Branch(&runtime_call, ne, scratch,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
__ bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ lw(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
__ SmiUntag(spread_len);
__ lw(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
__ Branch(&push_args);
__ bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
__ Push(constructor, new_target, argc, spread);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ mov(spread, v0);
__ Pop(constructor, new_target, argc);
__ SmiUntag(argc);
}
{
// Calculate the new nargs including the result of the spread.
__ lw(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
__ SmiUntag(spread_len);
__ bind(&push_args);
// argc += spread_len - 1. Subtract 1 for the spread itself.
__ Addu(argc, argc, spread_len);
__ Subu(argc, argc, Operand(1));
// Pop the spread argument off the stack.
__ Pop(scratch);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be
// overflowed here which will cause scratch to become negative.
__ Subu(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ sll(at, spread_len, kPointerSizeLog2);
__ Branch(&done, gt, scratch, Operand(at)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
__ mov(scratch, zero_reg);
Label done, push, loop;
__ bind(&loop);
__ Branch(&done, eq, scratch, Operand(spread_len));
__ Lsa(scratch2, spread, scratch, kPointerSizeLog2);
__ lw(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
__ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(scratch2);
__ Addu(scratch, scratch, Operand(1));
__ Branch(&loop);
__ bind(&done);
}
}
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
// -----------------------------------
// CheckSpreadAndPushToStack will push a3 to save it.
__ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2945,19 +2811,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (can be any Object)
// -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
...
@@ -1313,6 +1313,11 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// This function modifies a2, t0 and a4.
Generate_InterpreterPushArgs(masm, a3, a2, a4, t0);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(a2); // Pass the spread in a register
__ Subu(a0, a0, Operand(1)); // Subtract one for spread
}
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
@@ -1355,7 +1360,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// This function modifies t0, a4 and a5.
Generate_InterpreterPushArgs(masm, a0, a4, a5, t0);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(a2); // Pass the spread in a register
__ Subu(a0, a0, Operand(1)); // Subtract one for spread
} else {
__ AssertUndefinedOrAllocationSite(a2, t0);
}
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(a1);
@@ -2644,150 +2655,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Register argc = a0;
Register constructor = a1;
Register new_target = a3;
Register scratch = t0;
Register scratch2 = t1;
Register spread = a2;
Register spread_map = a4;
Register spread_len = a4;
Register native_context = a5;
Label runtime_call, push_args;
__ Ld(spread, MemOperand(sp, 0));
__ JumpIfSmi(spread, &runtime_call);
__ Ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
__ Ld(native_context, NativeContextMemOperand());
// Check that the spread is an array.
__ Lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
__ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
// Check that we have the original ArrayPrototype.
__ Ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
__ Ld(scratch2, ContextMemOperand(native_context,
Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ Branch(&runtime_call, ne, scratch, Operand(scratch2));
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
__ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Branch(&runtime_call, ne, scratch,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
// Check that the map of the initial array iterator hasn't changed.
__ Ld(scratch,
ContextMemOperand(native_context,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ Ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ Ld(scratch2,
ContextMemOperand(native_context,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ Branch(&runtime_call, ne, scratch, Operand(scratch2));
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ Lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
// For non-FastHoley kinds, we can skip the protector check.
__ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
__ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
__ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Branch(&runtime_call, ne, scratch,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
__ bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ Lw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
__ Ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
__ Branch(&push_args);
__ bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
__ Push(constructor, new_target, argc, spread);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ mov(spread, v0);
__ Pop(constructor, new_target, argc);
__ SmiUntag(argc);
}
{
// Calculate the new nargs including the result of the spread.
__ Lw(spread_len,
UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
__ bind(&push_args);
// argc += spread_len - 1. Subtract 1 for the spread itself.
__ Daddu(argc, argc, spread_len);
__ Dsubu(argc, argc, Operand(1));
// Pop the spread argument off the stack.
__ Pop(scratch);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be
// overflowed here which will cause scratch to become negative.
__ Dsubu(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ dsll(at, spread_len, kPointerSizeLog2);
__ Branch(&done, gt, scratch, Operand(at)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
__ mov(scratch, zero_reg);
Label done, push, loop;
__ bind(&loop);
__ Branch(&done, eq, scratch, Operand(spread_len));
__ Dlsa(scratch2, spread, scratch, kPointerSizeLog2);
__ Ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
__ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(scratch2);
__ Daddu(scratch, scratch, Operand(1));
__ Branch(&loop);
__ bind(&done);
}
}
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
// -----------------------------------
// CheckSpreadAndPushToStack will push a3 to save it.
__ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2966,19 +2833,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (can be any Object)
// -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
...
@@ -1072,6 +1072,11 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// rbx and rdx will be modified.
Generate_InterpreterPushArgs(masm, rcx, rbx, rdx);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(rbx); // Pass the spread in a register
__ subp(rax, Immediate(1)); // Subtract one for spread
}
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
@@ -1123,10 +1128,17 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// rcx and r8 will be modified.
Generate_InterpreterPushArgs(masm, rax, rcx, r8);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(rbx); // Pass the spread in a register
__ subp(rax, Immediate(1)); // Subtract one for spread
// Push return address in preparation for the tail-call.
__ PushReturnAddressFrom(kScratchRegister);
} else {
__ PushReturnAddressFrom(kScratchRegister);
__ AssertUndefinedOrAllocationSite(rbx);
}
if (mode == InterpreterPushArgsMode::kJSFunction) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2910,148 +2922,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Label runtime_call, push_args;
// Load the spread argument into rbx.
__ movp(rbx, Operand(rsp, kPointerSize));
__ JumpIfSmi(rbx, &runtime_call);
// Load the map of the spread into r15.
__ movp(r15, FieldOperand(rbx, HeapObject::kMapOffset));
// Load native context into r14.
__ movp(r14, NativeContextOperand());
// Check that the spread is an array.
__ CmpInstanceType(r15, JS_ARRAY_TYPE);
__ j(not_equal, &runtime_call);
// Check that we have the original ArrayPrototype.
__ movp(rcx, FieldOperand(r15, Map::kPrototypeOffset));
__ cmpp(rcx, ContextOperand(r14, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ j(not_equal, &runtime_call);
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(rcx, Heap::kArrayIteratorProtectorRootIndex);
__ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
Smi::FromInt(Isolate::kProtectorValid));
__ j(not_equal, &runtime_call);
// Check that the map of the initial array iterator hasn't changed.
__ movp(rcx,
ContextOperand(r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ cmpp(rcx, ContextOperand(
r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ j(not_equal, &runtime_call);
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ movzxbp(rcx, FieldOperand(r15, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(rcx);
__ cmpp(rcx, Immediate(FAST_HOLEY_ELEMENTS));
__ j(above, &runtime_call);
// For non-FastHoley kinds, we can skip the protector check.
__ cmpp(rcx, Immediate(FAST_SMI_ELEMENTS));
__ j(equal, &no_protector_check);
__ cmpp(rcx, Immediate(FAST_ELEMENTS));
__ j(equal, &no_protector_check);
// Check the ArrayProtector cell.
__ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
__ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
Smi::FromInt(Isolate::kProtectorValid));
__ j(not_equal, &runtime_call);
__ bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ SmiToInteger32(r9, FieldOperand(rbx, JSArray::kLengthOffset));
__ movp(rbx, FieldOperand(rbx, JSArray::kElementsOffset));
__ jmp(&push_args);
__ bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rdi); // target
__ Push(rdx); // new target
__ Integer32ToSmi(rax, rax);
__ Push(rax); // nargs
__ Push(rbx);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ movp(rbx, rax);
__ Pop(rax); // nargs
__ SmiToInteger32(rax, rax);
__ Pop(rdx); // new target
__ Pop(rdi); // target
}
{
// Calculate the new nargs including the result of the spread.
__ SmiToInteger32(r9, FieldOperand(rbx, FixedArray::kLengthOffset));
__ bind(&push_args);
// rax += r9 - 1. Subtract 1 for the spread itself.
__ leap(rax, Operand(rax, r9, times_1, -1));
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
__ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
__ subp(rcx, kScratchRegister);
__ sarp(rcx, Immediate(kPointerSizeLog2));
// Check if the arguments will overflow the stack.
__ cmpp(rcx, r9);
__ j(greater, &done, Label::kNear); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
// Pop the return address and spread argument.
__ PopReturnAddressTo(r8);
__ Pop(rcx);
__ Set(rcx, 0);
Label done, push, loop;
__ bind(&loop);
__ cmpl(rcx, r9);
__ j(equal, &done, Label::kNear);
__ movp(kScratchRegister, FieldOperand(rbx, rcx, times_pointer_size,
FixedArray::kHeaderSize));
__ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &push, Label::kNear);
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(kScratchRegister);
__ incl(rcx);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(r8);
}
}
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the target to call (can be any Object)
// -----------------------------------
// CheckSpreadAndPushToStack will push rdx to save it.
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3174,19 +3044,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
...
@@ -289,7 +289,7 @@ Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode,
// static
Callable CodeFactory::CallWithSpread(Isolate* isolate) {
return Callable(isolate->builtins()->CallWithSpread(),
- CallTrampolineDescriptor(isolate));
+ CallWithSpreadDescriptor(isolate));
}
// static
@@ -326,7 +326,7 @@ Callable CodeFactory::Construct(Isolate* isolate) {
// static
Callable CodeFactory::ConstructWithSpread(Isolate* isolate) {
return Callable(isolate->builtins()->ConstructWithSpread(),
- ConstructTrampolineDescriptor(isolate));
+ ConstructWithSpreadDescriptor(isolate));
}
// static
...
@@ -592,19 +592,26 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
  SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
  int const arg_count = static_cast<int>(p.arity() - 2);
+  int const spread_index = arg_count;
+  int const new_target_index = arg_count + 1;
  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
  Callable callable = CodeFactory::ConstructWithSpread(isolate());
  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+      isolate(), zone(), callable.descriptor(), arg_count, flags);
  Node* stub_code = jsgraph()->HeapConstant(callable.code());
-  Node* stub_arity = jsgraph()->Int32Constant(arg_count);
-  Node* new_target = node->InputAt(arg_count + 1);
+  Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
+  Node* new_target = node->InputAt(new_target_index);
+  Node* spread = node->InputAt(spread_index);
  Node* receiver = jsgraph()->UndefinedConstant();
-  node->RemoveInput(arg_count + 1);  // Drop new target.
+  DCHECK(new_target_index > spread_index);
+  node->RemoveInput(new_target_index);  // Drop new target.
+  node->RemoveInput(spread_index);
  node->InsertInput(zone(), 0, stub_code);
  node->InsertInput(zone(), 2, new_target);
-  node->InsertInput(zone(), 3, stub_arity);
-  node->InsertInput(zone(), 4, receiver);
+  node->InsertInput(zone(), 3, stack_arg_count);
+  node->InsertInput(zone(), 4, spread);
+  node->InsertInput(zone(), 5, receiver);
  NodeProperties::ChangeOp(node, common()->Call(desc));
}
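For concreteness, a standalone C++17 sketch of the input reshuffle performed above, simulated on a vector of labels for `new F(a, b, ...s)` (arity 5, arg_count 3). The labels are placeholders, not V8 API:

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Inputs of the JSConstructWithSpread node: target, the arguments with the
  // spread last, then the new target.
  std::vector<std::string> inputs = {"target", "a", "b", "spread",
                                     "new_target"};
  int const arg_count = 3;                     // a, b, spread
  int const spread_index = arg_count;          // 3
  int const new_target_index = arg_count + 1;  // 4

  std::string new_target = inputs[new_target_index];
  std::string spread = inputs[spread_index];
  inputs.erase(inputs.begin() + new_target_index);  // drop new target
  inputs.erase(inputs.begin() + spread_index);      // drop spread

  inputs.insert(inputs.begin() + 0, "stub_code");
  inputs.insert(inputs.begin() + 2, new_target);
  inputs.insert(inputs.begin() + 3, "stack_arg_count=2");  // arg_count - 1
  inputs.insert(inputs.begin() + 4, spread);
  inputs.insert(inputs.begin() + 5, "receiver");

  // Register-passed values now lead in ConstructWithSpreadDescriptor order
  // (target, new target, argument count, spread); the receiver and the plain
  // arguments a, b stay behind them for the stack.
  std::vector<std::string> expected = {
      "stub_code", "target",   "new_target", "stack_arg_count=2",
      "spread",    "receiver", "a",          "b"};
  assert(inputs == expected);
}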
@@ -648,14 +655,18 @@ void JSGenericLowering::LowerJSCall(Node* node) {
void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
  SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
  int const arg_count = static_cast<int>(p.arity() - 2);
-  Callable callable = CodeFactory::CallWithSpread(isolate());
+  int const spread_index = static_cast<int>(p.arity() + 1);
  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::CallWithSpread(isolate());
  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+      isolate(), zone(), callable.descriptor(), arg_count, flags);
  Node* stub_code = jsgraph()->HeapConstant(callable.code());
-  Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+  // We pass the spread in a register, not on the stack.
+  Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
  node->InsertInput(zone(), 0, stub_code);
-  node->InsertInput(zone(), 2, stub_arity);
+  node->InsertInput(zone(), 2, stack_arg_count);
+  node->InsertInput(zone(), 3, node->InputAt(spread_index));
+  node->RemoveInput(spread_index + 1);
  NodeProperties::ChangeOp(node, common()->Call(desc));
}
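The call case can be sketched the same way (standalone C++17, labels only, not V8 API) for `f(a, ...s)` with arity 4 and arg_count 2; note how spread_index = arity + 1 already accounts for the two inserts that happen first:

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Inputs of the JSCallWithSpread node: target, receiver, args (spread last).
  std::vector<std::string> inputs = {"target", "receiver", "a", "spread"};
  int const arity = 4;
  int const spread_index = arity + 1;  // spread's index after the two inserts

  inputs.insert(inputs.begin() + 0, "stub_code");
  inputs.insert(inputs.begin() + 2, "stack_arg_count=1");  // arg_count - 1
  std::string spread = inputs[spread_index];
  inputs.insert(inputs.begin() + 3, spread);  // spread moves to a register
  inputs.erase(inputs.begin() + spread_index + 1);  // drop the stack copy

  std::vector<std::string> expected = {"stub_code",         "target",
                                       "stack_arg_count=1", "spread",
                                       "receiver",          "a"};
  assert(inputs == expected);
}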
...
@@ -175,6 +175,15 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments (on the stack, not including receiver)
// edi : the target to call
// ebx : the object to spread
Register registers[] = {edi, eax, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // edi : the target to call
@@ -204,6 +213,16 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments (on the stack, not including receiver)
// edi : the target to call
// edx : the new target
// ebx : the object to spread
Register registers[] = {edi, edx, eax, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // edi : the target to call
...
@@ -478,6 +478,15 @@ void CallForwardVarargsDescriptor::InitializePlatformIndependent(
                                      machine_types);
}
void CallWithSpreadDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kArgumentsCount, kArgumentsList
MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void CallWithArrayLikeDescriptor::InitializePlatformIndependent(
    CallInterfaceDescriptorData* data) {
  // kTarget, kArgumentsList
@@ -508,6 +517,16 @@ void ConstructForwardVarargsDescriptor::InitializePlatformIndependent(
                                      machine_types);
}
void ConstructWithSpreadDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kArgumentsCount, kSpread
MachineType machine_types[] = {MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::Int32(),
MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformIndependent(
    CallInterfaceDescriptorData* data) {
  // kTarget, kNewTarget, kArgumentsList
...
@@ -46,12 +46,14 @@ class PlatformInterfaceDescriptor;
  V(CallICTrampoline) \
  V(CallVarargs) \
  V(CallForwardVarargs) \
V(CallWithSpread) \
  V(CallWithArrayLike) \
  V(CallConstruct) \
  V(CallTrampoline) \
  V(ConstructStub) \
  V(ConstructVarargs) \
  V(ConstructForwardVarargs) \
V(ConstructWithSpread) \
  V(ConstructWithArrayLike) \
  V(ConstructTrampoline) \
  V(TransitionElementsKind) \
@@ -593,6 +595,13 @@ class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
                     CallInterfaceDescriptor)
};
class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kArgumentsCount, kSpread)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallWithSpreadDescriptor,
CallInterfaceDescriptor)
};
class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kTarget, kArgumentsList)
@@ -615,6 +624,13 @@ class ConstructForwardVarargsDescriptor : public CallInterfaceDescriptor {
      ConstructForwardVarargsDescriptor, CallInterfaceDescriptor)
};
class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsCount, kSpread)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructWithSpreadDescriptor,
CallInterfaceDescriptor)
};
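Purely illustrative, assuming the ia32 register blocks shown earlier in this CL ({edi, eax, ebx} and {edi, edx, eax, ebx}): the construct descriptor is the call descriptor plus a new-target slot, with the parameter order above mapped positionally onto each platform's register list.

#include <cassert>
#include <string>
#include <utility>
#include <vector>

int main() {
  using Slot = std::pair<std::string, std::string>;  // parameter -> register
  std::vector<Slot> call = {
      {"kTarget", "edi"}, {"kArgumentsCount", "eax"}, {"kSpread", "ebx"}};
  std::vector<Slot> construct = {{"kTarget", "edi"},
                                 {"kNewTarget", "edx"},
                                 {"kArgumentsCount", "eax"},
                                 {"kSpread", "ebx"}};

  // Only the new-target slot differs between the two descriptors.
  assert(construct.size() == call.size() + 1);
  assert(construct.front() == call.front());
  assert(construct.back() == call.back());
}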
class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList)
...
@@ -174,6 +174,15 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments (on the stack, not including receiver)
// a1 : the target to call
// a2 : the object to spread
Register registers[] = {a1, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // a1 : the target to call
@@ -203,6 +212,16 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments (on the stack, not including receiver)
// a1 : the target to call
// a3 : the new target
// a2 : the object to spread
Register registers[] = {a1, a3, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // a1 : the target to call
...
@@ -174,6 +174,15 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments (on the stack, not including receiver)
// a1 : the target to call
// a2 : the object to spread
Register registers[] = {a1, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // a1 : the target to call
@@ -203,6 +212,16 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments (on the stack, not including receiver)
// a1 : the target to call
// a3 : the new target
// a2 : the object to spread
Register registers[] = {a1, a3, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // a1 : the target to call
...
@@ -641,33 +641,5 @@ RUNTIME_FUNCTION(Runtime_SpreadIterablePrepare) {
  return *spread;
}
RUNTIME_FUNCTION(Runtime_SpreadIterableFixed) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, spread, 0);
// The caller should check if proper iteration is necessary.
Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, spread,
Execution::Call(isolate, spread_iterable_function,
isolate->factory()->undefined_value(), 1, &spread));
// Create a new FixedArray and put the result of the spread into it.
Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
uint32_t spread_length;
CHECK(spread_array->length()->ToArrayIndex(&spread_length));
Handle<FixedArray> result = isolate->factory()->NewFixedArray(spread_length);
ElementsAccessor* accessor = spread_array->GetElementsAccessor();
for (uint32_t i = 0; i < spread_length; i++) {
DCHECK(accessor->HasElement(*spread_array, i));
Handle<Object> element = accessor->Get(spread_array, i);
result->set(i, *element);
}
return *result;
}
}  // namespace internal
}  // namespace v8
@@ -36,27 +36,26 @@ namespace internal {
// A variable number of arguments is specified by a -1, additional restrictions
// are specified by inline comments
#define FOR_EACH_INTRINSIC_ARRAY(F) \
  F(SpecialArrayFunctions, 0, 1) \
  F(TransitionElementsKind, 2, 1) \
  F(RemoveArrayHoles, 2, 1) \
  F(MoveArrayContents, 2, 1) \
  F(EstimateNumberOfElements, 1, 1) \
  F(GetArrayKeys, 2, 1) \
  F(NewArray, -1 /* >= 3 */, 1) \
  F(FunctionBind, -1, 1) \
  F(NormalizeElements, 1, 1) \
  F(GrowArrayElements, 2, 1) \
  F(HasComplexElements, 1, 1) \
  F(IsArray, 1, 1) \
  F(ArrayIsArray, 1, 1) \
  F(FixedArrayGet, 2, 1) \
  F(FixedArraySet, 3, 1) \
  F(ArraySpeciesConstructor, 1, 1) \
  F(ArrayIncludes_Slow, 3, 1) \
  F(ArrayIndexOf, 3, 1) \
-  F(SpreadIterablePrepare, 1, 1) \
-  F(SpreadIterableFixed, 1, 1)
+  F(SpreadIterablePrepare, 1, 1)

#define FOR_EACH_INTRINSIC_ATOMICS(F) \
  F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
...
@@ -174,6 +174,15 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments (on the stack, not including receiver)
// rdi : the target to call
// rbx : the object to spread
Register registers[] = {rdi, rax, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // rdi : the target to call
@@ -203,6 +212,16 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments (on the stack, not including receiver)
// rdi : the target to call
// rdx : the new target
// rbx : the object to spread
Register registers[] = {rdi, rdx, rax, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // rdi : the target to call
...
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
function tests() {
"use strict"
function countArgs() { return arguments.length; }
// Array params
assertEquals(3, countArgs(...[1, 2, 3])); // Smi
assertEquals(4, countArgs(...[1, 2, , 3])); // HoleySmi
assertEquals(3, countArgs(...[1.1, 2, 3])); // Double
assertEquals(4, countArgs(...[1.1, 2, , 3])); // HoleyDouble
assertEquals(3, countArgs(...[{valueOf: () => 0}, 1.1, '2'])); // Object
assertEquals(
4, countArgs(...[{valueOf: () => 0}, 1.1, , '2'])); // HoleyObject
// Smi param
assertThrows(() => countArgs(...1), TypeError);
// Object param
assertThrows(() => countArgs(...{0: 0}), TypeError);
// Strict arguments
assertEquals(0, countArgs(...arguments));
}
tests();
tests();
%OptimizeFunctionOnNextCall(tests);
tests();
function testRest(...args) {
function countArgs() { return arguments.length; }
assertEquals(3, countArgs(...args));
assertEquals(4, countArgs(1, ...args));
assertEquals(5, countArgs(1, 2, ...args));
}
testRest(1, 2, 3);
testRest(1, 2, 3);
%OptimizeFunctionOnNextCall(testRest);
testRest(1, 2, 3);
function testRestAndArgs(a, b, ...args) {
function countArgs() { return arguments.length; }
assertEquals(1, countArgs(...args));
assertEquals(2, countArgs(b, ...args));
assertEquals(3, countArgs(a, b, ...args));
assertEquals(4, countArgs(1, a, b, ...args));
assertEquals(5, countArgs(1, 2, a, b, ...args));
}
testRestAndArgs(1, 2, 3);
testRestAndArgs(1, 2, 3);
%OptimizeFunctionOnNextCall(testRestAndArgs);
testRestAndArgs(1, 2, 3);
function testArgumentsStrict() {
"use strict"
function countArgs() { return arguments.length; }
assertEquals(3, countArgs(...arguments));
assertEquals(4, countArgs(1, ...arguments));
assertEquals(5, countArgs(1, 2, ...arguments));
}
testArgumentsStrict(1, 2, 3);
testArgumentsStrict(1, 2, 3);
%OptimizeFunctionOnNextCall(testArgumentsStrict);
testArgumentsStrict(1, 2, 3);
function testArgumentsSloppy() {
function countArgs() { return arguments.length; }
assertEquals(3, countArgs(...arguments));
assertEquals(4, countArgs(1, ...arguments));
assertEquals(5, countArgs(1, 2, ...arguments));
}
testArgumentsSloppy(1, 2, 3);
testArgumentsSloppy(1, 2, 3);
%OptimizeFunctionOnNextCall(testArgumentsSloppy);
testArgumentsSloppy(1, 2, 3);
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function testSpreadCallsStrict() {
+// Flags: --allow-natives-syntax
+
+function testSpreadCallsStrict() {
  "use strict"
  function countArgs() { return arguments.length; }
@@ -158,7 +160,10 @@
  // Interleaved spread/unspread args
  assertEquals(36, O.sum(0, ...[1], 2, 3, ...[4, 5], 6, 7, 8));
  assertEquals(45, O.sum(0, ...[1], 2, 3, ...[4, 5], 6, 7, 8, ...[9]));
-})();
+};
+testSpreadCallsStrict();
+%OptimizeFunctionOnNextCall(testSpreadCallsStrict);
+testSpreadCallsStrict();

(function testSpreadCallsSloppy() {
...