Commit 789f9592 authored by Jaideep Bajwa, committed by Commit Bot

PPC/s390: [runtime] Port SpreadCall code to CSA.

Port a971a64d

Original Commit Message:

    We can remove a lot of native code and rely on CallOrConstructVarargs
    to do the stack manipulation for us.

    This will also take advantage of the fast-path for double arrays in
    CallOrConstructDoubleVarargs.

    We can also remove Runtime_SpreadIterableFixed because it isn't used
    anymore. We just call directly into spread_iterable from CSA.
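
To orient the reader, here is a minimal standalone sketch (plain C++; Value, SpreadSource, and both functions are invented stand-ins, not V8 source) of the decision that the hand-written CheckSpreadAndPushToStack helpers deleted below encode, and which the shared CSA builtins now make in one place:

#include <vector>

// Invented stand-ins for illustration only; none of this is V8 API.
using Value = int;

struct SpreadSource {
  bool is_plain_array;          // JS_ARRAY_TYPE with the original ArrayPrototype
  bool protectors_intact;       // array / array-iterator protector cells valid
  std::vector<Value> elements;  // models the FixedArray backing store
};

// Slow path: V8 calls into spread_iterable to run the full iteration
// protocol; modelled here as a plain copy.
std::vector<Value> IterateSpread(const SpreadSource& s) { return s.elements; }

// Fast path vs. slow path decision.
std::vector<Value> EvaluateSpread(const SpreadSource& s) {
  if (s.is_plain_array && s.protectors_intact) {
    return s.elements;        // fast path: read the backing store directly
  }
  return IterateSpread(s);    // anything else goes through full iteration
}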

R=petermarshall@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: Idad830c9aace4151db866c9f863158cd7525e881
Reviewed-on: https://chromium-review.googlesource.com/546575
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Commit-Queue: Jaideep Bajwa <bjaideep@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#46187}
parent 44e578e4
@@ -1354,6 +1354,11 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments. r5, r6, r7 will be modified.
Generate_InterpreterPushArgs(masm, r6, r5, r6, r7);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(r5); // Pass the spread in a register
__ subi(r3, r3, Operand(1)); // Subtract one for spread
}
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
@@ -1400,8 +1405,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Push the arguments. r8, r7, r9 will be modified.
Generate_InterpreterPushArgs(masm, r3, r7, r3, r9);
__ bind(&skip);
__ AssertUndefinedOrAllocationSite(r5, r8);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(r5); // Pass the spread in a register
__ subi(r3, r3, Operand(1)); // Subtract one for spread
} else {
__ AssertUndefinedOrAllocationSite(r5, r8);
}
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r4);
@@ -2732,156 +2741,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Register argc = r3;
Register constructor = r4;
Register new_target = r6;
Register scratch = r5;
Register scratch2 = r9;
Register spread = r7;
Register spread_map = r8;
Register spread_len = r8;
Label runtime_call, push_args;
__ LoadP(spread, MemOperand(sp, 0));
__ JumpIfSmi(spread, &runtime_call);
__ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
// Check that the spread is an array.
__ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
__ bne(&runtime_call);
// Check that we have the original ArrayPrototype.
__ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
__ LoadP(scratch2, NativeContextMemOperand());
__ LoadP(scratch2,
ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ cmp(scratch, scratch2);
__ bne(&runtime_call);
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
__ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
__ bne(&runtime_call);
// Check that the map of the initial array iterator hasn't changed.
__ LoadP(scratch2, NativeContextMemOperand());
__ LoadP(scratch,
ContextMemOperand(scratch2,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ LoadP(scratch2,
ContextMemOperand(
scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ cmp(scratch, scratch2);
__ bne(&runtime_call);
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ lbz(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ cmpi(scratch, Operand(FAST_HOLEY_ELEMENTS));
__ bgt(&runtime_call);
// For non-FastHoley kinds, we can skip the protector check.
__ cmpi(scratch, Operand(FAST_SMI_ELEMENTS));
__ beq(&no_protector_check);
__ cmpi(scratch, Operand(FAST_ELEMENTS));
__ beq(&no_protector_check);
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
__ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
__ bne(&runtime_call);
__ bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
__ SmiUntag(spread_len);
__ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
__ b(&push_args);
__ bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
__ Push(constructor, new_target, argc, spread);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ mr(spread, r3);
__ Pop(constructor, new_target, argc);
__ SmiUntag(argc);
}
{
// Calculate the new nargs including the result of the spread.
__ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
__ SmiUntag(spread_len);
__ bind(&push_args);
// argc += spread_len - 1. Subtract 1 for the spread itself.
__ add(argc, argc, spread_len);
__ subi(argc, argc, Operand(1));
// Pop the spread argument off the stack.
__ Pop(scratch);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be
// overflowed here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ ShiftLeftImm(r0, spread_len, Operand(kPointerSizeLog2));
__ cmp(scratch, r0);
__ bgt(&done); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
__ li(scratch, Operand::Zero());
Label done, push, loop;
__ bind(&loop);
__ cmp(scratch, spread_len);
__ beq(&done);
__ ShiftLeftImm(r0, scratch, Operand(kPointerSizeLog2));
__ add(scratch2, spread, r0);
__ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
__ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(scratch2);
__ addi(scratch, scratch, Operand(1));
__ b(&loop);
__ bind(&done);
}
}
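
For reference, a standalone C++ model of the last two steps of the deleted helper above: the real-stack-limit overflow guard and the push loop that reads holes in the backing store as undefined. FitsOnStack and PushSpread are invented names; this is an illustration, not V8 source.

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

using Element = std::optional<int>;  // std::nullopt models the_hole
constexpr int kUndefined = 0;        // stand-in for the undefined value

// Mirrors the "real stack limit" check: space_left may already be negative
// if the stack overflowed, hence the signed arithmetic (the `bgt` above is
// a signed comparison).
bool FitsOnStack(intptr_t sp, intptr_t real_stack_limit, size_t spread_len) {
  intptr_t space_left = sp - real_stack_limit;
  return space_left > static_cast<intptr_t>(spread_len * sizeof(void*));
}

// Mirrors the push loop: every element becomes an argument on the stack,
// with holes converted to undefined.
void PushSpread(const std::vector<Element>& backing_store,
                std::vector<int>* stack) {
  for (const Element& e : backing_store) {
    stack->push_back(e.has_value() ? *e : kUndefined);
  }
}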
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the constructor to call (can be any Object)
// -----------------------------------
// CheckSpreadAndPushToStack will push r6 to save it.
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3002,18 +2861,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the constructor to call (can be any Object)
// -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
@@ -1347,6 +1347,10 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments.
Generate_InterpreterPushArgs(masm, r5, r4, r5, r6);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(r4); // Pass the spread in a register
__ SubP(r2, r2, Operand(1)); // Subtract one for spread
}
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
@@ -1394,7 +1398,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Generate_InterpreterPushArgs(masm, r2, r6, r2, r7);
__ bind(&skip);
__ AssertUndefinedOrAllocationSite(r4, r7);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(r4); // Pass the spread in a register
__ SubP(r2, r2, Operand(1)); // Subtract one for spread
} else {
__ AssertUndefinedOrAllocationSite(r4, r7);
}
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r3);
@@ -2731,156 +2740,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Register argc = r2;
Register constructor = r3;
Register new_target = r5;
Register scratch = r4;
Register scratch2 = r8;
Register spread = r6;
Register spread_map = r7;
Register spread_len = r7;
Label runtime_call, push_args;
__ LoadP(spread, MemOperand(sp, 0));
__ JumpIfSmi(spread, &runtime_call);
__ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
// Check that the spread is an array.
__ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
__ bne(&runtime_call);
// Check that we have the original ArrayPrototype.
__ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
__ LoadP(scratch2, NativeContextMemOperand());
__ LoadP(scratch2,
ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ CmpP(scratch, scratch2);
__ bne(&runtime_call);
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
__ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
__ bne(&runtime_call);
// Check that the map of the initial array iterator hasn't changed.
__ LoadP(scratch2, NativeContextMemOperand());
__ LoadP(scratch,
ContextMemOperand(scratch2,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ LoadP(scratch2,
ContextMemOperand(
scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ CmpP(scratch, scratch2);
__ bne(&runtime_call);
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ LoadlB(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ CmpP(scratch, Operand(FAST_HOLEY_ELEMENTS));
__ bgt(&runtime_call);
// For non-FastHoley kinds, we can skip the protector check.
__ CmpP(scratch, Operand(FAST_SMI_ELEMENTS));
__ beq(&no_protector_check);
__ CmpP(scratch, Operand(FAST_ELEMENTS));
__ beq(&no_protector_check);
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
__ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
__ bne(&runtime_call);
__ bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
__ SmiUntag(spread_len);
__ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
__ b(&push_args);
__ bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
__ Push(constructor, new_target, argc, spread);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ LoadRR(spread, r2);
__ Pop(constructor, new_target, argc);
__ SmiUntag(argc);
}
{
// Calculate the new nargs including the result of the spread.
__ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
__ SmiUntag(spread_len);
__ bind(&push_args);
// argc += spread_len - 1. Subtract 1 for the spread itself.
__ AddP(argc, argc, spread_len);
__ SubP(argc, argc, Operand(1));
// Pop the spread argument off the stack.
__ Pop(scratch);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be
// overflowed here which will cause scratch to become negative.
__ SubP(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ ShiftLeftP(r0, spread_len, Operand(kPointerSizeLog2));
__ CmpP(scratch, r0);
__ bgt(&done); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
__ LoadImmP(scratch, Operand::Zero());
Label done, push, loop;
__ bind(&loop);
__ CmpP(scratch, spread_len);
__ beq(&done);
__ ShiftLeftP(r0, scratch, Operand(kPointerSizeLog2));
__ AddP(scratch2, spread, r0);
__ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
__ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(scratch2);
__ AddP(scratch, scratch, Operand(1));
__ b(&loop);
__ bind(&done);
}
}
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the constructor to call (can be any Object)
// -----------------------------------
// CheckSpreadAndPushToStack will push r5 to save it.
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3001,18 +2860,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the constructor to call (can be any Object)
// -- r5 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
@@ -174,6 +174,15 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments (on the stack, not including receiver)
// r4 : the target to call
// r5 : the object to spread
Register registers[] = {r4, r3, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r4 : the target to call
@@ -203,6 +212,16 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments (on the stack, not including receiver)
// r4 : the target to call
// r6 : the new target
// r5 : the object to spread
Register registers[] = {r4, r6, r3, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r4 : the target to call
......
@@ -164,6 +164,15 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments (on the stack, not including receiver)
// r3 : the target to call
// r4 : the object to spread
Register registers[] = {r3, r2, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : the target to call
@@ -193,6 +202,16 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments (on the stack, not including receiver)
// r3 : the target to call
// r5 : the new target
// r4 : the object to spread
Register registers[] = {r3, r5, r2, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : the target to call
......
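
Taken together, the descriptors above pin each parameter of the new CSA builtins to a fixed machine register per architecture. A minimal illustrative model (an invented sketch, not V8's CallInterfaceDescriptor machinery) of the two ConstructWithSpread registrations:

#include <array>

// Invented model: an ordered mapping from builtin parameters to registers.
// Parameter order matches the descriptors above: target, new target,
// argument count, spread object.
enum class Reg { r2, r3, r4, r5, r6 };

constexpr std::array<Reg, 4> kConstructWithSpreadPPC = {Reg::r4, Reg::r6,
                                                        Reg::r3, Reg::r5};
constexpr std::array<Reg, 4> kConstructWithSpreadS390 = {Reg::r3, Reg::r5,
                                                         Reg::r2, Reg::r4};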