Commit 545df256 authored by Victor Gomes, committed by Commit Bot

[builtins][arm64] Reverse JS arguments for arm64

Change-Id: Ib9a14265692dbcdce05accb78b753d268e77ad9e
Bug: v8:10201
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2150587
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67525}
parent 780665ad
......@@ -143,17 +143,19 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Claim(slot_count);
// Preserve the incoming parameters on the stack.
__ LoadRoot(x10, RootIndex::kTheHoleValue);
__ LoadRoot(x4, RootIndex::kTheHoleValue);
// Compute a pointer to the slot immediately above the location on the
// stack to which the arguments will later be copied.
__ SlotAddress(x2, argc);
#ifndef V8_REVERSE_JSARGS
// Poke the hole (receiver) in the highest slot.
__ Str(x10, MemOperand(x2));
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
__ Str(x4, MemOperand(x2));
#endif
// Store padding, if needed.
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
__ Str(padreg, MemOperand(x2, 1 * kSystemPointerSize));
__ Bind(&already_aligned);
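(Illustrative sketch, not from the commit.) The padding logic above exists because arm64 requires sp to stay 16-byte aligned while stack slots are 8 bytes wide, so the claimed slot count is always rounded up to an even number. The arithmetic, in plain C++:

#include <cstdio>

// Hypothetical model of the claim: argc arguments plus one slot for the
// receiver (the hole), rounded up to an even slot count.
static int SlotsToClaim(int argc) {
  int slots = argc + 1;     // arguments + receiver
  return (slots + 1) & ~1;  // round up to an even number of slots
}

int main() {
  for (int argc = 0; argc <= 3; ++argc) {
    int claimed = SlotsToClaim(argc);
    std::printf("argc=%d: claim %d slots (%d padding)\n", argc, claimed,
                claimed - (argc + 1));
  }
  return 0;
}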
......@@ -162,9 +164,18 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Register count = x2;
Register dst = x10;
Register src = x11;
__ Mov(count, argc);
__ SlotAddress(dst, 0);
#ifdef V8_REVERSE_JSARGS
// Poke the hole (receiver).
__ Str(x4, MemOperand(dst));
__ Add(dst, dst, kSystemPointerSize); // Skip receiver.
__ Add(src, fp,
StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize); // Skip receiver.
#else
__ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
#endif
__ Mov(count, argc);
__ CopyDoubleWords(dst, src, count);
}
......@@ -175,7 +186,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// If argc is odd:
// -- sp[0*kSystemPointerSize]: argument n - 1
// -- ...
// -- sp[(n-1)*kSystemPointerSize]: argument 0
// -- sp[(n-1)*kSystemPointerSize]: argument 1
// -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
// -- sp[(n+1)*kSystemPointerSize]: padding
// -- sp[(n+2)*kSystemPointerSize]: padding
......@@ -184,12 +195,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// If argc is even:
// -- sp[0*kSystemPointerSize]: argument n - 1
// -- ...
// -- sp[(n-1)*kSystemPointerSize]: argument 0
// -- sp[(n-1)*kSystemPointerSize]: argument 1
// -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
// -- sp[(n+1)*kSystemPointerSize]: padding
// -- sp[(n+2)*kSystemPointerSize]: number of arguments (tagged)
// -- sp[(n+3)*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
// NOTE: The order of arguments on the stack is reversed if V8_REVERSE_JSARGS is set.
// Call the function.
__ InvokeFunctionWithNewTarget(x1, x3, argc, CALL_FUNCTION);
......@@ -264,8 +276,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
x4, x5);
__ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
RelocInfo::CODE_TARGET);
__ B(&post_instantiation_deopt_entry);
// Else: use TheHoleValue as receiver for constructor call
......@@ -346,8 +360,15 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Register dst = x10;
Register src = x11;
__ Mov(count, x12);
#ifdef V8_REVERSE_JSARGS
__ Poke(x0, 0); // Add the receiver.
__ SlotAddress(dst, 1); // Skip receiver.
__ Add(src, fp,
StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
#else
__ SlotAddress(dst, 0);
__ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
#endif
__ CopyDoubleWords(dst, src, count);
}
......@@ -496,7 +517,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Poke the receiver into the receiver slot (the highest claimed slot, or slot 0 under V8_REVERSE_JSARGS).
__ LoadTaggedPointerField(
x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
__ Poke(x5, Operand(x10, LSL, kSystemPointerSizeLog2));
__ Poke(x5, __ ReceiverOperand(x10));
// ----------- S t a t e -------------
// -- x1 : the JSGeneratorObject to resume
......@@ -504,26 +525,33 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- x10 : argument count
// -- cp : generator context
// -- lr : return address
// -- sp[arg count] : generator receiver
// -- sp[0 .. arg count - 1] : claimed for args
// -- sp[0 .. arg count] : claimed for receiver and args
// -----------------------------------
// Copy the function arguments from the generator object's register file.
__ LoadTaggedPointerField(
x5,
FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done;
__ Cbz(x10, &done);
#ifdef V8_REVERSE_JSARGS
__ SlotAddress(x12, x10);
__ Add(x5, x5, Operand(x10, LSL, kTaggedSizeLog2));
__ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Bind(&loop);
__ Sub(x10, x10, 1);
__ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
__ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
#else
__ Mov(x12, 0);
__ Bind(&loop);
__ Sub(x10, x10, 1);
__ Add(x11, x5, Operand(x12, LSL, kTaggedSizeLog2));
__ LoadAnyTaggedField(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
__ Poke(x11, Operand(x10, LSL, kSystemPointerSizeLog2));
__ Add(x12, x12, 1);
#endif
__ Cbnz(x10, &loop);
__ Bind(&done);
}
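(Illustrative sketch, not from the commit.) In the V8_REVERSE_JSARGS branch above, both pointers start at the high end and walk down, so stack slot i receives parameter i - 1 while slot 0 keeps the receiver:

#include <cassert>
#include <vector>

// Model of the reversed-order register-file copy: parameters are read from
// the end of the array and stored from the highest argument slot down.
static void CopyParams(std::vector<int>& stack,
                       const std::vector<int>& params) {
  for (int i = static_cast<int>(params.size()); i > 0; --i) {
    stack[i] = params[i - 1];
  }
}

int main() {
  std::vector<int> params = {10, 20, 30};
  std::vector<int> stack(4, 0);  // slot 0 holds the receiver
  CopyParams(stack, params);
  assert(stack[1] == 10 && stack[2] == 20 && stack[3] == 30);
  return 0;
}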
......@@ -862,9 +890,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ SlotAddress(scratch, slots_to_claim);
__ Str(padreg, MemOperand(scratch, -kSystemPointerSize));
#ifdef V8_REVERSE_JSARGS
// Store receiver on the stack.
__ Poke(receiver, 0);
// Store function on the stack.
__ SlotAddress(scratch, argc);
__ Str(function, MemOperand(scratch, kSystemPointerSize));
#else
// Store receiver and function on the stack.
__ SlotAddress(scratch, argc);
__ Stp(receiver, function, MemOperand(scratch));
#endif
// Copy arguments to the stack in a loop, in reverse order.
// x4: argc.
......@@ -874,9 +910,21 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Skip the argument set up if we have no arguments.
__ Cbz(argc, &done);
// scratch has been set to point to the location of the receiver, which
// scratch has been set to point to the location of the function, which
// marks the end of the argument copy.
#ifdef V8_REVERSE_JSARGS
__ SlotAddress(x0, 1); // Skips receiver.
__ Bind(&loop);
// Load the handle.
__ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
// Dereference the handle.
__ Ldr(x11, MemOperand(x11));
// Poke the result into the stack.
__ Str(x11, MemOperand(x0, kSystemPointerSize, PostIndex));
// Loop if we've not reached the end-of-copy marker.
__ Cmp(x0, scratch);
__ B(le, &loop);
#else
__ Bind(&loop);
// Load the handle.
__ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
......@@ -887,6 +935,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Loop if we've not reached the end-of-copy marker.
__ Cmp(sp, scratch);
__ B(lt, &loop);
#endif
__ Bind(&done);
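(Illustrative sketch, not from the commit.) Under V8_REVERSE_JSARGS the entry loop copies forward: argument i is dereferenced from its handle in argv and stored at slot i + 1, leaving slot 0 for the receiver:

#include <cassert>
#include <vector>

using Object = long;  // stand-in for a tagged value

// Model of the reversed-order entry copy: argv holds handles (pointers to
// tagged values); argument i lands in stack slot i + 1.
static void CopyEntryArgs(std::vector<Object>& stack, Object* const* argv,
                          int argc) {
  for (int i = 0; i < argc; ++i) {
    stack[i + 1] = *argv[i];  // dereference the handle, then store
  }
}

int main() {
  Object a = 1, b = 2;
  Object* argv[] = {&a, &b};
  std::vector<Object> stack(3, 0);
  CopyEntryArgs(stack, argv, 2);
  assert(stack[1] == 1 && stack[2] == 2);
  return 0;
}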
......@@ -1418,6 +1467,36 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
}
#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ Mov(slots_to_copy, num_args);
__ SlotAddress(stack_addr, 1);
} else {
// If we're not given an explicit receiver to store, we'll need to copy it
// together with the rest of the arguments.
__ Add(slots_to_copy, num_args, 1);
__ SlotAddress(stack_addr, 0);
}
__ Sub(last_arg_addr, first_arg_index,
Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
__ Add(last_arg_addr, last_arg_addr, kSystemPointerSize);
// Load the final spread argument into spread_arg_out, if necessary.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kSystemPointerSize));
}
__ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy,
TurboAssembler::kDstLessThanSrcAndReverse);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Store "undefined" as the receiver arg if we need to.
Register receiver = x14;
__ LoadRoot(receiver, RootIndex::kUndefinedValue);
__ Poke(receiver, 0);
}
#else // !V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Store "undefined" as the receiver arg if we need to.
Register receiver = x14;
......@@ -1443,6 +1522,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// Copy the rest of the arguments.
__ SlotAddress(stack_addr, 0);
__ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy);
#endif // !V8_REVERSE_JSARGS
}
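(Illustrative sketch, not from the commit.) Interpreter registers live at descending addresses, so last_arg_addr above is derived from first_arg_index by stepping down slots_to_copy - 1 slots; with a final spread, the spread argument sits one further slot down:

#include <cstdint>
#include <cstdio>

constexpr int kPointerSize = 8;

// Model of the last_arg_addr computation: the lowest-addressed argument is
// (slots_to_copy - 1) slots below the first one.
static uintptr_t LastArgAddr(uintptr_t first_arg_index, int slots_to_copy) {
  return first_arg_index - slots_to_copy * kPointerSize + kPointerSize;
}

int main() {
  uintptr_t last = LastArgAddr(0x1000, 3);
  std::printf("last arg at %#lx, spread at %#lx\n",
              static_cast<unsigned long>(last),
              static_cast<unsigned long>(last - kPointerSize));
  return 0;
}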
// static
......@@ -1778,14 +1858,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : thisArg (if argc >= 1)
// -- sp[16] : receiver
// -----------------------------------
// NOTE: The order of arguments on the stack is reversed if V8_REVERSE_JSARGS is set.
ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
Register argc = x0;
Register arg_array = x2;
Register receiver = x1;
Register this_arg = x0;
Register undefined_value = x3;
Register null_value = x4;
Register arg_array = x2;
Register this_arg = x3;
Register undefined_value = x4;
Register null_value = x5;
__ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
__ LoadRoot(null_value, RootIndex::kNullValue);
......@@ -1793,8 +1875,21 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 1. Load receiver into x1, argArray into x2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
#ifdef V8_REVERSE_JSARGS
{
Label done;
__ Mov(this_arg, undefined_value);
__ Mov(arg_array, undefined_value);
__ Peek(receiver, 0);
__ Cmp(argc, Immediate(1));
__ B(lt, &done);
__ Peek(this_arg, kSystemPointerSize);
__ B(eq, &done);
__ Peek(arg_array, 2 * kSystemPointerSize);
__ bind(&done);
}
#else // !V8_REVERSE_JSARGS
{
Register saved_argc = x10;
Register scratch = x11;
// Push two undefined values on the stack, to put it in a consistent state
......@@ -1814,16 +1909,13 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// There are now always three arguments to read, in the slots starting from
// slot argc.
__ SlotAddress(scratch, argc);
__ Mov(saved_argc, argc);
__ Ldp(arg_array, this_arg, MemOperand(scratch)); // Overwrites argc.
__ Ldp(arg_array, this_arg, MemOperand(scratch));
__ Ldr(receiver, MemOperand(scratch, 2 * kSystemPointerSize));
__ Drop(2); // Drop the undefined values we pushed above.
__ DropArguments(saved_argc, TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_arg);
}
#endif // !V8_REVERSE_JSARGS
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_arg);
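(Illustrative sketch, not from the commit.) With reversed arguments the receiver sits at slot 0, so thisArg and argArray can be peeked directly from slots 1 and 2, guarded by argc, with undefined as the default:

#include <cassert>
#include <vector>

using Object = int;
constexpr Object kUndefined = -1;

struct ApplyArgs {
  Object receiver, this_arg, arg_array;
};

// Model of the reversed-order extraction: stack[0] = receiver,
// stack[1] = thisArg (if argc >= 1), stack[2] = argArray (if argc >= 2).
static ApplyArgs LoadApplyArgs(const std::vector<Object>& stack, int argc) {
  ApplyArgs a{stack[0], kUndefined, kUndefined};
  if (argc >= 1) a.this_arg = stack[1];
  if (argc >= 2) a.arg_array = stack[2];
  return a;
}

int main() {
  ApplyArgs a = LoadApplyArgs({7, 8}, 1);
  assert(a.receiver == 7 && a.this_arg == 8 && a.arg_array == kUndefined);
  return 0;
}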
// ----------- S t a t e -------------
// -- x2 : argArray
......@@ -1863,7 +1955,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
// 1. Get the callable to call (passed as receiver) from the stack.
__ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
__ Peek(function, __ ReceiverOperand(argc));
// 2. Handle case with no arguments.
{
......@@ -1879,9 +1971,39 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Bind(&non_zero);
}
Label arguments_ready;
#ifdef V8_REVERSE_JSARGS
// 3. Shift arguments. The direction depends on whether the argument count
// is even or odd, that is, on whether an alignment padding slot exists.
{
Label even;
Register copy_from = x10;
Register copy_to = x11;
Register count = x12;
__ Mov(count, argc); // CopyDoubleWords changes the count argument.
__ Tbz(argc, 0, &even);
// Shift arguments one slot down on the stack (overwriting the original
// receiver).
__ SlotAddress(copy_from, 1);
__ Sub(copy_to, copy_from, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, count);
// Overwrite the stale duplicate of the last argument with padding.
__ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
__ B(&arguments_ready);
// Copy arguments one slot higher in memory, overwriting the original
// receiver and padding.
__ Bind(&even);
__ SlotAddress(copy_from, count);
__ Add(copy_to, copy_from, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, count,
TurboAssembler::kSrcLessThanDst);
__ Drop(2);
}
#else // !V8_REVERSE_JSARGS
// 3. Overwrite the receiver with padding. If argc is odd, this is all we
// need to do.
Label arguments_ready;
__ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
__ Tbnz(argc, 0, &arguments_ready);
......@@ -1902,6 +2024,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Drop two slots. These are copies of the last two arguments.
__ Drop(2);
}
#endif // !V8_REVERSE_JSARGS
// 5. Adjust argument count to make the original first argument the new
// receiver and call the callable.
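(Illustrative sketch, not from the commit.) The parity-dependent shift above can be modeled with the stack as a vector whose front is sp: odd argc shifts the arguments one slot down and pads the stale top slot; even argc shifts them one slot up into the existing padding and then drops two slots:

#include <cassert>
#include <vector>

using Object = int;
constexpr Object kPadding = 0;

// Model: stack[0] is the receiver, stack[1..argc] the arguments, plus one
// padding slot on top when argc is even. Returns the stack after making
// the first argument the new receiver.
static std::vector<Object> ShiftForCall(std::vector<Object> stack, int argc) {
  if (argc & 1) {
    for (int i = 0; i < argc; ++i) stack[i] = stack[i + 1];
    stack[argc] = kPadding;  // overwrite the stale duplicate
  } else {
    for (int i = argc + 1; i >= 2; --i) stack[i] = stack[i - 1];
    stack.erase(stack.begin(), stack.begin() + 2);  // Drop(2)
  }
  return stack;
}

int main() {
  // argc == 2 (even): [receiver, a1, a2, padding] -> [a1, a2].
  std::vector<Object> s = ShiftForCall({9, 1, 2, kPadding}, 2);
  assert(s.size() == 2 && s[0] == 1 && s[1] == 2);
  return 0;
}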
......@@ -1918,6 +2041,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[16] : target (if argc >= 1)
// -- sp[24] : receiver
// -----------------------------------
// NOTE: The order of arguments on the stack is reversed if V8_REVERSE_JSARGS is set.
ASM_LOCATION("Builtins::Generate_ReflectApply");
Register argc = x0;
......@@ -1931,6 +2056,23 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// 1. Load target into x1 (if present), argumentsList into x2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
#ifdef V8_REVERSE_JSARGS
{
Label done;
__ Mov(target, undefined_value);
__ Mov(this_argument, undefined_value);
__ Mov(arguments_list, undefined_value);
__ Cmp(argc, Immediate(1));
__ B(lt, &done);
__ Peek(target, kSystemPointerSize);
__ B(eq, &done);
__ Peek(this_argument, 2 * kSystemPointerSize);
__ Cmp(argc, Immediate(3));
__ B(lt, &done);
__ Peek(arguments_list, 3 * kSystemPointerSize);
__ bind(&done);
}
#else // !V8_REVERSE_JSARGS
{
// Push four undefined values on the stack, to put it in a consistent state
// so that we can always read the three arguments we need from it. The
......@@ -1967,10 +2109,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
__ Drop(4); // Drop the undefined values we pushed above.
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_argument);
}
#endif // !V8_REVERSE_JSARGS
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_argument);
// ----------- S t a t e -------------
// -- x2 : argumentsList
......@@ -1995,6 +2137,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[16] : target
// -- sp[24] : receiver
// -----------------------------------
// NOTE: The order of arguments on the stack is reversed if V8_REVERSE_JSARGS is set.
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
Register argc = x0;
......@@ -2009,6 +2153,24 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// new.target into x3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
#ifdef V8_REVERSE_JSARGS
{
Label done;
__ Mov(target, undefined_value);
__ Mov(arguments_list, undefined_value);
__ Mov(new_target, undefined_value);
__ Cmp(argc, Immediate(1));
__ B(lt, &done);
__ Peek(target, kSystemPointerSize);
__ B(eq, &done);
__ Peek(arguments_list, 2 * kSystemPointerSize);
__ Mov(new_target, target); // new.target defaults to target
__ Cmp(argc, Immediate(3));
__ B(lt, &done);
__ Peek(new_target, 3 * kSystemPointerSize);
__ bind(&done);
}
#else // !V8_REVERSE_JSARGS
{
// Push four undefined values on the stack, to put it in a consistent state
// so that we can always read the three arguments we need from it. The
......@@ -2048,11 +2210,13 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ CmovX(new_target, target, ls); // target if argc <= 2.
__ Drop(4); // Drop the undefined values we pushed above.
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
// Push receiver (undefined).
__ PushArgument(undefined_value);
}
#endif // !V8_REVERSE_JSARGS
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
// Push receiver (undefined).
__ PushArgument(undefined_value);
// ----------- S t a t e -------------
// -- x2 : argumentsList
......@@ -2105,6 +2269,39 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// one slot up or one slot down, as needed.
void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
Register len) {
#ifdef V8_REVERSE_JSARGS
Label even;
Register slots_to_copy = x10;
Register slots_to_claim = x12;
__ Add(slots_to_copy, argc, 1); // Copy with receiver.
__ Mov(slots_to_claim, len);
__ Tbz(slots_to_claim, 0, &even);
// Claim the space we need. If argc is odd, slots_to_claim = len + 1, as we
// need one extra padding slot. If argc is even, the original arguments
// already include a padding slot we can reuse (since len is odd), so
// slots_to_claim = len - 1.
{
Register scratch = x11;
__ Add(slots_to_claim, len, 1);
__ And(scratch, argc, 1);
__ Eor(scratch, scratch, 1);
__ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
}
__ Bind(&even);
__ Claim(slots_to_claim);
// Move the arguments already on the stack, including the receiver.
{
Register src = x11;
Register dst = x12;
__ SlotAddress(src, slots_to_claim);
__ SlotAddress(dst, 0);
__ CopyDoubleWords(dst, src, slots_to_copy);
}
#else // !V8_REVERSE_JSARGS
Label len_odd, exit;
Register slots_to_copy = x10; // If needed.
__ Add(slots_to_copy, argc, 1);
......@@ -2158,6 +2355,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
}
__ Bind(&exit);
#endif // !V8_REVERSE_JSARGS
}
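(Illustrative sketch, not from the commit.) The reversed-order claim arithmetic above, written out: padding exists above the original receiver-plus-arguments block exactly when argc is even (argc + 1 slots were pushed), so an odd len either consumes that slot or adds a new one:

// Model of the V8_REVERSE_JSARGS claim in Generate_PrepareForCopyingVarargs.
static int VarargsSlotsToClaim(int argc, int len) {
  if ((len & 1) == 0) return len;  // parity unchanged, no padding work
  int reuse = (argc & 1) ^ 1;      // 1 if argc is even: reuse the padding
  return len + 1 - 2 * reuse;      // argc odd: len + 1; argc even: len - 1
}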
} // namespace
......@@ -2217,6 +2415,19 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// We do not use the CompareRoot macro as it would do a LoadRoot behind the
// scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
#ifdef V8_REVERSE_JSARGS
Register dst = x16;
__ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
__ SlotAddress(dst, dst);
__ Add(argc, argc, len); // Update new argc.
__ Bind(&loop);
__ Sub(len, len, 1);
__ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
__ CmpTagged(scratch, the_hole_value);
__ Csel(scratch, scratch, undefined_value, ne);
__ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
__ Cbnz(len, &loop);
#else
__ Bind(&loop);
__ Sub(len, len, 1);
__ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
......@@ -2224,9 +2435,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Csel(scratch, scratch, undefined_value, ne);
__ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2));
__ Cbnz(len, &loop);
#endif
}
__ Bind(&done);
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
......@@ -2368,7 +2579,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(x3);
} else {
Label convert_to_object, convert_receiver;
__ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
__ Peek(x3, __ ReceiverOperand(x0));
__ JumpIfSmi(x3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
......@@ -2403,7 +2614,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Bind(&convert_receiver);
}
__ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
__ Poke(x3, __ ReceiverOperand(x0));
}
__ Bind(&done_convert);
......@@ -2474,6 +2685,83 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Bind(&done);
}
#ifdef V8_REVERSE_JSARGS
Label copy_bound_args;
Register total_argc = x15;
Register slots_to_claim = x12;
Register scratch = x10;
Register receiver = x14;
__ Add(total_argc, argc, bound_argc);
__ Peek(receiver, 0);
// Round up slots_to_claim to an even number if it is odd.
__ Add(slots_to_claim, bound_argc, 1);
__ Bic(slots_to_claim, slots_to_claim, 1);
__ Claim(slots_to_claim, kSystemPointerSize);
__ Tbz(bound_argc, 0, &copy_bound_args);
{
Label argc_even;
__ Tbz(argc, 0, &argc_even);
// Arguments count is odd (with the receiver it's even), so there's no
// alignment padding above the arguments and we have to "add" it. We
// claimed bound_argc + 1, since it is odd and it was rounded up. +1 here
// is for stack alignment padding.
// 1. Shift args one slot down.
{
Register copy_from = x11;
Register copy_to = x12;
__ SlotAddress(copy_to, slots_to_claim);
__ Add(copy_from, copy_to, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, argc);
}
// 2. Write padding in the last slot.
__ Add(scratch, total_argc, 1);
__ Str(padreg, MemOperand(sp, scratch, LSL, kSystemPointerSizeLog2));
__ B(&copy_bound_args);
__ Bind(&argc_even);
// Arguments count is even (with the receiver it's odd), so there's an
// alignment padding above the arguments and we can reuse it. We need to
// claim bound_argc - 1, but we claimed bound_argc + 1, since it is odd
// and it was rounded up.
// 1. Drop 2.
__ Drop(2);
// 2. Shift args one slot up.
{
Register copy_from = x11;
Register copy_to = x12;
__ SlotAddress(copy_to, total_argc);
__ Sub(copy_from, copy_to, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, argc,
TurboAssembler::kSrcLessThanDst);
}
}
// If bound_argc is even, there is no alignment massage to do, and we have
// already claimed the correct number of slots (bound_argc).
__ Bind(&copy_bound_args);
// Copy the receiver back.
__ Poke(receiver, 0);
// Copy [[BoundArguments]] to the stack (between the receiver and the original arguments).
{
Label loop;
Register counter = bound_argc;
Register copy_to = x12;
__ Add(bound_argv, bound_argv, FixedArray::kHeaderSize - kHeapObjectTag);
__ SlotAddress(copy_to, 1);
__ Bind(&loop);
__ Sub(counter, counter, 1);
__ LoadAnyTaggedField(scratch,
MemOperand(bound_argv, kTaggedSize, PostIndex));
__ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
__ Cbnz(counter, &loop);
}
// Update argc.
__ Mov(argc, total_argc);
#else // !V8_REVERSE_JSARGS
// Check if we need padding.
Label copy_args, copy_bound_args;
Register total_argc = x15;
......@@ -2546,6 +2834,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Bind(&done);
}
}
#endif // !V8_REVERSE_JSARGS
}
__ Bind(&no_bound_arguments);
}
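(Illustrative sketch, not from the commit.) The net effect of the V8_REVERSE_JSARGS path above: [[BoundArguments]] are spliced in between the receiver (slot 0) and the original arguments, with alignment padding handled separately:

#include <cassert>
#include <vector>

using Object = int;

// Model: receiver stays at slot 0, bound arguments occupy slots
// 1..bound_argc, the original arguments follow (padding omitted).
static std::vector<Object> SpliceBoundArgs(const std::vector<Object>& stack,
                                           int argc,
                                           const std::vector<Object>& bound) {
  std::vector<Object> out;
  out.push_back(stack[0]);  // receiver
  out.insert(out.end(), bound.begin(), bound.end());
  out.insert(out.end(), stack.begin() + 1, stack.begin() + 1 + argc);
  return out;
}

int main() {
  std::vector<Object> out = SpliceBoundArgs({9, 5}, 1, {3, 4});
  assert(out.size() == 4 && out[0] == 9 && out[1] == 3 && out[3] == 5);
  return 0;
}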
......@@ -2563,7 +2852,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
__ LoadAnyTaggedField(x10,
FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
__ Poke(x10, Operand(x0, LSL, kSystemPointerSizeLog2));
__ Poke(x10, __ ReceiverOperand(x0));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
......@@ -2604,7 +2893,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
__ Poke(x1, __ ReceiverOperand(x0));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(
......@@ -2720,7 +3010,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
__ Poke(x1, __ ReceiverOperand(x0));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
......@@ -2793,18 +3084,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// alignment of the arguments.
// If the number of expected arguments is larger than the number of actual
// arguments, the remaining expected slots will be filled with undefined.
// TODO(v8:10201) update comment once reversed arguments order sticks
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
Register argc_actual_minus_expected = x5;
Label create_adaptor_frame, dont_adapt_arguments, stack_overflow,
adapt_arguments_in_place;
Label create_adaptor_frame, dont_adapt_arguments, stack_overflow;
__ Cmp(argc_expected, kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
#ifndef V8_REVERSE_JSARGS
// This optimization is disabled when the arguments are reversed.
Label adapt_arguments_in_place;
Register argc_actual_minus_expected = x5;
// When the difference between argc_actual and argc_expected is odd, we
// create an arguments adaptor frame.
__ Sub(argc_actual_minus_expected, argc_actual, argc_expected);
......@@ -2818,6 +3113,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ TestAndBranchIfAnySet(
w4, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
&adapt_arguments_in_place);
#endif
// -------------------------------------------
// Create an arguments adaptor frame.
......@@ -2828,11 +3124,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
Register copy_from = x10;
Register copy_end = x11;
Register copy_to = x12;
Register copy_end = x11;
Register argc_to_copy = x13;
Register argc_unused_actual = x14;
Register scratch1 = x15, scratch2 = x16;
Register scratch1 = x15;
// We need slots for the expected arguments, with one extra slot for the
// receiver.
......@@ -2846,12 +3141,61 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kSystemPointerSize);
#ifdef V8_REVERSE_JSARGS
// If we don't have enough arguments, fill the remaining expected
// arguments with undefined, otherwise skip this step.
Label enough_arguments;
__ Cmp(argc_actual, argc_expected);
__ Csel(argc_to_copy, argc_expected, argc_actual, ge);
__ Add(argc_to_copy, argc_to_copy, 1); // Include receiver.
__ B(ge, &enough_arguments);
// Fill the remaining expected arguments with undefined.
__ RecordComment("-- Fill slots with undefined --");
Label fill;
// scratch1 still contains the size of the claimed area,
// which is RoundUp(argc_expected + 1, 2).
__ SlotAddress(copy_to, scratch1);
__ SlotAddress(copy_end, argc_to_copy);
__ LoadRoot(scratch1, RootIndex::kUndefinedValue);
// Now we can write pairs of undefineds, potentially overwriting one word
// below copy_end, but that's ok because that slot is still within the
// claimed region. This loop executes at least once because at this point
// we know there is at least one undefined to be pushed and
// argc_to_copy >= 1.
__ Bind(&fill);
__ Stp(scratch1, scratch1,
MemOperand(copy_to, -2 * kSystemPointerSize, PreIndex));
__ Cmp(copy_to, copy_end);
__ B(hi, &fill);
// Enough arguments.
__ Bind(&enough_arguments);
// Store padding if needed, i.e. when the number of expected arguments is even.
__ RecordComment("-- Store padding --");
Label skip_padding;
__ Tbnz(argc_expected, 0, &skip_padding);
__ SlotAddress(scratch1, argc_expected);
__ Str(padreg, MemOperand(scratch1, kSystemPointerSize));
__ bind(&skip_padding);
// Copy arguments.
__ RecordComment("-- Copy actual arguments --");
__ Mov(copy_to, sp);
__ Add(copy_from, fp, 2 * kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, argc_to_copy);
#else // !V8_REVERSE_JSARGS
Register argc_unused_actual = x14;
Register scratch2 = x16;
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
// much as possible.
__ Mov(copy_to, sp);
// (1) If we don't have enough arguments, fill the remaining expected
// arguments with undefined, otherwise skip this step.
Label enough_arguments;
......@@ -2918,6 +3262,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
MemOperand(copy_from, argc_actual, LSL, kSystemPointerSizeLog2));
__ Str(scratch1,
MemOperand(sp, argc_expected, LSL, kSystemPointerSizeLog2));
#endif
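(Illustrative sketch, not from the commit.) The reversed-order fill above stores undefined in pairs, walking down from the top of the claimed (even-sized) area; it may touch one slot below copy_end, which is safe because the argument copy that follows overwrites it:

#include <vector>

using Object = int;
constexpr Object kUndefinedValue = -1;

// Model of the pair-wise fill: write downward from `claimed` until at or
// below `copy_end`; the slot just below copy_end may be written once too.
static void FillWithUndefined(std::vector<Object>& slots, int claimed,
                              int copy_end) {
  for (int to = claimed; to > copy_end; to -= 2) {
    slots[to - 1] = kUndefinedValue;
    slots[to - 2] = kUndefinedValue;
  }
}

int main() {
  std::vector<Object> slots(8, 0);
  FillWithUndefined(slots, 8, 3);  // fills slots 2..7; slot 2 is refilled
                                   // by the argument copy afterwards
  return 0;
}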
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
......@@ -2939,6 +3284,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Ret();
}
#ifndef V8_REVERSE_JSARGS
// -----------------------------------------
// Adapt arguments in the existing frame.
// -----------------------------------------
......@@ -2976,6 +3322,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ B(&dont_adapt_arguments);
}
}
#endif
// -------------------------------------------
// Dont adapt arguments.
......@@ -3492,6 +3839,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- sp[(argc - 1) * 8] : first argument
// -- sp[(argc + 0) * 8] : receiver
// -----------------------------------
// NOTE: The order of arguments on the stack is reversed if V8_REVERSE_JSARGS is set.
Register api_function_address = x1;
Register argc = x2;
......@@ -3561,9 +3909,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
#ifdef V8_REVERSE_JSARGS
__ Add(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
#else
__ Add(scratch, scratch,
Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
__ Add(scratch, scratch, Operand(argc, LSL, kSystemPointerSizeLog2));
#endif
__ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
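(Illustrative sketch, not from the commit; the kArgsLength value is an assumption for illustration.) The two offsets above differ because values_ must point at the first JS argument: with reversed arguments that is one slot above the receiver, at a fixed offset, while in the old order it depends on argc:

#include <cstdio>

constexpr int kArgsLength = 6;  // assumed stand-in for FCA::kArgsLength
constexpr int kPointerSize = 8;

// Byte offsets of FunctionCallbackInfo::values_ from the lowest FCA slot.
static long ValuesOffsetReversed() { return (kArgsLength + 1) * kPointerSize; }
static long ValuesOffsetLegacy(int argc) {
  return (kArgsLength - 1 + argc) * kPointerSize;
}

int main() {
  std::printf("reversed: %ld, legacy(argc=2): %ld\n", ValuesOffsetReversed(),
              ValuesOffsetLegacy(2));
  return 0;
}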
// FunctionCallbackInfo::length_.
......
......@@ -1306,7 +1306,14 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
static_assert(kSystemPointerSize == kDRegSize,
"pointers must be the same size as doubles");
int direction = (mode == kDstLessThanSrc) ? 1 : -1;
if (mode == kDstLessThanSrcAndReverse) {
Add(src, src, Operand(count, LSL, kSystemPointerSizeLog2));
Sub(src, src, kSystemPointerSize);
}
int src_direction = (mode == kDstLessThanSrc) ? 1 : -1;
int dst_direction = (mode == kSrcLessThanDst) ? -1 : 1;
UseScratchRegisterScope scope(this);
VRegister temp0 = scope.AcquireD();
VRegister temp1 = scope.AcquireD();
......@@ -1314,23 +1321,30 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
Label pairs, loop, done;
Tbz(count, 0, &pairs);
Ldr(temp0, MemOperand(src, direction * kSystemPointerSize, PostIndex));
Ldr(temp0, MemOperand(src, src_direction * kSystemPointerSize, PostIndex));
Sub(count, count, 1);
Str(temp0, MemOperand(dst, direction * kSystemPointerSize, PostIndex));
Str(temp0, MemOperand(dst, dst_direction * kSystemPointerSize, PostIndex));
Bind(&pairs);
if (mode == kSrcLessThanDst) {
// Adjust pointers for post-index ldp/stp with negative offset:
Sub(dst, dst, kSystemPointerSize);
Sub(src, src, kSystemPointerSize);
} else if (mode == kDstLessThanSrcAndReverse) {
Sub(src, src, kSystemPointerSize);
}
Bind(&loop);
Cbz(count, &done);
Ldp(temp0, temp1,
MemOperand(src, 2 * direction * kSystemPointerSize, PostIndex));
MemOperand(src, 2 * src_direction * kSystemPointerSize, PostIndex));
Sub(count, count, 2);
Stp(temp0, temp1,
MemOperand(dst, 2 * direction * kSystemPointerSize, PostIndex));
if (mode == kDstLessThanSrcAndReverse) {
Stp(temp1, temp0,
MemOperand(dst, 2 * dst_direction * kSystemPointerSize, PostIndex));
} else {
Stp(temp0, temp1,
MemOperand(dst, 2 * dst_direction * kSystemPointerSize, PostIndex));
}
B(&loop);
// TODO(all): large copies may benefit from using temporary Q registers
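(Illustrative sketch, not from the commit.) A reference model for the three modes; the real routine copies in pairs and may clobber its register arguments, and in mode 2 src/dst name the highest address of each region, whereas the model uses base pointers throughout:

#include <cassert>

enum CopyDoubleWordsModeModel {
  kModelDstLessThanSrc,
  kModelSrcLessThanDst,
  kModelDstLessThanSrcAndReverse
};

// Reference semantics: forward copy, backward copy, or reversing copy with
// dst[i] = src[count - 1 - i].
static void CopyDoubleWordsModel(long* dst, const long* src, int count,
                                 CopyDoubleWordsModeModel mode) {
  if (mode == kModelDstLessThanSrcAndReverse) {
    for (int i = 0; i < count; ++i) dst[i] = src[count - 1 - i];
  } else if (mode == kModelSrcLessThanDst) {
    for (int i = count - 1; i >= 0; --i) dst[i] = src[i];
  } else {
    for (int i = 0; i < count; ++i) dst[i] = src[i];
  }
}

int main() {
  long src[3] = {1, 2, 3}, dst[3] = {0, 0, 0};
  CopyDoubleWordsModel(dst, src, 3, kModelDstLessThanSrcAndReverse);
  assert(dst[0] == 3 && dst[1] == 2 && dst[2] == 1);
  return 0;
}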
......@@ -2093,7 +2107,7 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
// Load receiver to pass it later to DebugOnFunctionCall hook.
Ldr(x4, MemOperand(sp, actual_parameter_count, LSL, kSystemPointerSizeLog2));
Peek(x4, ReceiverOperand(actual_parameter_count));
FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (!new_target.is_valid()) new_target = padreg;
......@@ -2165,6 +2179,14 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Bind(&done);
}
Operand MacroAssembler::ReceiverOperand(Register arg_count) {
#ifdef V8_REVERSE_JSARGS
return Operand(0);
#else
return Operand(arg_count, LSL, kXRegSizeLog2);
#endif
}
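(Illustrative sketch, not from the commit.) ReceiverOperand encodes where the receiver lives under each convention:

// Model: with V8_REVERSE_JSARGS the receiver is at sp[0]; otherwise it sits
// above the arguments, at slot argc.
static int ReceiverSlot(int argc, bool reverse_jsargs) {
  return reverse_jsargs ? 0 : argc;
}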
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
InvokeFlag flag) {
......
......@@ -703,7 +703,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CopySlots(Register dst, Register src, Register slot_count);
// Copy count double words from the address in register src to the address
// in register dst. There are two modes for this function:
// in register dst. There are three modes for this function:
// 1) Address dst must be less than src, or the gap between them must be
// greater than or equal to count double words, otherwise the result is
// unpredictable. This is the default mode.
......@@ -711,10 +711,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// greater than or equal to count double words, otherwise the result is
// unpredictable. In this mode, src and dst specify the last (highest)
// address of the regions to copy from and to.
// 3) The same as mode 1, but the words are copied in reverse order.
// The case where src == dst is not supported.
// The function may corrupt its register arguments. The registers must not
// alias each other.
enum CopyDoubleWordsMode { kDstLessThanSrc, kSrcLessThanDst };
enum CopyDoubleWordsMode {
kDstLessThanSrc,
kSrcLessThanDst,
kDstLessThanSrcAndReverse
};
void CopyDoubleWords(Register dst, Register src, Register count,
CopyDoubleWordsMode mode = kDstLessThanSrc);
......@@ -1762,6 +1767,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
// TODO(victorgomes): inline this function once the V8_REVERSE_JSARGS flag
// is removed.
Operand ReceiverOperand(const Register arg_count);
// ---- SMI and Number Utilities ----
inline void SmiTag(Register dst, Register src);
......