A64: Clean and improve ArgumentsAdaptorTrampoline.

This patch:
 - uses named registers
 - reserves all the stack space in one go to avoid operations on csp
 - uses LDP/STP to copy the arguments

R=jochen@chromium.org

Review URL: https://codereview.chromium.org/184103003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@19725 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 33a46be6
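
Before the diff, a minimal standalone sketch of the copy scheme the patch adopts (plain C++, not V8 code; all names are illustrative). The destination region is reserved in a single step, slots are copied two at a time descending, and one spare slot is claimed so the pairwise copy never writes below the reserved region when the slot count is odd; the spare is dropped afterwards, mirroring the trampoline's Claim(scratch1, 1) / Drop(1) pair:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Copy `count` stack slots downwards, two at a time, after reserving the
// destination region (plus one spare slot) in a single adjustment.
// Returns the new stack top. Hypothetical model of the trampoline's loop.
static uint64_t* CopyTwoByTwo(uint64_t* dst_top, uint64_t* src_top,
                              size_t count) {
  uint64_t* new_top = dst_top - (count + 1);  // one-shot claim, +1 spare
  uint64_t* from = src_top;
  uint64_t* to = dst_top;
  uint64_t* end = src_top - count;
  do {
    from -= 2;  // analogue of LDP with pre-index -2 * kPointerSize
    to -= 2;    // analogue of STP with pre-index -2 * kPointerSize
    to[0] = from[0];
    to[1] = from[1];
  } while (from > end);  // analogue of Cmp + B(hi, &copy_2_by_2)
  return new_top + 1;    // analogue of Drop(1): pop the spare slot
}

int main() {
  uint64_t stack[16] = {0};
  stack[13] = 1; stack[14] = 2; stack[15] = 3;  // three source "slots"
  uint64_t* top = CopyTwoByTwo(stack + 8, stack + 16, 3);
  for (uint64_t* p = top; p < stack + 8; ++p)
    printf("%llu\n", (unsigned long long)*p);  // prints 1, 2, 3
}
```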
@@ -1364,43 +1364,55 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   //  -- x2 : expected number of arguments
   // -----------------------------------
 
+  Register argc_actual = x0;  // Excluding the receiver.
+  Register argc_expected = x2;  // Excluding the receiver.
+  Register function = x1;
+  Register code_entry = x3;
+
   Label invoke, dont_adapt_arguments;
 
   Label enough, too_few;
-  __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
-  __ Cmp(x0, x2);
+  __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+  __ Cmp(argc_actual, argc_expected);
   __ B(lt, &too_few);
-  __ Cmp(x2, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+  __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
   __ B(eq, &dont_adapt_arguments);
 
   {  // Enough parameters: actual >= expected
     EnterArgumentsAdaptorFrame(masm);
 
-    // Calculate copy start address into x10 and end address into x11.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    __ Add(x10, fp, Operand(x0, LSL, kPointerSizeLog2));
-    // Adjust for return address and receiver
-    __ Add(x10, x10, 2 * kPointerSize);
-    __ Sub(x11, x10, Operand(x2, LSL, kPointerSizeLog2));
+    Register copy_start = x10;
+    Register copy_end = x11;
+    Register copy_to = x12;
+    Register scratch1 = x13, scratch2 = x14;
+
+    __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+
+    // Adjust for fp, lr, and the receiver.
+    __ Add(copy_start, fp, 3 * kPointerSize);
+    __ Add(copy_start, copy_start, Operand(argc_actual, LSL, kPointerSizeLog2));
+    __ Sub(copy_end, copy_start, argc_expected);
+    __ Sub(copy_end, copy_end, kPointerSize);
+    __ Mov(copy_to, jssp);
+
+    // Claim space for the arguments, the receiver, and one extra slot.
+    // The extra slot ensures we do not write under jssp. It will be popped
+    // later.
+    __ Add(scratch1, argc_expected, 2 * kPointerSize);
+    __ Claim(scratch1, 1);
 
     // Copy the arguments (including the receiver) to the new stack frame.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    // x10: copy start address
-    // x11: copy end address
-
-    // TODO(all): Should we push values 2 by 2?
-    Label copy;
-    __ Bind(&copy);
-    __ Cmp(x10, x11);
-    __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
-    __ Push(x12);
-    __ B(gt, &copy);
+    Label copy_2_by_2;
+    __ Bind(&copy_2_by_2);
+    __ Ldp(scratch1, scratch2,
+           MemOperand(copy_start, - 2 * kPointerSize, PreIndex));
+    __ Stp(scratch1, scratch2,
+           MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+    __ Cmp(copy_start, copy_end);
+    __ B(hi, &copy_2_by_2);
+
+    // Correct the space allocated for the extra slot.
+    __ Drop(1);
 
     __ B(&invoke);
   }
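
A note on the claim arithmetic above: the 2-by-2 loop writes whole LDP/STP pairs, so when `expected` is even (an odd slot count including the receiver) the final pair lands one slot below `copy_end`. Claiming expected + 2 slots (argc_expected already holds a byte count after the Lsl, so scratch1 is expected * 8 + 16 bytes with a Claim unit size of 1) keeps that overshoot above the new jssp, and Drop(1) returns whatever is left over. The too-few path below claims the same amount. A quick self-check of the bound, assuming 8-byte slots:

```cpp
#include <cassert>
#include <cstddef>

int main() {
  for (size_t expected = 0; expected < 1000; ++expected) {
    size_t slots_to_copy = expected + 1;                   // args + receiver
    size_t slots_written = 2 * ((slots_to_copy + 1) / 2);  // whole LDP/STP pairs
    size_t slots_claimed = expected + 2;                   // one spare slot
    assert(slots_written <= slots_claimed);                // never under jssp
    assert(slots_claimed - slots_written <= 1);            // Drop(1) suffices
  }
}
```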
@@ -1409,52 +1421,57 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     __ Bind(&too_few);
     EnterArgumentsAdaptorFrame(masm);
 
-    // Calculate copy start address into x10 and copy end address into x11.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    // Adjust for return address.
-    __ Add(x11, fp, 1 * kPointerSize);
-    __ Add(x10, x11, Operand(x0, LSL, kPointerSizeLog2));
-    __ Add(x10, x10, 1 * kPointerSize);
+    Register copy_from = x10;
+    Register copy_end = x11;
+    Register copy_to = x12;
+    Register scratch1 = x13, scratch2 = x14;
+
+    __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+    __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
+
+    // Adjust for fp, lr, and the receiver.
+    __ Add(copy_from, fp, 3 * kPointerSize);
+    __ Add(copy_from, copy_from, argc_actual);
+    __ Mov(copy_to, jssp);
+    __ Sub(copy_end, copy_to, 1 * kPointerSize);  // Adjust for the receiver.
+    __ Sub(copy_end, copy_end, argc_actual);
+
+    // Claim space for the arguments, the receiver, and one extra slot.
+    // The extra slot ensures we do not write under jssp. It will be popped
+    // later.
+    __ Add(scratch1, argc_expected, 2 * kPointerSize);
+    __ Claim(scratch1, 1);
 
     // Copy the arguments (including the receiver) to the new stack frame.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    // x10: copy start address
-    // x11: copy end address
-    Label copy;
-    __ Bind(&copy);
-    __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
-    __ Push(x12);
-    __ Cmp(x10, x11);  // Compare before moving to next argument.
-    __ B(ne, &copy);
+    Label copy_2_by_2;
+    __ Bind(&copy_2_by_2);
+    __ Ldp(scratch1, scratch2,
+           MemOperand(copy_from, - 2 * kPointerSize, PreIndex));
+    __ Stp(scratch1, scratch2,
+           MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+    __ Cmp(copy_to, copy_end);
+    __ B(hi, &copy_2_by_2);
+
+    __ Mov(copy_to, copy_end);
 
     // Fill the remaining expected arguments with undefined.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
-    __ Sub(x11, fp, Operand(x2, LSL, kPointerSizeLog2));
-    // Adjust for the arguments adaptor frame and already pushed receiver.
-    __ Sub(x11, x11,
-           StandardFrameConstants::kFixedFrameSizeFromFp + (2 * kPointerSize));
-
-    // TODO(all): Optimize this to use ldp?
+    __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+    __ Add(copy_end, jssp, kPointerSize);
+
     Label fill;
     __ Bind(&fill);
-    __ Push(x10);
-    __ Cmp(jssp, x11);
-    __ B(ne, &fill);
+    __ Stp(scratch1, scratch1,
+           MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+    __ Cmp(copy_to, copy_end);
+    __ B(hi, &fill);
+
+    // Correct the space allocated for the extra slot.
+    __ Drop(1);
   }
 
   // Arguments have been adapted. Now call the entry point.
   __ Bind(&invoke);
-  __ Call(x3);
+  __ Call(code_entry);
 
   // Store offset of return address for deoptimizer.
   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1465,7 +1482,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Call the entry point without adapting the arguments.
   __ Bind(&dont_adapt_arguments);
-  __ Jump(x3);
+  __ Jump(code_entry);
 }
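
The fill loop in the too-few path uses the same trick for the undefined padding: STP stores the same register twice, two slots per iteration, descending until it reaches `copy_end` (one slot above the final jssp), with any odd overshoot again absorbed by the spare claimed slot. A hedged sketch of that pattern (plain C++; names are illustrative):

```cpp
#include <cstdint>

// Fill stack slots with one value, two at a time, descending.
// `end` is one slot above the lowest slot to keep, as in the trampoline.
static void FillTwoByTwo(uint64_t* to, uint64_t* end, uint64_t value) {
  do {
    to -= 2;           // analogue of STP scratch1, scratch1, [copy_to, #-16]!
    to[0] = value;
    to[1] = value;
  } while (to > end);  // analogue of Cmp + B(hi, &fill)
}

int main() {
  uint64_t slots[8] = {0};
  FillTwoByTwo(slots + 6, slots + 2, 42);  // fills slots[2..5] with 42
  return (slots[2] == 42 && slots[5] == 42) ? 0 : 1;
}
```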