A64: Clean and improve ArgumentsAdaptorTrampoline.

This patch:
 - uses named registers
 - reserves all the stack space in one go to avoid operations on csp
 - uses LDP/STP to copy the arguments (the copy pattern is sketched below)
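
For reference, here is a minimal standalone C++ sketch (hypothetical code, not
part of the patch) of the access pattern the new Ldp/Stp loops use: slots are
copied two at a time, top-down, through pre-decremented pointers.

    #include <cstdint>
    #include <cstdio>

    // Copies `slots` pointer-sized slots, two per iteration, walking down.
    // Each iteration models one Ldp (load pair) plus one Stp (store pair).
    // An odd count is rounded up to a whole pair, so the final store can dip
    // one slot below the destination -- the trampoline claims a spare stack
    // slot precisely to make that overshoot safe.
    void CopyPairsDown(const uint64_t* from, uint64_t* to, int slots) {
      int pairs = (slots + 1) / 2;
      for (int i = 0; i < pairs; i++) {
        from -= 2;  // models the PreIndex addressing mode
        to -= 2;
        to[0] = from[0];
        to[1] = from[1];
      }
    }

    int main() {
      uint64_t src[6] = {1, 2, 3, 4, 5, 6};
      uint64_t dst[6] = {0};
      CopyPairsDown(src + 6, dst + 6, 6);  // pointers start past the top slot
      for (uint64_t v : dst) std::printf("%llu ", (unsigned long long)v);
      std::printf("\n");  // prints: 1 2 3 4 5 6
      return 0;
    }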

R=jochen@chromium.org

Review URL: https://codereview.chromium.org/184103003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@19725 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 33a46be6
@@ -1364,43 +1364,55 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   //  -- x2 : expected number of arguments
   // -----------------------------------
 
+  Register argc_actual = x0;  // Excluding the receiver.
+  Register argc_expected = x2;  // Excluding the receiver.
+  Register function = x1;
+  Register code_entry = x3;
+
   Label invoke, dont_adapt_arguments;
 
   Label enough, too_few;
-  __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
-  __ Cmp(x0, x2);
+  __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+  __ Cmp(argc_actual, argc_expected);
   __ B(lt, &too_few);
-  __ Cmp(x2, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+  __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
   __ B(eq, &dont_adapt_arguments);
 
   {  // Enough parameters: actual >= expected
     EnterArgumentsAdaptorFrame(masm);
 
-    // Calculate copy start address into x10 and end address into x11.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    __ Add(x10, fp, Operand(x0, LSL, kPointerSizeLog2));
-    // Adjust for return address and receiver
-    __ Add(x10, x10, 2 * kPointerSize);
-    __ Sub(x11, x10, Operand(x2, LSL, kPointerSizeLog2));
+    Register copy_start = x10;
+    Register copy_end = x11;
+    Register copy_to = x12;
+    Register scratch1 = x13, scratch2 = x14;
+
+    __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+
+    // Adjust for fp, lr, and the receiver.
+    __ Add(copy_start, fp, 3 * kPointerSize);
+    __ Add(copy_start, copy_start, Operand(argc_actual, LSL, kPointerSizeLog2));
+    __ Sub(copy_end, copy_start, argc_expected);
+    __ Sub(copy_end, copy_end, kPointerSize);
+    __ Mov(copy_to, jssp);
+
+    // Claim space for the arguments, the receiver, and one extra slot.
+    // The extra slot ensures we do not write under jssp. It will be popped
+    // later.
+    __ Add(scratch1, argc_expected, 2 * kPointerSize);
+    __ Claim(scratch1, 1);
 
     // Copy the arguments (including the receiver) to the new stack frame.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    // x10: copy start address
-    // x11: copy end address
-
-    // TODO(all): Should we push values 2 by 2?
-    Label copy;
-    __ Bind(&copy);
-    __ Cmp(x10, x11);
-    __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
-    __ Push(x12);
-    __ B(gt, &copy);
+    Label copy_2_by_2;
+    __ Bind(&copy_2_by_2);
+    __ Ldp(scratch1, scratch2,
+           MemOperand(copy_start, - 2 * kPointerSize, PreIndex));
+    __ Stp(scratch1, scratch2,
+           MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+    __ Cmp(copy_start, copy_end);
+    __ B(hi, &copy_2_by_2);
+
+    // Correct the space allocated for the extra slot.
+    __ Drop(1);
 
     __ B(&invoke);
   }
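
The claim-then-drop dance above rests on a simple invariant: the 2-by-2 loop
always writes an even number of slots, so when the count to copy (expected
arguments plus the receiver) is odd, the final Stp overshoots by one slot.
Claiming one spare slot up front keeps that store inside the claimed region,
and the Drop(1) returns it. A hypothetical standalone check (not V8 code):

    #include <cassert>

    int main() {
      for (int expected = 0; expected < 1000; expected++) {
        int to_copy = expected + 1;           // arguments plus the receiver
        int written = (to_copy + 1) / 2 * 2;  // the Ldp/Stp loop rounds up
        int claimed = expected + 2;           // Claim: args + receiver + spare
        assert(written <= claimed);           // spare slot absorbs overshoot
      }
      return 0;
    }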
@@ -1409,52 +1421,57 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     __ Bind(&too_few);
     EnterArgumentsAdaptorFrame(masm);
 
-    // Calculate copy start address into x10 and copy end address into x11.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    // Adjust for return address.
-    __ Add(x11, fp, 1 * kPointerSize);
-    __ Add(x10, x11, Operand(x0, LSL, kPointerSizeLog2));
-    __ Add(x10, x10, 1 * kPointerSize);
+    Register copy_from = x10;
+    Register copy_end = x11;
+    Register copy_to = x12;
+    Register scratch1 = x13, scratch2 = x14;
+
+    __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+    __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
+
+    // Adjust for fp, lr, and the receiver.
+    __ Add(copy_from, fp, 3 * kPointerSize);
+    __ Add(copy_from, copy_from, argc_actual);
+    __ Mov(copy_to, jssp);
+    __ Sub(copy_end, copy_to, 1 * kPointerSize);  // Adjust for the receiver.
+    __ Sub(copy_end, copy_end, argc_actual);
+
+    // Claim space for the arguments, the receiver, and one extra slot.
+    // The extra slot ensures we do not write under jssp. It will be popped
+    // later.
+    __ Add(scratch1, argc_expected, 2 * kPointerSize);
+    __ Claim(scratch1, 1);
 
     // Copy the arguments (including the receiver) to the new stack frame.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    // x10: copy start address
-    // x11: copy end address
-    Label copy;
-    __ Bind(&copy);
-    __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
-    __ Push(x12);
-    __ Cmp(x10, x11);  // Compare before moving to next argument.
-    __ B(ne, &copy);
+    Label copy_2_by_2;
+    __ Bind(&copy_2_by_2);
+    __ Ldp(scratch1, scratch2,
+           MemOperand(copy_from, - 2 * kPointerSize, PreIndex));
+    __ Stp(scratch1, scratch2,
+           MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+    __ Cmp(copy_to, copy_end);
+    __ B(hi, &copy_2_by_2);
+
+    __ Mov(copy_to, copy_end);
 
     // Fill the remaining expected arguments with undefined.
-    // x0: actual number of arguments
-    // x1: function
-    // x2: expected number of arguments
-    // x3: code entry to call
-    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
-    __ Sub(x11, fp, Operand(x2, LSL, kPointerSizeLog2));
-    // Adjust for the arguments adaptor frame and already pushed receiver.
-    __ Sub(x11, x11,
-           StandardFrameConstants::kFixedFrameSizeFromFp + (2 * kPointerSize));
+    __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+    __ Add(copy_end, jssp, kPointerSize);
 
-    // TODO(all): Optimize this to use ldp?
     Label fill;
     __ Bind(&fill);
-    __ Push(x10);
-    __ Cmp(jssp, x11);
-    __ B(ne, &fill);
+    __ Stp(scratch1, scratch1,
+           MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+    __ Cmp(copy_to, copy_end);
+    __ B(hi, &fill);
+
+    // Correct the space allocated for the extra slot.
+    __ Drop(1);
   }
 
   // Arguments have been adapted. Now call the entry point.
   __ Bind(&invoke);
-  __ Call(x3);
+  __ Call(code_entry);
 
   // Store offset of return address for deoptimizer.
   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
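
One subtlety in the too-few path is the Mov(copy_to, copy_end) between the two
loops: when the copied count (actual arguments plus the receiver) is odd, the
copy loop overshoots copy_end by one slot and leaves a stray duplicate there.
Restarting the fill from copy_end guarantees that stray slot is overwritten
with undefined rather than skipped. A hypothetical standalone check (not V8
code) that the overshoot is at most one slot:

    #include <cassert>

    int main() {
      for (int actual = 0; actual < 1000; actual++) {
        int to_copy = actual + 1;                // arguments plus the receiver
        int copy_to = -((to_copy + 1) / 2 * 2);  // slots below jssp after loop
        int copy_end = -to_copy;                 // where the fill must begin
        assert(copy_to == copy_end || copy_to == copy_end - 1);
      }
      return 0;
    }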
@@ -1465,7 +1482,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 
   // Call the entry point without adapting the arguments.
   __ Bind(&dont_adapt_arguments);
-  __ Jump(x3);
+  __ Jump(code_entry);
 }