Commit 0dfb0e21 authored by georgia.kouveli, committed by Commit bot

[arm64] Support passing more than eight arguments to C functions

BUG=v8:6102

Review-Url: https://codereview.chromium.org/2833463002
Cr-Commit-Position: refs/heads/master@{#44765}
parent 9a511bdf
......@@ -1791,14 +1791,12 @@ void MacroAssembler::CallCFunction(ExternalReference function,
CallCFunction(temp, num_of_reg_args, num_of_double_args);
}
static const int kRegisterPassedArguments = 8;
void MacroAssembler::CallCFunction(Register function,
int num_of_reg_args,
int num_of_double_args) {
DCHECK(has_frame());
// We can pass 8 integer arguments in registers. If we need to pass more than
// that, we'll need to implement support for passing them on the stack.
DCHECK(num_of_reg_args <= 8);
// If we're passing doubles, we're limited to the following prototypes
// (defined by ExternalReference::Type):
......@@ -1811,6 +1809,10 @@ void MacroAssembler::CallCFunction(Register function,
DCHECK((num_of_double_args + num_of_reg_args) <= 2);
}
// We rely on the frame alignment being 16 bytes, which means we never need
// to align the CSP by an unknown number of bytes and we always know the delta
// between the stack pointer and the frame pointer.
DCHECK(ActivationFrameAlignment() == 16);
// If the stack pointer is not csp, we need to derive an aligned csp from the
// current stack pointer.
......@@ -1819,16 +1821,18 @@ void MacroAssembler::CallCFunction(Register function,
AssertStackConsistency();
int sp_alignment = ActivationFrameAlignment();
// The ABI mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
// The current stack pointer is a callee saved register, and is preserved
// across the call.
DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
// Align and synchronize the system stack pointer with jssp.
Bic(csp, old_stack_pointer, sp_alignment - 1);
// If more than eight arguments are passed to the function, we expect the
// ninth argument onwards to have been placed on the csp-based stack
// already. We assume csp already points to the last stack-passed argument
// in that case.
// Otherwise, align and synchronize the system stack pointer with jssp.
if (num_of_reg_args <= kRegisterPassedArguments) {
Bic(csp, old_stack_pointer, sp_alignment - 1);
}
SetStackPointer(csp);
}
......@@ -1836,19 +1840,39 @@ void MacroAssembler::CallCFunction(Register function,
// so the return address in the link register stays correct.
Call(function);
if (!csp.Is(old_stack_pointer)) {
if (csp.Is(old_stack_pointer)) {
if (num_of_reg_args > kRegisterPassedArguments) {
// Drop the register passed arguments.
int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
Drop(claim_slots);
}
} else {
DCHECK(jssp.Is(old_stack_pointer));
if (emit_debug_code()) {
// Because the stack pointer must be aligned on a 16-byte boundary, the
// aligned csp can be up to 12 bytes below the jssp. This is the case
// where we only pushed one W register on top of an aligned jssp.
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
DCHECK(ActivationFrameAlignment() == 16);
Sub(temp, csp, old_stack_pointer);
// We want temp <= 0 && temp >= -12.
Cmp(temp, 0);
Ccmp(temp, -12, NFlag, le);
Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
if (num_of_reg_args > kRegisterPassedArguments) {
// We don't need to drop stack arguments, as the stack pointer will be
// jssp when returning from this function. However, in debug builds, we
// can check that jssp is as expected.
int claim_slots =
RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
// Check jssp matches the previous value on the stack.
Ldr(temp, MemOperand(csp, claim_slots * kPointerSize));
Cmp(jssp, temp);
Check(eq, kTheStackWasCorruptedByMacroAssemblerCall);
} else {
// Because the stack pointer must be aligned on a 16-byte boundary, the
// aligned csp can be up to 12 bytes below the jssp. This is the case
// where we only pushed one W register on top of an aligned jssp.
Sub(temp, csp, old_stack_pointer);
// We want temp <= 0 && temp >= -12.
Cmp(temp, 0);
Ccmp(temp, -12, NFlag, le);
Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
}
}
SetStackPointer(old_stack_pointer);
}
......
......@@ -772,8 +772,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchPrepareCallCFunction:
// We don't need kArchPrepareCallCFunction on arm64 as the instruction
// selector already perform a Claim to reserve space on the stack and
// guarantee correct alignment of stack pointer.
// selector has already performed a Claim to reserve space on the stack.
// Frame alignment is always 16 bytes, and the stack pointer is already
// 16-byte aligned, therefore we do not need to align the stack pointer
// by an unknown value, and it is safe to continue accessing the frame
// via the stack pointer.
UNREACHABLE();
break;
case kArchPrepareTailCall:
......@@ -788,9 +791,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters, 0);
}
// CallCFunction only supports register arguments so we never need to call
// frame()->ClearOutgoingParameterSlots() here.
DCHECK(frame_access_state()->sp_delta() == 0);
frame_access_state()->SetFrameAccessToDefault();
frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
......@@ -1228,14 +1230,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register prev = __ StackPointer();
if (prev.Is(jssp)) {
// TODO(titzer): make this a macro-assembler method.
// Align the CSP and store the previous JSSP on the stack.
// Align the CSP and store the previous JSSP on the stack. We do not
// need to modify the SP delta here, as we will continue to access the
// frame via JSSP.
UseScratchRegisterScope scope(masm());
Register tmp = scope.AcquireX();
// TODO(arm64): Storing JSSP on the stack is redundant when calling a C
// function, as JSSP is callee-saved (we still need to do this when
// calling a code object that uses the CSP as the stack pointer). See
// the code generation for kArchCallCodeObject vs. kArchCallCFunction
// (the latter does not restore CSP/JSSP).
// MacroAssembler::CallCFunction() (safely) drops this extra slot
// anyway.
int sp_alignment = __ ActivationFrameAlignment();
__ Sub(tmp, jssp, kPointerSize);
__ And(tmp, tmp, Operand(~static_cast<uint64_t>(sp_alignment - 1)));
__ Mov(csp, tmp);
__ Bic(csp, tmp, sp_alignment - 1);
__ Str(jssp, MemOperand(csp));
if (count > 0) {
__ SetStackPointer(csp);
......@@ -1259,7 +1269,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (count > 0) {
int even = RoundUp(count, 2);
__ Sub(jssp, csp, count * kPointerSize);
// We must also update CSP to maintain stack consistency:
__ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
__ AssertStackConsistency();
frame_access_state()->IncreaseSPDelta(even);
} else {
__ Mov(jssp, csp);
......
......@@ -1776,7 +1776,8 @@ void InstructionSelector::EmitPrepareArguments(
// TODO(titzer): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
// Claim(0) isn't a nop if there is a mismatch between CSP and JSSP.
// ClaimJSSP(0) or ClaimCSP(0) isn't a nop if there is a mismatch between
// CSP and JSSP.
Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
}
......
......@@ -298,6 +298,31 @@ Node* RawMachineAssembler::CallCFunction8(
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
return AddNode(common()->Call(descriptor), arraysize(args), args);
}
Node* RawMachineAssembler::CallCFunction9(
    MachineType return_type, MachineType arg0_type, MachineType arg1_type,
    MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
    MachineType arg5_type, MachineType arg6_type, MachineType arg7_type,
    MachineType arg8_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
    Node* arg3, Node* arg4, Node* arg5, Node* arg6, Node* arg7, Node* arg8) {
  // Build the machine signature: one return value and nine parameters,
  // registered in argument order.
  MachineSignature::Builder builder(zone(), 1, 9);
  builder.AddReturn(return_type);
  const MachineType param_types[] = {arg0_type, arg1_type, arg2_type,
                                     arg3_type, arg4_type, arg5_type,
                                     arg6_type, arg7_type, arg8_type};
  for (MachineType param_type : param_types) {
    builder.AddParam(param_type);
  }
  // The call node takes the callee address as its first input, followed by
  // the nine argument nodes.
  Node* args[] = {function, arg0, arg1, arg2, arg3,
                  arg4,     arg5, arg6, arg7, arg8};
  const CallDescriptor* descriptor =
      Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
  return AddNode(common()->Call(descriptor), arraysize(args), args);
}
BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
label->used_ = true;
return EnsureBlock(label);
......
......@@ -788,6 +788,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineType arg7_type, Node* function, Node* arg0,
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
// Call to a C function with nine arguments.
Node* CallCFunction9(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
MachineType arg3_type, MachineType arg4_type,
MachineType arg5_type, MachineType arg6_type,
MachineType arg7_type, MachineType arg8_type,
Node* function, Node* arg0, Node* arg1, Node* arg2,
Node* arg3, Node* arg4, Node* arg5, Node* arg6,
Node* arg7, Node* arg8);
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
......
......@@ -6163,6 +6163,11 @@ int32_t foo8(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
return a + b + c + d + e + f + g + h;
}
// Test callee taking nine integer arguments; on arm64 the ninth argument is
// passed on the stack, which is what the associated test exercises.
// Returns the sum of all nine arguments.
int32_t foo9(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
             int32_t g, int32_t h, int32_t i) {
  int32_t sum = a;
  sum += b;
  sum += c;
  sum += d;
  sum += e;
  sum += f;
  sum += g;
  sum += h;
  sum += i;
  return sum;
}
} // namespace
......@@ -6221,6 +6226,30 @@ TEST(RunCallCFunction8) {
CHECK_EQ(x * 8, m.Call(x));
}
}
TEST(RunCallCFunction9) {
  auto* foo9_ptr = &foo9;
  RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
  // Load the callee address through a pointer so the call goes via a
  // register rather than an immediate.
  Node* target = m.LoadFromPointer(&foo9_ptr, MachineType::Pointer());
  Node* base = m.Parameter(0);
  // Arguments are x, x+1, ..., x+8; foo9 sums them, giving 9*x + 36.
  Node* call = m.CallCFunction9(
      MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
      MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
      MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
      MachineType::Int32(), target, base,
      m.Int32Add(base, m.Int32Constant(1)),
      m.Int32Add(base, m.Int32Constant(2)),
      m.Int32Add(base, m.Int32Constant(3)),
      m.Int32Add(base, m.Int32Constant(4)),
      m.Int32Add(base, m.Int32Constant(5)),
      m.Int32Add(base, m.Int32Constant(6)),
      m.Int32Add(base, m.Int32Constant(7)),
      m.Int32Add(base, m.Int32Constant(8)));
  m.Return(call);
  FOR_INT32_INPUTS(i) {
    int32_t const input = *i;
    CHECK_EQ(input * 9 + 36, m.Call(input));
  }
}
#endif // USE_SIMULATOR
#if V8_TARGET_ARCH_64_BIT
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment