Commit bb552d4c authored by titzer, committed by Commit bot

[wasm] Add more thorough tests for WASM->JS and JS->WASM parameters.

R=ahaas@chromium.org,bradnelson@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1603533002

Cr-Commit-Position: refs/heads/master@{#33362}
parent ccb3181c
@@ -488,7 +488,8 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
switch (ArchOpcodeField::decode(opcode)) {
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
@@ -499,6 +500,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
}
// TODO(titzer): this is ugly. JSSP should be a caller-save register
// in this case, but it is not possible to express in the register
// allocator.
CallDescriptor::Flags flags =
static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
if (flags & CallDescriptor::kRestoreJSSP) {
__ mov(jssp, csp);
}
frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
@@ -530,6 +539,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(x10);
// TODO(titzer): this is ugly. JSSP should be a caller-save register
// in this case, but it is not possible to express in the register
// allocator.
CallDescriptor::Flags flags =
static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
if (flags & CallDescriptor::kRestoreJSSP) {
__ mov(jssp, csp);
}
frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
@@ -885,18 +902,41 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64CompareAndBranch32:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
case kArm64ClaimForCallArguments: {
__ Claim(i.InputInt32(0));
frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
case kArm64ClaimCSP: {
int count = i.InputInt32(0);
Register prev = __ StackPointer();
__ SetStackPointer(csp);
__ Claim(count);
__ SetStackPointer(prev);
frame_access_state()->IncreaseSPDelta(count);
break;
}
case kArm64ClaimJSSP: {
int count = i.InputInt32(0);
if (csp.Is(__ StackPointer())) {
// No JSSP is set up. Compute it from the CSP.
int even = RoundUp(count, 2);
__ Sub(jssp, csp, count * kPointerSize);
__ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
frame_access_state()->IncreaseSPDelta(even);
} else {
// JSSP is the current stack pointer, just use regular Claim().
__ Claim(count);
frame_access_state()->IncreaseSPDelta(count);
}
break;
}
case kArm64Poke: {
case kArm64PokeCSP: // fall through
case kArm64PokeJSSP: {
Register prev = __ StackPointer();
__ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
Operand operand(i.InputInt32(1) * kPointerSize);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ Poke(i.InputFloat64Register(0), operand);
} else {
__ Poke(i.InputRegister(0), operand);
}
__ SetStackPointer(prev);
break;
}
case kArm64PokePair: {
......
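A side note on the kArm64ClaimJSSP path above: when csp is the current stack pointer, the claim is rounded up to an even slot count so csp stays 16-byte aligned, while jssp still points at the exact argument area. A standalone sketch of that arithmetic (plain C++, not V8 code; the starting pointer value is made up):

#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSize = 8;                  // arm64 word size
  uint64_t csp = 0x7ffff000;                   // made-up, 16-byte aligned C stack pointer
  int count = 3;                               // argument slots requested
  int even = (count + 1) & ~1;                 // RoundUp(count, 2) == 4
  uint64_t jssp = csp - count * kPointerSize;  // JS stack pointer covers exactly the argument area
  csp -= even * kPointerSize;                  // C stack pointer stays 16-byte aligned
  printf("jssp=%#llx csp=%#llx sp delta=%d slots\n",
         (unsigned long long)jssp, (unsigned long long)csp, even);
  return 0;
}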
@@ -76,8 +76,10 @@ namespace compiler {
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64ClaimForCallArguments) \
V(Arm64Poke) \
V(Arm64ClaimCSP) \
V(Arm64ClaimJSSP) \
V(Arm64PokeCSP) \
V(Arm64PokeJSSP) \
V(Arm64PokePair) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
@@ -171,6 +173,8 @@ namespace compiler {
V(Operand2_R_SXTB) /* %r0 SXTB (signed extend byte) */ \
V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */
enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
} // namespace compiler
} // namespace internal
} // namespace v8
......
@@ -143,8 +143,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ldr:
return kIsLoadOperation;
case kArm64ClaimForCallArguments:
case kArm64Poke:
case kArm64ClaimCSP:
case kArm64ClaimJSSP:
case kArm64PokeCSP:
case kArm64PokeJSSP:
case kArm64PokePair:
case kArm64StrS:
case kArm64StrD:
......
@@ -1593,30 +1593,27 @@ void InstructionSelector::EmitPrepareArguments(
Node* node) {
Arm64OperandGenerator g(this);
// Push the arguments to the stack.
int aligned_push_count = static_cast<int>(arguments->size());
bool pushed_count_uneven = aligned_push_count & 1;
int claim_count = aligned_push_count;
if (pushed_count_uneven && descriptor->UseNativeStack()) {
// We can only claim for an even number of call arguments when we use the
// native stack.
claim_count++;
}
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
bool to_native_stack = descriptor->UseNativeStack();
int claim_count = static_cast<int>(arguments->size());
int slot = claim_count - 1;
if (to_native_stack) {
// Native stack must always be aligned to 16 (2 words).
claim_count = RoundUp(claim_count, 2);
}
// TODO(titzer): claim and poke probably take small immediates.
// Bump the stack pointer(s).
if (aligned_push_count > 0) {
// TODO(dcarney): it would be better to bump the csp here only
if (claim_count > 0) {
// TODO(titzer): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
Emit(kArm64ClaimForCallArguments, g.NoOutput(),
g.TempImmediate(claim_count));
ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
}
// Move arguments to the stack.
int slot = aligned_push_count - 1;
// Poke the arguments into the stack.
ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
while (slot >= 0) {
Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
g.TempImmediate(slot));
slot--;
// TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
......
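For reference, a standalone illustration (not V8 code) of the claim/poke pattern this selector emits for a native-stack call with three arguments: one even-sized claim, then pokes from the highest slot index down to slot 0:

#include <cstdio>

int main() {
  const bool to_native_stack = true;       // stands in for descriptor->UseNativeStack()
  int claim_count = 3;                     // arguments->size()
  int slot = claim_count - 1;
  if (to_native_stack) {
    claim_count = (claim_count + 1) & ~1;  // RoundUp(claim_count, 2)
  }
  if (claim_count > 0) {
    printf("%s %d\n", to_native_stack ? "Arm64ClaimCSP" : "Arm64ClaimJSSP",
           claim_count);
  }
  for (; slot >= 0; --slot) {
    printf("%s arg[%d] -> slot %d\n",
           to_native_stack ? "Arm64PokeCSP" : "Arm64PokeJSSP", slot, slot);
  }
  return 0;
}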
@@ -1416,6 +1416,13 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
buffer.instruction_args.push_back(g.Label(handler));
}
// (arm64 only) caller uses JSSP but callee might destroy it.
if (descriptor->UseNativeStack() &&
!linkage()->GetIncomingDescriptor()->UseNativeStack()) {
flags |= CallDescriptor::kRestoreJSSP;
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode = kArchNop;
switch (descriptor->kind()) {
......
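The kRestoreJSSP bit set here reaches the code generator through the instruction word: AssembleArchInstruction recovers it with MiscField::decode(opcode) in the arm64 hunks above. A standalone sketch of that round trip (the field position used below is an assumption for illustration, not V8's actual MiscField layout):

#include <cstdint>
#include <cstdio>

// Mirrors CallDescriptor::kRestoreJSSP from this CL.
constexpr uint32_t kRestoreJSSP = 1u << 8;
// Assumed bit position for the misc field; V8's real layout may differ.
constexpr int kMiscShift = 22;

int main() {
  uint32_t flags = kRestoreJSSP;
  uint32_t opcode = 0x2a;                   // pretend arch opcode bits
  opcode |= flags << kMiscShift;            // selection side: pack flags into the opcode
  uint32_t decoded = opcode >> kMiscShift;  // codegen side: unpack them again
  if (decoded & kRestoreJSSP) {
    printf("call site must restore JSSP: mov jssp, csp after the call\n");
  }
  return 0;
}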
@@ -153,9 +153,10 @@ class CallDescriptor final : public ZoneObject {
kHasLocalCatchHandler = 1u << 4,
kSupportsTailCalls = 1u << 5,
kCanUseRoots = 1u << 6,
// Indicates that the native stack should be used for a code object. This
// information is important for native calls on arm64.
// (arm64 only) native stack should be used for arguments.
kUseNativeStack = 1u << 7,
// (arm64 only) call instruction has to restore JSSP.
kRestoreJSSP = 1u << 8,
kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
};
typedef base::Flags<Flag> Flags;
......
This diff is collapsed.