Commit bb552d4c authored by titzer, committed by Commit bot

[wasm] Add more thorough tests for WASM->JS and JS->WASM parameters.

R=ahaas@chromium.org,bradnelson@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1603533002

Cr-Commit-Position: refs/heads/master@{#33362}
parent ccb3181c
@@ -488,7 +488,8 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   Arm64OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
-  switch (ArchOpcodeField::decode(opcode)) {
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
@@ -499,6 +500,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
         __ Call(target);
       }
+      // TODO(titzer): this is ugly. JSSP should be a caller-save register
+      // in this case, but it is not possible to express in the register
+      // allocator.
+      CallDescriptor::Flags flags =
+          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      if (flags & CallDescriptor::kRestoreJSSP) {
+        __ mov(jssp, csp);
+      }
       frame_access_state()->ClearSPDelta();
       RecordCallPosition(instr);
       break;
@@ -530,6 +539,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       }
       __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Call(x10);
+      // TODO(titzer): this is ugly. JSSP should be a caller-save register
+      // in this case, but it is not possible to express in the register
+      // allocator.
+      CallDescriptor::Flags flags =
+          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      if (flags & CallDescriptor::kRestoreJSSP) {
+        __ mov(jssp, csp);
+      }
       frame_access_state()->ClearSPDelta();
       RecordCallPosition(instr);
       break;
@@ -885,18 +902,41 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArm64CompareAndBranch32:
       // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
       break;
-    case kArm64ClaimForCallArguments: {
-      __ Claim(i.InputInt32(0));
-      frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
+    case kArm64ClaimCSP: {
+      int count = i.InputInt32(0);
+      Register prev = __ StackPointer();
+      __ SetStackPointer(csp);
+      __ Claim(count);
+      __ SetStackPointer(prev);
+      frame_access_state()->IncreaseSPDelta(count);
+      break;
+    }
+    case kArm64ClaimJSSP: {
+      int count = i.InputInt32(0);
+      if (csp.Is(__ StackPointer())) {
+        // No JSP is set up. Compute it from the CSP.
+        int even = RoundUp(count, 2);
+        __ Sub(jssp, csp, count * kPointerSize);
+        __ Sub(csp, csp, even * kPointerSize);  // Must always be aligned.
+        frame_access_state()->IncreaseSPDelta(even);
+      } else {
+        // JSSP is the current stack pointer, just use regular Claim().
+        __ Claim(count);
+        frame_access_state()->IncreaseSPDelta(count);
+      }
       break;
     }
-    case kArm64Poke: {
+    case kArm64PokeCSP:  // fall through
+    case kArm64PokeJSSP: {
+      Register prev = __ StackPointer();
+      __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
       Operand operand(i.InputInt32(1) * kPointerSize);
       if (instr->InputAt(0)->IsDoubleRegister()) {
         __ Poke(i.InputFloat64Register(0), operand);
       } else {
         __ Poke(i.InputRegister(0), operand);
       }
+      __ SetStackPointer(prev);
       break;
     }
     case kArm64PokePair: {
...
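A note on the kArm64ClaimJSSP case above: when only csp is set up, the generated code derives jssp from csp while keeping csp 16-byte aligned. The following standalone C++ sketch (not V8 code; it assumes 64-bit pointers and a hypothetical starting csp value) simply replays that arithmetic for an odd argument count:

#include <cassert>
#include <cstdint>

// Standalone sketch of the kArm64ClaimJSSP arithmetic above (not V8 code).
// Assumes 64-bit pointers; "count" is the number of argument slots to claim.
int main() {
  const int kPointerSize = 8;
  uint64_t csp = 0x10000;      // hypothetical, 16-byte aligned C stack pointer
  int count = 3;               // e.g. three call arguments
  int even = (count + 1) & ~1;                 // RoundUp(count, 2) -> 4
  uint64_t jssp = csp - count * kPointerSize;  // 0x10000 - 24 = 0xFFE8
  csp -= even * kPointerSize;                  // 0x10000 - 32 = 0xFFE0
  assert(csp % 16 == 0);       // native stack keeps its 16-byte alignment
  assert(jssp - csp == (even - count) * kPointerSize);  // one padding slot
  return 0;
}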
@@ -76,8 +76,10 @@ namespace compiler {
   V(Arm64TestAndBranch32) \
   V(Arm64TestAndBranch) \
   V(Arm64CompareAndBranch32) \
-  V(Arm64ClaimForCallArguments) \
-  V(Arm64Poke) \
+  V(Arm64ClaimCSP) \
+  V(Arm64ClaimJSSP) \
+  V(Arm64PokeCSP) \
+  V(Arm64PokeJSSP) \
   V(Arm64PokePair) \
   V(Arm64Float32Cmp) \
   V(Arm64Float32Add) \
@@ -171,6 +173,8 @@ namespace compiler {
   V(Operand2_R_SXTB) /* %r0 SXTB (signed extend byte) */ \
   V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */
 
+enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
...
@@ -143,8 +143,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64Ldr:
       return kIsLoadOperation;
 
-    case kArm64ClaimForCallArguments:
-    case kArm64Poke:
+    case kArm64ClaimCSP:
+    case kArm64ClaimJSSP:
+    case kArm64PokeCSP:
+    case kArm64PokeJSSP:
     case kArm64PokePair:
     case kArm64StrS:
     case kArm64StrD:
...
@@ -1593,30 +1593,27 @@ void InstructionSelector::EmitPrepareArguments(
     Node* node) {
   Arm64OperandGenerator g(this);
-  // Push the arguments to the stack.
-  int aligned_push_count = static_cast<int>(arguments->size());
-  bool pushed_count_uneven = aligned_push_count & 1;
-  int claim_count = aligned_push_count;
-  if (pushed_count_uneven && descriptor->UseNativeStack()) {
-    // We can only claim for an even number of call arguments when we use the
-    // native stack.
-    claim_count++;
-  }
-  // TODO(dcarney): claim and poke probably take small immediates,
-  // loop here or whatever.
+  bool to_native_stack = descriptor->UseNativeStack();
+  int claim_count = static_cast<int>(arguments->size());
+  int slot = claim_count - 1;
+  if (to_native_stack) {
+    // Native stack must always be aligned to 16 (2 words).
+    claim_count = RoundUp(claim_count, 2);
+  }
+  // TODO(titzer): claim and poke probably take small immediates.
   // Bump the stack pointer(s).
-  if (aligned_push_count > 0) {
-    // TODO(dcarney): it would be better to bump the csp here only
+  if (claim_count > 0) {
+    // TODO(titzer): it would be better to bump the csp here only
     // and emit paired stores with increment for non c frames.
-    Emit(kArm64ClaimForCallArguments, g.NoOutput(),
-         g.TempImmediate(claim_count));
+    ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
+    Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
   }
-  // Move arguments to the stack.
-  int slot = aligned_push_count - 1;
+  // Poke the arguments into the stack.
+  ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
   while (slot >= 0) {
-    Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
+    Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
          g.TempImmediate(slot));
     slot--;
     // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
...
@@ -1416,6 +1416,13 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
     buffer.instruction_args.push_back(g.Label(handler));
   }
 
+  // (arm64 only) caller uses JSSP but callee might destroy it.
+  if (descriptor->UseNativeStack() &&
+      !linkage()->GetIncomingDescriptor()->UseNativeStack()) {
+    flags |= CallDescriptor::kRestoreJSSP;
+  }
+
   // Select the appropriate opcode based on the call type.
   InstructionCode opcode = kArchNop;
   switch (descriptor->kind()) {
...
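The kRestoreJSSP bit set here travels to the ARM64 code generator through the instruction's opcode word, where the first hunks above decode it via MiscField and re-derive jssp from csp after the call. The standalone C++ sketch below (not V8 code; the field position and widths are made up for illustration) shows the encode/decode round trip that handoff relies on:

#include <cassert>
#include <cstdint>

// Illustrative sketch (not V8 code): call-descriptor flags are packed into a
// field of the instruction opcode by the selector and unpacked by the code
// generator. Field layout here is hypothetical.
namespace sketch {
constexpr uint32_t kFlagsShift = 9;          // hypothetical "MiscField" position
constexpr uint32_t kRestoreJSSP = 1u << 8;   // mirrors CallDescriptor::kRestoreJSSP

uint32_t EncodeMisc(uint32_t opcode, uint32_t flags) {
  return opcode | (flags << kFlagsShift);
}
uint32_t DecodeMisc(uint32_t opcode) { return opcode >> kFlagsShift; }
}  // namespace sketch

int main() {
  // Selector side: VisitCall decides the callee needs jssp restored.
  uint32_t flags = sketch::kRestoreJSSP;
  uint32_t opcode = sketch::EncodeMisc(/*arch opcode*/ 7u, flags);
  // Code-generator side: decode the flags back out of the opcode word.
  uint32_t decoded = sketch::DecodeMisc(opcode);
  assert(decoded & sketch::kRestoreJSSP);  // would trigger "mov jssp, csp"
  return 0;
}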
@@ -153,9 +153,10 @@ class CallDescriptor final : public ZoneObject {
     kHasLocalCatchHandler = 1u << 4,
     kSupportsTailCalls = 1u << 5,
     kCanUseRoots = 1u << 6,
-    // Indicates that the native stack should be used for a code object. This
-    // information is important for native calls on arm64.
+    // (arm64 only) native stack should be used for arguments.
     kUseNativeStack = 1u << 7,
+    // (arm64 only) call instruction has to restore JSSP.
+    kRestoreJSSP = 1u << 8,
     kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
   };
   typedef base::Flags<Flag> Flags;
...
This diff is collapsed.