Commit 0492bb32 authored by titzer, committed by Commit bot

[turbofan] Support unboxed float and double stack parameters and add tests.

R=jarin@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1291113003

Cr-Commit-Position: refs/heads/master@{#30203}
parent 26241740
......@@ -814,7 +814,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArmPush:
__ Push(i.InputRegister(0));
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vstr(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ sub(sp, sp, Operand(kDoubleSize));
} else {
__ Push(i.InputRegister(0));
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmPoke: {
......
......@@ -1117,16 +1117,18 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
if (Node* node = buffer.pushed_nodes[n]) {
int const slot = static_cast<int>(n);
InstructionOperand value = g.UseRegister(node);
Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(), value);
if (Node* input = buffer.pushed_nodes[n]) {
int slot = static_cast<int>(n);
Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
g.UseRegister(input));
}
}
} else {
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// Skip any alignment holes in pushed nodes.
if (input == nullptr) continue;
Emit(kArmPush, g.NoOutput(), g.UseRegister(input));
}
}
......@@ -1220,8 +1222,8 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, false);
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
Emit(kArmPush, g.NoOutput(), g.UseRegister(input));
}
// Select the appropriate opcode based on the call type.
......
......@@ -875,7 +875,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kIA32Push:
if (HasImmediateInput(instr, 0)) {
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
} else {
__ push(i.InputOperand(0));
......
......@@ -844,21 +844,23 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
if (Node* node = buffer.pushed_nodes[n]) {
if (Node* input = buffer.pushed_nodes[n]) {
int const slot = static_cast<int>(n);
InstructionOperand value =
g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
InstructionOperand value = g.CanBeImmediate(node)
? g.UseImmediate(input)
: g.UseRegister(input);
Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): handle pushing double parameters.
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// Skip any alignment holes in pushed nodes.
if (input == nullptr) continue;
InstructionOperand value =
g.CanBeImmediate(node)
? g.UseImmediate(node)
: IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
g.CanBeImmediate(input)
? g.UseImmediate(input)
: IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
Emit(kIA32Push, g.NoOutput(), value);
}
}
......@@ -948,12 +950,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): Handle pushing double parameters.
InstructionOperand value =
g.CanBeImmediate(node)
? g.UseImmediate(node)
: IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
g.CanBeImmediate(input)
? g.UseImmediate(input)
: IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
Emit(kIA32Push, g.NoOutput(), value);
}
......
......@@ -790,14 +790,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMipsPush:
__ Push(i.InputRegister(0));
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
} else {
__ Push(i.InputRegister(0));
}
break;
case kMipsStackClaim: {
__ Subu(sp, sp, Operand(i.InputInt32(0)));
break;
}
case kMipsStoreToStackSlot: {
__ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
__ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
}
break;
}
case kMipsStoreWriteBarrier: {
......
......@@ -533,23 +533,23 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kCArgSlotCount;
for (Node* node : buffer.pushed_nodes) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
for (Node* input : buffer.pushed_nodes) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
} else {
// Possibly align stack here for functions.
int push_count = buffer.pushed_nodes.size();
int push_count = static_cast<int>(descriptor->StackParameterCount());
if (push_count > 0) {
Emit(kMipsStackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
int slot = buffer.pushed_nodes.size() - 1;
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
g.TempImmediate(slot << kPointerSizeLog2));
slot--;
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
if (Node* input = buffer.pushed_nodes[n]) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(n << kPointerSizeLog2));
}
}
}
......@@ -636,14 +636,14 @@ void InstructionSelector::VisitTailCall(Node* node) {
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, false);
// Possibly align stack here for functions.
int push_count = buffer.pushed_nodes.size();
int push_count = static_cast<int>(descriptor->StackParameterCount());
if (push_count > 0) {
Emit(kMipsStackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
int slot = buffer.pushed_nodes.size() - 1;
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
int slot = static_cast<int>(buffer.pushed_nodes.size()) - 1;
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
slot--;
}
......
......@@ -437,7 +437,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
RecordCallPosition(instr);
......@@ -450,7 +449,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
AssembleDeconstructActivationRecord();
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
......@@ -550,8 +548,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64DmodU:
__ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
......@@ -772,14 +768,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
case kMips64CvtSD: {
case kMips64CvtSD:
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64CvtDS: {
case kMips64CvtDS:
__ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
break;
}
case kMips64CvtDW: {
FPURegister scratch = kScratchDoubleReg;
__ mtc1(i.InputRegister(0), scratch);
......@@ -865,14 +859,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMips64Push:
__ Push(i.InputRegister(0));
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
} else {
__ Push(i.InputRegister(0));
}
break;
case kMips64StackClaim: {
__ Dsubu(sp, sp, Operand(i.InputInt32(0)));
break;
}
case kMips64StoreToStackSlot: {
__ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
__ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
}
break;
}
case kMips64StoreWriteBarrier: {
......@@ -924,7 +927,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
break;
}
}
} // NOLINT(readability/fn_size)
#define UNSUPPORTED_COND(opcode, condition) \
......
......@@ -682,22 +682,22 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kCArgSlotCount;
for (Node* node : buffer.pushed_nodes) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
for (Node* input : buffer.pushed_nodes) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
} else {
const int32_t push_count = static_cast<int32_t>(buffer.pushed_nodes.size());
int push_count = static_cast<int>(descriptor->StackParameterCount());
if (push_count > 0) {
Emit(kMips64StackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
int32_t slot = push_count - 1;
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
g.TempImmediate(slot << kPointerSizeLog2));
slot--;
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
if (Node* input = buffer.pushed_nodes[n]) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
}
}
}
......@@ -791,8 +791,8 @@ void InstructionSelector::VisitTailCall(Node* node) {
g.TempImmediate(push_count << kPointerSizeLog2));
}
int slot = push_count - 1;
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
slot--;
}
......
......@@ -972,7 +972,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
#endif
case kPPC_Push:
__ Push(i.InputRegister(0));
if (instr->InputAt(0)->IsDoubleRegister()) {
__ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
} else {
__ Push(i.InputRegister(0));
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_PushFrame: {
......@@ -982,7 +986,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kPPC_StoreToStackSlot: {
int slot = i.InputInt32(1);
__ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
if (instr->InputAt(0)->IsDoubleRegister()) {
__ stfd(i.InputDoubleRegister(0), MemOperand(sp, slot * kPointerSize));
} else {
__ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
}
break;
}
case kPPC_ExtendSignWord8:
......
......@@ -1474,14 +1474,16 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
} else {
// Push any stack arguments.
int num_slots = buffer.pushed_nodes.size();
int num_slots = static_cast<int>(descriptor->StackParameterCount());
int slot = 0;
for (Node* node : buffer.pushed_nodes) {
for (Node* input : buffer.pushed_nodes) {
// Skip any alignment holes in pushed nodes.
if (input == nullptr) continue;
if (slot == 0) {
Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(node),
Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(num_slots));
} else {
Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot));
}
++slot;
......@@ -1578,8 +1580,9 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, false);
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
Emit(kPPC_Push, g.NoOutput(), g.UseRegister(node));
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
if (input == nullptr) continue;
Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input));
}
// Select the appropriate opcode based on the call type.
......
......@@ -1053,21 +1053,22 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
if (Node* node = buffer.pushed_nodes[n]) {
if (Node* input = buffer.pushed_nodes[n]) {
int const slot = static_cast<int>(n);
InstructionOperand value =
g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
InstructionOperand value = g.CanBeImmediate(input)
? g.UseImmediate(input)
: g.UseRegister(input);
Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): handle pushing double parameters.
InstructionOperand value =
g.CanBeImmediate(node)
? g.UseImmediate(node)
: IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
g.CanBeImmediate(input)
? g.UseImmediate(input)
: IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
Emit(kX64Push, g.NoOutput(), value);
}
}
......@@ -1156,12 +1157,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): Handle pushing double parameters.
InstructionOperand value =
g.CanBeImmediate(node)
? g.UseImmediate(node)
: IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
g.CanBeImmediate(input)
? g.UseImmediate(input)
: IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
Emit(kX64Push, g.NoOutput(), value);
}
......
......@@ -841,21 +841,22 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
if (Node* node = buffer.pushed_nodes[n]) {
if (Node* input = buffer.pushed_nodes[n]) {
int const slot = static_cast<int>(n);
InstructionOperand value =
g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
InstructionOperand value = g.CanBeImmediate(input)
? g.UseImmediate(input)
: g.UseRegister(input);
Emit(kX87Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): handle pushing double parameters.
InstructionOperand value =
g.CanBeImmediate(node)
? g.UseImmediate(node)
: IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
g.CanBeImmediate(input)
? g.UseImmediate(input)
: IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
Emit(kX87Push, g.NoOutput(), value);
}
}
......@@ -945,12 +946,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): Handle pushing double parameters.
InstructionOperand value =
g.CanBeImmediate(node)
? g.UseImmediate(node)
: IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
g.CanBeImmediate(input)
? g.UseImmediate(input)
: IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
Emit(kX87Push, g.NoOutput(), value);
}
......
......@@ -134,7 +134,7 @@ struct Allocator {
return LinkageLocation::ForRegister(fp_regs[fp_offset++]);
} else {
int offset = -1 - stack_offset;
stack_offset += Words(type);
stack_offset += StackWords(type);
return LinkageLocation::ForCallerFrameSlot(offset);
}
} else {
......@@ -143,7 +143,7 @@ struct Allocator {
return LinkageLocation::ForRegister(gp_regs[gp_offset++]);
} else {
int offset = -1 - stack_offset;
stack_offset += Words(type);
stack_offset += StackWords(type);
return LinkageLocation::ForCallerFrameSlot(offset);
}
}
......@@ -152,8 +152,12 @@ struct Allocator {
return RepresentationOf(type) == kRepFloat32 ||
RepresentationOf(type) == kRepFloat64;
}
int Words(MachineType type) {
int size = ElementSizeOf(type);
int StackWords(MachineType type) {
// TODO(titzer): hack. float32 occupies 8 bytes on stack.
int size = (RepresentationOf(type) == kRepFloat32 ||
RepresentationOf(type) == kRepFloat64)
? kDoubleSize
: ElementSizeOf(type);
return size <= kPointerSize ? 1 : size / kPointerSize;
}
void Reset() {
......@@ -769,6 +773,16 @@ static CType Compute_Select(CallDescriptor* desc, CType* inputs) {
}
template <typename CType, int which>
static void RunSelect(CallDescriptor* desc) {
int count = ParamCount(desc);
if (count <= which) return;
Run_Computation<CType>(desc, Build_Select<which>,
Compute_Select<CType, which>,
1044 + which + 3 * sizeof(CType));
}
template <int which>
void Test_Int32_Select() {
if (DISABLE_NATIVE_STACK_PARAMS) return;
......@@ -784,8 +798,7 @@ void Test_Int32_Select() {
for (int i = which + 1; i <= 64; i++) {
Int32Signature sig(i);
CallDescriptor* desc = config.Create(&zone, &sig);
Run_Computation<int32_t>(desc, Build_Select<which>,
Compute_Select<int32_t, which>, 1025 + which);
RunSelect<int32_t, which>(desc);
}
}
......@@ -827,11 +840,8 @@ TEST(Int64Select_registers) {
RegisterConfig config(params, rets);
CallDescriptor* desc = config.Create(&zone, &sig);
Run_Computation<int64_t>(desc, Build_Select<0>, Compute_Select<int64_t, 0>,
1021);
Run_Computation<int64_t>(desc, Build_Select<1>, Compute_Select<int64_t, 1>,
1022);
RunSelect<int64_t, 0>(desc);
RunSelect<int64_t, 1>(desc);
}
}
......@@ -852,11 +862,8 @@ TEST(Float32Select_registers) {
RegisterConfig config(params, rets);
CallDescriptor* desc = config.Create(&zone, &sig);
Run_Computation<float32>(desc, Build_Select<0>, Compute_Select<float32, 0>,
1019);
Run_Computation<float32>(desc, Build_Select<1>, Compute_Select<float32, 1>,
1018);
RunSelect<float32, 0>(desc);
RunSelect<float32, 1>(desc);
}
}
......@@ -877,10 +884,49 @@ TEST(Float64Select_registers) {
RegisterConfig config(params, rets);
CallDescriptor* desc = config.Create(&zone, &sig);
Run_Computation<float64>(desc, Build_Select<0>, Compute_Select<float64, 0>,
1033);
RunSelect<float64, 0>(desc);
RunSelect<float64, 1>(desc);
}
}
TEST(Float32Select_stack_params_return_reg) {
if (DISABLE_NATIVE_STACK_PARAMS) return;
int rarray[] = {0};
Allocator params(nullptr, 0, nullptr, 0);
Allocator rets(nullptr, 0, rarray, 1);
RegisterConfig config(params, rets);
Zone zone;
for (int count = 1; count < 6; count++) {
ArgsBuffer<float32>::Sig sig(count);
CallDescriptor* desc = config.Create(&zone, &sig);
RunSelect<float32, 0>(desc);
RunSelect<float32, 1>(desc);
RunSelect<float32, 2>(desc);
RunSelect<float32, 3>(desc);
RunSelect<float32, 4>(desc);
RunSelect<float32, 5>(desc);
}
}
TEST(Float64Select_stack_params_return_reg) {
if (DISABLE_NATIVE_STACK_PARAMS) return;
int rarray[] = {0};
Allocator params(nullptr, 0, nullptr, 0);
Allocator rets(nullptr, 0, rarray, 1);
RegisterConfig config(params, rets);
Zone zone;
for (int count = 1; count < 6; count++) {
ArgsBuffer<float64>::Sig sig(count);
CallDescriptor* desc = config.Create(&zone, &sig);
RunSelect<float64, 0>(desc);
RunSelect<float64, 1>(desc);
RunSelect<float64, 2>(desc);
RunSelect<float64, 3>(desc);
RunSelect<float64, 4>(desc);
RunSelect<float64, 5>(desc);
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment