Commit 502b9aa7 authored by Ilija.Pavlovic's avatar Ilija.Pavlovic Committed by Commit bot

MIPS: Port for (fused) multiply add/subtract.

Port for VisitFloat32Add, VisitFloat64Add, VisitFloat32Sub and
VisitFloat64Sub in InstructionSelector.

TEST=unittests/InstructionSelectorTest.Float32AddWithFloat32Mul,
     unittests/InstructionSelectorTest.Float64AddWithFloat64Mul,
     unittests/InstructionSelectorTest.Float32SubWithFloat32Mul,
     unittests/InstructionSelectorTest.Float64SubWithFloat64Mul
BUG=

Review-Url: https://codereview.chromium.org/2341303002
Cr-Commit-Position: refs/heads/master@{#39616}
parent 154548c2
...@@ -1118,6 +1118,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1118,6 +1118,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1)); i.InputDoubleRegister(1));
break; break;
case kMipsMaddS:
__ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2));
break;
case kMipsMaddD:
__ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2));
break;
case kMipsMaddfS:
__ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
break;
case kMipsMaddfD:
__ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
break;
case kMipsMsubS:
__ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2));
break;
case kMipsMsubD:
__ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2));
break;
case kMipsMsubfS:
__ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
break;
case kMipsMsubfD:
__ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
break;
case kMipsMulD: case kMipsMulD:
// TODO(plind): add special case: right op is -1.0, see arm port. // TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......
...@@ -69,6 +69,14 @@ namespace compiler { ...@@ -69,6 +69,14 @@ namespace compiler {
V(MipsAddPair) \ V(MipsAddPair) \
V(MipsSubPair) \ V(MipsSubPair) \
V(MipsMulPair) \ V(MipsMulPair) \
V(MipsMaddS) \
V(MipsMaddD) \
V(MipsMaddfS) \
V(MipsMaddfD) \
V(MipsMsubS) \
V(MipsMsubD) \
V(MipsMsubfS) \
V(MipsMsubfD) \
V(MipsFloat32RoundDown) \ V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \ V(MipsFloat32RoundTruncate) \
V(MipsFloat32RoundUp) \ V(MipsFloat32RoundUp) \
......
...@@ -781,20 +781,126 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { ...@@ -781,20 +781,126 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) { void InstructionSelector::VisitFloat32Add(Node* node) {
MipsOperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Add.S(Mul.S(x, y), z):
Float32BinopMatcher mleft(m.left().node());
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
Emit(kMipsMaddS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
} else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(z, x, y).
Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
}
if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
// For Add.S(x, Mul.S(y, z)):
Float32BinopMatcher mright(m.right().node());
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(x, y, z).
Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
} else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(x, y, z).
Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMipsAddS, node); VisitRRR(this, kMipsAddS, node);
} }
void InstructionSelector::VisitFloat64Add(Node* node) { void InstructionSelector::VisitFloat64Add(Node* node) {
MipsOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
// For Add.D(Mul.D(x, y), z):
Float64BinopMatcher mleft(m.left().node());
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(z, x, y).
Emit(kMipsMaddD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
} else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(z, x, y).
Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
}
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
// For Add.D(x, Mul.D(y, z)):
Float64BinopMatcher mright(m.right().node());
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(x, y, z).
Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
} else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(x, y, z).
Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMipsAddD, node); VisitRRR(this, kMipsAddD, node);
} }
void InstructionSelector::VisitFloat32Sub(Node* node) { void InstructionSelector::VisitFloat32Sub(Node* node) {
MipsOperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
if (IsMipsArchVariant(kMips32r2)) {
// For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
Float32BinopMatcher mleft(m.left().node());
Emit(kMipsMsubS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
} else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
if (IsMipsArchVariant(kMips32r6)) {
// For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
Float32BinopMatcher mright(m.right().node());
Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMipsSubS, node); VisitRRR(this, kMipsSubS, node);
} }
void InstructionSelector::VisitFloat64Sub(Node* node) { void InstructionSelector::VisitFloat64Sub(Node* node) {
MipsOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
if (IsMipsArchVariant(kMips32r2)) {
// For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
Float64BinopMatcher mleft(m.left().node());
Emit(kMipsMsubD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
} else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
if (IsMipsArchVariant(kMips32r6)) {
// For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
Float64BinopMatcher mright(m.right().node());
Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMipsSubD, node); VisitRRR(this, kMipsSubD, node);
} }
......
...@@ -1314,6 +1314,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1314,6 +1314,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1)); i.InputDoubleRegister(1));
break; break;
case kMips64MaddS:
__ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2));
break;
case kMips64MaddD:
__ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2));
break;
case kMips64MaddfS:
__ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
break;
case kMips64MaddfD:
__ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
break;
case kMips64MsubS:
__ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1), i.InputFloatRegister(2));
break;
case kMips64MsubD:
__ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1), i.InputDoubleRegister(2));
break;
case kMips64MsubfS:
__ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
break;
case kMips64MsubfD:
__ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
break;
case kMips64MulD: case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port. // TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......
...@@ -85,6 +85,14 @@ namespace compiler { ...@@ -85,6 +85,14 @@ namespace compiler {
V(Mips64SqrtD) \ V(Mips64SqrtD) \
V(Mips64MaxD) \ V(Mips64MaxD) \
V(Mips64MinD) \ V(Mips64MinD) \
V(Mips64MaddS) \
V(Mips64MaddD) \
V(Mips64MaddfS) \
V(Mips64MaddfD) \
V(Mips64MsubS) \
V(Mips64MsubD) \
V(Mips64MsubfS) \
V(Mips64MsubfD) \
V(Mips64Float64RoundDown) \ V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \ V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \ V(Mips64Float64RoundUp) \
......
...@@ -1224,20 +1224,126 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) { ...@@ -1224,20 +1224,126 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) { void InstructionSelector::VisitFloat32Add(Node* node) {
Mips64OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Add.S(Mul.S(x, y), z):
Float32BinopMatcher mleft(m.left().node());
if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
Emit(kMips64MaddS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
} else if (kArchVariant == kMips64r6) { // Select Maddf.S(z, x, y).
Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
}
if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
// For Add.S(x, Mul.S(y, z)):
Float32BinopMatcher mright(m.right().node());
if (kArchVariant == kMips64r2) { // Select Madd.S(x, y, z).
Emit(kMips64MaddS, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
} else if (kArchVariant == kMips64r6) { // Select Maddf.S(x, y, z).
Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMips64AddS, node); VisitRRR(this, kMips64AddS, node);
} }
void InstructionSelector::VisitFloat64Add(Node* node) { void InstructionSelector::VisitFloat64Add(Node* node) {
Mips64OperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
// For Add.D(Mul.D(x, y), z):
Float64BinopMatcher mleft(m.left().node());
if (kArchVariant == kMips64r2) { // Select Madd.D(z, x, y).
Emit(kMips64MaddD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
} else if (kArchVariant == kMips64r6) { // Select Maddf.D(z, x, y).
Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
}
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
// For Add.D(x, Mul.D(y, z)):
Float64BinopMatcher mright(m.right().node());
if (kArchVariant == kMips64r2) { // Select Madd.D(x, y, z).
Emit(kMips64MaddD, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
} else if (kArchVariant == kMips64r6) { // Select Maddf.D(x, y, z).
Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMips64AddD, node); VisitRRR(this, kMips64AddD, node);
} }
void InstructionSelector::VisitFloat32Sub(Node* node) { void InstructionSelector::VisitFloat32Sub(Node* node) {
Mips64OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
if (kArchVariant == kMips64r2) {
// For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
Float32BinopMatcher mleft(m.left().node());
Emit(kMips64MsubS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
} else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
if (kArchVariant == kMips64r6) {
// For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
Float32BinopMatcher mright(m.right().node());
Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMips64SubS, node); VisitRRR(this, kMips64SubS, node);
} }
void InstructionSelector::VisitFloat64Sub(Node* node) { void InstructionSelector::VisitFloat64Sub(Node* node) {
Mips64OperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
if (kArchVariant == kMips64r2) {
// For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
Float64BinopMatcher mleft(m.left().node());
Emit(kMips64MsubD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
} else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
if (kArchVariant == kMips64r6) {
// For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
Float64BinopMatcher mright(m.right().node());
Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
}
}
VisitRRR(this, kMips64SubD, node); VisitRRR(this, kMips64SubD, node);
} }
......
...@@ -1179,6 +1179,203 @@ TEST_F(InstructionSelectorTest, Float64Abs) { ...@@ -1179,6 +1179,203 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
} }
TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* const n = m.Float32Add(m.Float32Mul(p0, p1), p2);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* const n = m.Float32Add(p0, m.Float32Mul(p1, p2));
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* const n = m.Float64Add(m.Float64Mul(p0, p1), p2);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* const n = m.Float64Add(p0, m.Float64Mul(p1, p2));
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n;
if (IsMipsArchVariant(kMips32r2)) {
n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
} else if (IsMipsArchVariant(kMips32r6)) {
n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
}
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMsubS, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMsubfS, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n;
if (IsMipsArchVariant(kMips32r2)) {
n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
} else if (IsMipsArchVariant(kMips32r6)) {
n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
}
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_EQ(kMipsMsubD, s[0]->arch_opcode());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_EQ(kMipsMsubfD, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (IsMipsArchVariant(kMips32r2)) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (IsMipsArchVariant(kMips32r6)) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64Max) { TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
......
...@@ -1536,6 +1536,203 @@ TEST_F(InstructionSelectorTest, Float64Abs) { ...@@ -1536,6 +1536,203 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
} }
TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* const n = m.Float32Add(m.Float32Mul(p0, p1), p2);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* const n = m.Float32Add(p0, m.Float32Mul(p1, p2));
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* const n = m.Float64Add(m.Float64Mul(p0, p1), p2);
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* const n = m.Float64Add(p0, m.Float64Mul(p1, p2));
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n;
if (kArchVariant == kMips64r2) {
n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
} else if (kArchVariant == kMips64r6) {
n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
}
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MsubS, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MsubfS, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
Node* n;
if (kArchVariant == kMips64r2) {
n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
} else if (kArchVariant == kMips64r6) {
n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
}
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
if (kArchVariant == kMips64r2) {
EXPECT_EQ(kMips64MsubD, s[0]->arch_opcode());
} else if (kArchVariant == kMips64r6) {
EXPECT_EQ(kMips64MsubfD, s[0]->arch_opcode());
}
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
if (kArchVariant == kMips64r2) {
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
} else if (kArchVariant == kMips64r6) {
EXPECT_TRUE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
}
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64Max) { TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment