Commit ee7281f8 authored by ivica.bogosavljevic, committed by Commit bot

MIPS[64]: Disable fused multiply-accumulate instructions

MIPS[64]R6 supports only fused multiply-accumulate instructions, and using
these causes several tests that expect exact floating-point results to fail.
Therefore we disable fused multiply-accumulate on R6, both in the code that
V8 emits and in the compiled C++ tests (via -ffp-contract=off).

TEST=cctest/test-run-machops/RunFloat64MulAndFloat64Add1,mjsunit/es6/math-expm1.js,mjsunit/es6/math-fround.js,mjsunit/compiler/multiply-add.js

BUG=

Review-Url: https://codereview.chromium.org/2569683002
Cr-Commit-Position: refs/heads/master@{#41717}
parent 7c43fcb2
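For context: a fused multiply-accumulate rounds only once, after the full-precision product has been added, while a separate multiply and add round twice. The two results can differ in the last bits, which is exactly what the affected tests detect. A minimal standalone illustration of the difference (not part of this change; plain ISO C++):

    #include <cmath>
    #include <cstdio>

    int main() {
      double a = std::nextafter(1.0, 2.0);  // a = 1 + 2^-52
      double p = a * a;                     // rounded once: 1 + 2^-51 (the 2^-104 term is lost)
      double unfused = a * a - p;           // two roundings: exactly 0
      double fused = std::fma(a, a, -p);    // one rounding: recovers the 2^-104 error
      // Prints 0x0p+0 vs 0x1p-104; build with -ffp-contract=off so the
      // compiler itself does not contract a * a - p into a fused operation.
      std::printf("unfused: %a\nfused:   %a\n", unfused, fused);
    }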
@@ -1152,36 +1152,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                 i.InputDoubleRegister(1));
       break;
     case kMipsMaddS:
-      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMaddD:
-      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMipsMaddfS:
-      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMipsMaddfD:
-      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMsubS:
-      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMsubD:
-      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMipsMsubfS:
-      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMipsMsubfD:
-      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
...
@@ -71,12 +71,8 @@ namespace compiler {
   V(MipsMulPair)                   \
   V(MipsMaddS)                     \
   V(MipsMaddD)                     \
-  V(MipsMaddfS)                    \
-  V(MipsMaddfD)                    \
   V(MipsMsubS)                     \
   V(MipsMsubD)                     \
-  V(MipsMsubfS)                    \
-  V(MipsMsubfD)                    \
   V(MipsFloat32RoundDown)          \
   V(MipsFloat32RoundTruncate)      \
   V(MipsFloat32RoundUp)            \
...
@@ -904,35 +904,23 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
 void InstructionSelector::VisitFloat32Add(Node* node) {
   MipsOperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    // For Add.S(Mul.S(x, y), z):
-    Float32BinopMatcher mleft(m.left().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+      // For Add.S(Mul.S(x, y), z):
+      Float32BinopMatcher mleft(m.left().node());
       Emit(kMipsMaddS, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(z, x, y).
-      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    // For Add.S(x, Mul.S(y, z)):
-    Float32BinopMatcher mright(m.right().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(x, y, z).
+    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+      // For Add.S(x, Mul.S(y, z)):
+      Float32BinopMatcher mright(m.right().node());
       Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(x, y, z).
-      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMipsAddS, node);

@@ -941,35 +929,23 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
 void InstructionSelector::VisitFloat64Add(Node* node) {
   MipsOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    // For Add.D(Mul.D(x, y), z):
-    Float64BinopMatcher mleft(m.left().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+      // For Add.D(Mul.D(x, y), z):
+      Float64BinopMatcher mleft(m.left().node());
       Emit(kMipsMaddD, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(z, x, y).
-      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    // For Add.D(x, Mul.D(y, z)):
-    Float64BinopMatcher mright(m.right().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(x, y, z).
+    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+      // For Add.D(x, Mul.D(y, z)):
+      Float64BinopMatcher mright(m.right().node());
       Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(x, y, z).
-      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMipsAddD, node);

@@ -978,9 +954,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
 void InstructionSelector::VisitFloat32Sub(Node* node) {
   MipsOperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
       // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
       Float32BinopMatcher mleft(m.left().node());
       Emit(kMipsMsubS, g.DefineAsRegister(node),

@@ -988,24 +964,15 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    if (IsMipsArchVariant(kMips32r6)) {
-      // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
-      Float32BinopMatcher mright(m.right().node());
-      Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMipsSubS, node);
 }

 void InstructionSelector::VisitFloat64Sub(Node* node) {
   MipsOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
       // For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
       Float64BinopMatcher mleft(m.left().node());
       Emit(kMipsMsubD, g.DefineAsRegister(node),

@@ -1013,15 +980,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    if (IsMipsArchVariant(kMips32r6)) {
-      // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
-      Float64BinopMatcher mright(m.right().node());
-      Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMipsSubD, node);
 }
...
@@ -1346,36 +1346,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                 i.InputDoubleRegister(1));
       break;
     case kMips64MaddS:
-      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MaddD:
-      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMips64MaddfS:
-      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMips64MaddfD:
-      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MsubS:
-      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MsubD:
-      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMips64MsubfS:
-      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMips64MsubfD:
-      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
...
@@ -87,12 +87,8 @@ namespace compiler {
   V(Mips64MinD)                     \
   V(Mips64MaddS)                    \
   V(Mips64MaddD)                    \
-  V(Mips64MaddfS)                   \
-  V(Mips64MaddfD)                   \
   V(Mips64MsubS)                    \
   V(Mips64MsubD)                    \
-  V(Mips64MsubfS)                   \
-  V(Mips64MsubfD)                   \
   V(Mips64Float64RoundDown)         \
   V(Mips64Float64RoundTruncate)     \
   V(Mips64Float64RoundUp)           \
...
@@ -1449,35 +1449,23 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
 void InstructionSelector::VisitFloat32Add(Node* node) {
   Mips64OperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    // For Add.S(Mul.S(x, y), z):
-    Float32BinopMatcher mleft(m.left().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+  if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+      // For Add.S(Mul.S(x, y), z):
+      Float32BinopMatcher mleft(m.left().node());
       Emit(kMips64MaddS, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(z, x, y).
-      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    // For Add.S(x, Mul.S(y, z)):
-    Float32BinopMatcher mright(m.right().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.S(x, y, z).
+    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+      // For Add.S(x, Mul.S(y, z)):
+      Float32BinopMatcher mright(m.right().node());
       Emit(kMips64MaddS, g.DefineAsRegister(node),
            g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(x, y, z).
-      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMips64AddS, node);

@@ -1486,35 +1474,23 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
 void InstructionSelector::VisitFloat64Add(Node* node) {
   Mips64OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    // For Add.D(Mul.D(x, y), z):
-    Float64BinopMatcher mleft(m.left().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.D(z, x, y).
+  if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+      // For Add.D(Mul.D(x, y), z):
+      Float64BinopMatcher mleft(m.left().node());
       Emit(kMips64MaddD, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(z, x, y).
-      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    // For Add.D(x, Mul.D(y, z)):
-    Float64BinopMatcher mright(m.right().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.D(x, y, z).
+    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+      // For Add.D(x, Mul.D(y, z)):
+      Float64BinopMatcher mright(m.right().node());
       Emit(kMips64MaddD, g.DefineAsRegister(node),
            g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(x, y, z).
-      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMips64AddD, node);

@@ -1523,9 +1499,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
 void InstructionSelector::VisitFloat32Sub(Node* node) {
   Mips64OperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    if (kArchVariant == kMips64r2) {
+  if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
       // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
       Float32BinopMatcher mleft(m.left().node());
       Emit(kMips64MsubS, g.DefineAsRegister(node),

@@ -1533,24 +1509,15 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    if (kArchVariant == kMips64r6) {
-      // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
-      Float32BinopMatcher mright(m.right().node());
-      Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMips64SubS, node);
 }

 void InstructionSelector::VisitFloat64Sub(Node* node) {
   Mips64OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    if (kArchVariant == kMips64r2) {
+  if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
       // For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
       Float64BinopMatcher mleft(m.left().node());
       Emit(kMips64MsubD, g.DefineAsRegister(node),

@@ -1558,15 +1525,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    if (kArchVariant == kMips64r6) {
-      // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
-      Float64BinopMatcher mright(m.right().node());
-      Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMips64SubD, node);
 }
...
@@ -1998,6 +1998,49 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
   }
 }

+void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    madd_s(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_s(scratch, fs, ft);
+    add_s(fd, fr, scratch);
+  }
+}
+
+void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    madd_d(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_d(scratch, fs, ft);
+    add_d(fd, fr, scratch);
+  }
+}
+
+void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    msub_s(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_s(scratch, fs, ft);
+    sub_s(fd, scratch, fr);
+  }
+}
+
+void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    msub_d(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_d(scratch, fs, ft);
+    sub_d(fd, scratch, fr);
+  }
+}
+
 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
                                    Label* nan, Condition cond, FPURegister cmp1,
...
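Two things are worth noting about the helpers above. First, they follow the hardware madd.fmt/msub.fmt operand convention: Madd_* computes fr + fs * ft, while Msub_* computes fs * ft - fr (not fr - fs * ft). Second, both branches are meant to produce bit-identical results: the madd/msub instructions retained on r2 are the pre-R6, non-fused forms, and the fallback spells out the same two roundings through the scratch register. A hedged C++ model of Msub_d (illustrative names only, not V8 API):

    // has_nonfused_msub stands in for IsMipsArchVariant(kMips32r2); compile
    // with -ffp-contract=off so the compiler keeps both paths unfused.
    double Msub_d_model(double fr, double fs, double ft,
                        bool has_nonfused_msub) {
      if (has_nonfused_msub) {
        return fs * ft - fr;     // pre-R6 msub.d: product and difference each rounded
      }
      double scratch = fs * ft;  // mul_d(scratch, fs, ft): first rounding
      return scratch - fr;       // sub_d(fd, scratch, fr): second rounding
    }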
@@ -887,6 +887,15 @@ class MacroAssembler: public Assembler {
   // general-purpose register.
   void Mfhc1(Register rt, FPURegister fs);

+  void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+
   // Wrapper functions for the different cmp/branch types.
   inline void BranchF32(Label* target, Label* nan, Condition cc,
                         FPURegister cmp1, FPURegister cmp2,
...
@@ -2212,19 +2212,49 @@ void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
   bind(&fail);
 }

+void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (kArchVariant == kMips64r2) {
+    madd_s(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_s(scratch, fs, ft);
+    add_s(fd, fr, scratch);
+  }
+}
+
 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                             FPURegister ft, FPURegister scratch) {
-  if (0) {  // TODO(plind): find reasonable arch-variant symbol names.
+  if (kArchVariant == kMips64r2) {
     madd_d(fd, fr, fs, ft);
   } else {
-    // Can not change source regs's value.
     DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
     mul_d(scratch, fs, ft);
     add_d(fd, fr, scratch);
   }
 }

+void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (kArchVariant == kMips64r2) {
+    msub_s(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_s(scratch, fs, ft);
+    sub_s(fd, scratch, fr);
+  }
+}
+
+void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (kArchVariant == kMips64r2) {
+    msub_d(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_d(scratch, fs, ft);
+    sub_d(fd, scratch, fr);
+  }
+}
+
 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
                                    Label* nan, Condition cond, FPURegister cmp1,
...
@@ -940,10 +940,13 @@ class MacroAssembler: public Assembler {
   void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
                                       FPURegister ft);

-  void Madd_d(FPURegister fd,
-              FPURegister fr,
-              FPURegister fs,
-              FPURegister ft,
+  void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
               FPURegister scratch);

   // Wrapper functions for the different cmp/branch types.
...
@@ -359,7 +359,9 @@ v8_executable("cctest") {
   if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
       v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
-      v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+      v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
+      v8_current_cpu == "mips" || v8_current_cpu == "mips64" ||
+      v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64") {
     # Disable fmadd/fmsub so that expected results match generated code in
     # RunFloat64MulAndFloat64Add1 and friends.
     cflags += [ "-ffp-contract=off" ]
...
@@ -428,7 +428,9 @@
         }],
         ['v8_target_arch=="ppc" or v8_target_arch=="ppc64" \
           or v8_target_arch=="arm" or v8_target_arch=="arm64" \
-          or v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+          or v8_target_arch=="s390" or v8_target_arch=="s390x" \
+          or v8_target_arch=="mips" or v8_target_arch=="mips64" \
+          or v8_target_arch=="mipsel" or v8_target_arch=="mips64el"', {
           # disable fmadd/fmsub so that expected results match generated code in
           # RunFloat64MulAndFloat64Add1 and friends.
           'cflags': ['-ffp-contract=off'],
...
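For reference, -ffp-contract is the GCC/Clang control for exactly this behavior on the C++ side: "fast" lets the compiler contract a multiply and an add into one fused operation, while "off" forces separate instructions, matching the values the tests expect. A tiny hedged example (file and function name are made up):

    // fp_contract_demo.cc (hypothetical): with -O2 -ffp-contract=fast a
    // compiler targeting an FPU with fused multiply-add may emit one fused
    // instruction (single rounding); with -ffp-contract=off it must emit a
    // multiply followed by an add (two roundings).
    double mul_add(double x, double y, double z) { return x * y + z; }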
@@ -1270,7 +1270,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
 }

 TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
-  if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+  if (!IsMipsArchVariant(kMips32r2)) {
     return;
   }
   {

@@ -1283,23 +1283,14 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
     m.Return(n);
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    if (IsMipsArchVariant(kMips32r2)) {
-      EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
-    } else if (IsMipsArchVariant(kMips32r6)) {
-      EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
-    }
+    EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
     ASSERT_EQ(3U, s[0]->InputCount());
     EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
     EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
     EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
     ASSERT_EQ(1U, s[0]->OutputCount());
-    if (IsMipsArchVariant(kMips32r2)) {
-      EXPECT_FALSE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    } else if (IsMipsArchVariant(kMips32r6)) {
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    }
+    EXPECT_FALSE(
+        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
     EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
     EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }

@@ -1313,30 +1304,21 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
     m.Return(n);
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    if (IsMipsArchVariant(kMips32r2)) {
-      EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
-    } else if (IsMipsArchVariant(kMips32r6)) {
-      EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
-    }
+    EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
     ASSERT_EQ(3U, s[0]->InputCount());
     EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
     EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
     EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
     ASSERT_EQ(1U, s[0]->OutputCount());
-    if (IsMipsArchVariant(kMips32r2)) {
-      EXPECT_FALSE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    } else if (IsMipsArchVariant(kMips32r6)) {
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    }
+    EXPECT_FALSE(
+        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
     EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
     EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }
 }

 TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
-  if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+  if (!IsMipsArchVariant(kMips32r2)) {
     return;
   }
   {

@@ -1349,23 +1331,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
     m.Return(n);
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    if (IsMipsArchVariant(kMips32r2)) {
-      EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
-    } else if (IsMipsArchVariant(kMips32r6)) {
-      EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
-    }
+    EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
     ASSERT_EQ(3U, s[0]->InputCount());
     EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
     EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
     EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
     ASSERT_EQ(1U, s[0]->OutputCount());
-    if (IsMipsArchVariant(kMips32r2)) {
-      EXPECT_FALSE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    } else if (IsMipsArchVariant(kMips32r6)) {
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    }
+    EXPECT_FALSE(
+        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
     EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
     EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }

@@ -1379,23 +1352,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
     m.Return(n);
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    if (IsMipsArchVariant(kMips32r2)) {
-      EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
-    } else if (IsMipsArchVariant(kMips32r6)) {
-      EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
-    }
+    EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
     ASSERT_EQ(3U, s[0]->InputCount());
     EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
     EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
     EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
     ASSERT_EQ(1U, s[0]->OutputCount());
-    if (IsMipsArchVariant(kMips32r2)) {
-      EXPECT_FALSE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    } else if (IsMipsArchVariant(kMips32r6)) {
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    }
+    EXPECT_FALSE(
+        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
     EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
     EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }

@@ -1404,83 +1368,59 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
 TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
   StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
                   MachineType::Float32(), MachineType::Float32());
-  Node* const p0 = m.Parameter(0);
-  Node* const p1 = m.Parameter(1);
-  Node* const p2 = m.Parameter(2);
-  Node* n = nullptr;
-  if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+  if (!IsMipsArchVariant(kMips32r2)) {
     return;
   }
-  if (IsMipsArchVariant(kMips32r2)) {
+  {
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const p2 = m.Parameter(2);
+    Node* n = nullptr;
     n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
-  } else if (IsMipsArchVariant(kMips32r6)) {
-    n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
-  }
-  m.Return(n);
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  if (IsMipsArchVariant(kMips32r2)) {
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
     EXPECT_EQ(kMipsMsubS, s[0]->arch_opcode());
-  } else if (IsMipsArchVariant(kMips32r6)) {
-    EXPECT_EQ(kMipsMsubfS, s[0]->arch_opcode());
-  }
-  ASSERT_EQ(3U, s[0]->InputCount());
-  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
-  EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  if (IsMipsArchVariant(kMips32r2)) {
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
     EXPECT_FALSE(
         UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-  } else if (IsMipsArchVariant(kMips32r6)) {
-    EXPECT_TRUE(
-        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }
-  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
 }

 TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
   StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
                   MachineType::Float64(), MachineType::Float64());
-  Node* const p0 = m.Parameter(0);
-  Node* const p1 = m.Parameter(1);
-  Node* const p2 = m.Parameter(2);
-  Node* n = nullptr;
-  if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+  if (!IsMipsArchVariant(kMips32r2)) {
     return;
   }
-  if (IsMipsArchVariant(kMips32r2)) {
+  {
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const p2 = m.Parameter(2);
+    Node* n = nullptr;
    n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
-  } else if (IsMipsArchVariant(kMips32r6)) {
-    n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
-  }
-  m.Return(n);
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  if (IsMipsArchVariant(kMips32r2)) {
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
     EXPECT_EQ(kMipsMsubD, s[0]->arch_opcode());
-  } else if (IsMipsArchVariant(kMips32r6)) {
-    EXPECT_EQ(kMipsMsubfD, s[0]->arch_opcode());
-  }
-  ASSERT_EQ(3U, s[0]->InputCount());
-  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
-  EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  if (IsMipsArchVariant(kMips32r2)) {
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
     EXPECT_FALSE(
         UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-  } else if (IsMipsArchVariant(kMips32r6)) {
-    EXPECT_TRUE(
-        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }
-  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
 }

 TEST_F(InstructionSelectorTest, Float64Max) {
...
@@ -1752,6 +1752,9 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
 }

 TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
+  if (kArchVariant != kMips64r2) {
+    return;
+  }
   {
     StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
                     MachineType::Float32(), MachineType::Float32());

@@ -1762,23 +1765,14 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
     m.Return(n);
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    if (kArchVariant == kMips64r2) {
-      EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
-    } else if (kArchVariant == kMips64r6) {
-      EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
-    }
+    EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
     ASSERT_EQ(3U, s[0]->InputCount());
     EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
     EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
     EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
     ASSERT_EQ(1U, s[0]->OutputCount());
-    if (kArchVariant == kMips64r2) {
-      EXPECT_FALSE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    } else if (kArchVariant == kMips64r6) {
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    }
+    EXPECT_FALSE(
+        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
     EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
     EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }

@@ -1792,29 +1786,23 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
     m.Return(n);
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    if (kArchVariant == kMips64r2) {
-      EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
-    } else if (kArchVariant == kMips64r6) {
-      EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
-    }
+    EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
     ASSERT_EQ(3U, s[0]->InputCount());
     EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
     EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
     EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
     ASSERT_EQ(1U, s[0]->OutputCount());
-    if (kArchVariant == kMips64r2) {
-      EXPECT_FALSE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    } else if (kArchVariant == kMips64r6) {
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    }
+    EXPECT_FALSE(
+        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
     EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
     EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }
 }

 TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
+  if (kArchVariant != kMips64r2) {
+    return;
+  }
   {
     StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
                     MachineType::Float64(), MachineType::Float64());

@@ -1825,23 +1813,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
     m.Return(n);
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    if (kArchVariant == kMips64r2) {
-      EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
-    } else if (kArchVariant == kMips64r6) {
-      EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
-    }
+    EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
     ASSERT_EQ(3U, s[0]->InputCount());
     EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
     EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
     EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
     ASSERT_EQ(1U, s[0]->OutputCount());
-    if (kArchVariant == kMips64r2) {
-      EXPECT_FALSE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    } else if (kArchVariant == kMips64r6) {
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    }
+    EXPECT_FALSE(
+        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
     EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
     EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }

@@ -1855,23 +1834,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
     m.Return(n);
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    if (kArchVariant == kMips64r2) {
-      EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
-    } else if (kArchVariant == kMips64r6) {
-      EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
-    }
+    EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
     ASSERT_EQ(3U, s[0]->InputCount());
     EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
     EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
     EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
     ASSERT_EQ(1U, s[0]->OutputCount());
-    if (kArchVariant == kMips64r2) {
-      EXPECT_FALSE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    } else if (kArchVariant == kMips64r6) {
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-    }
+    EXPECT_FALSE(
+        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
     EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
     EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }

@@ -1880,73 +1850,57 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
 TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
   StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
                   MachineType::Float32(), MachineType::Float32());
-  Node* const p0 = m.Parameter(0);
-  Node* const p1 = m.Parameter(1);
-  Node* const p2 = m.Parameter(2);
-  Node* n;
-  if (kArchVariant == kMips64r2) {
-    n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
-  } else if (kArchVariant == kMips64r6) {
-    n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
+  if (kArchVariant != kMips64r2) {
+    return;
   }
-  m.Return(n);
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  if (kArchVariant == kMips64r2) {
+  {
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const p2 = m.Parameter(2);
+    Node* n;
+    n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
     EXPECT_EQ(kMips64MsubS, s[0]->arch_opcode());
-  } else if (kArchVariant == kMips64r6) {
-    EXPECT_EQ(kMips64MsubfS, s[0]->arch_opcode());
-  }
-  ASSERT_EQ(3U, s[0]->InputCount());
-  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
-  EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  if (kArchVariant == kMips64r2) {
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
     EXPECT_FALSE(
         UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-  } else if (kArchVariant == kMips64r6) {
-    EXPECT_TRUE(
-        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }
-  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
 }

 TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
   StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
                   MachineType::Float64(), MachineType::Float64());
-  Node* const p0 = m.Parameter(0);
-  Node* const p1 = m.Parameter(1);
-  Node* const p2 = m.Parameter(2);
-  Node* n;
-  if (kArchVariant == kMips64r2) {
-    n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
-  } else if (kArchVariant == kMips64r6) {
-    n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
+  if (kArchVariant != kMips64r2) {
+    return;
   }
-  m.Return(n);
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  if (kArchVariant == kMips64r2) {
+  {
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const p2 = m.Parameter(2);
+    Node* n;
+    n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
     EXPECT_EQ(kMips64MsubD, s[0]->arch_opcode());
-  } else if (kArchVariant == kMips64r6) {
-    EXPECT_EQ(kMips64MsubfD, s[0]->arch_opcode());
-  }
-  ASSERT_EQ(3U, s[0]->InputCount());
-  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
-  EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  if (kArchVariant == kMips64r2) {
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
     EXPECT_FALSE(
         UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-  } else if (kArchVariant == kMips64r6) {
-    EXPECT_TRUE(
-        UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
   }
-  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
 }

 TEST_F(InstructionSelectorTest, Float64Max) {
...