Commit 8ae7c9ab authored by dusan.m.milosavljevic, committed by Commit bot

MIPS: [turbofan] Properly implement Float64/32 Min/Max instructions.

TEST=cctest/test-run-machops/Float(64|32)MaxP, Float(64|32)MinP,
       unittests/InstructionSelectorTest.Float64Min|Max
BUG=v8:4206
LOG=N

Review URL: https://codereview.chromium.org/1419753008

Cr-Commit-Position: refs/heads/master@{#31806}
parent bf5c9af9
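For reference, the semantics this patch implements follow the code-generator comments below: Max is `(b < a) ? a : b` and Min is `(a < b) ? a : b`. A minimal C++ illustration of that reference behavior (not part of the patch; names are placeholders):

// Reference semantics only; not V8 code.
static inline double ReferenceFloat64Max(double a, double b) { return (b < a) ? a : b; }
static inline double ReferenceFloat64Min(double a, double b) { return (a < b) ? a : b; }
// Note: written this way the result is b whenever either input is NaN,
// because an ordered comparison involving NaN is false.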
......@@ -721,6 +721,62 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
case kMipsFloat64Max: {
// (b < a) ? a : b
if (IsMipsArchVariant(kMips32r6)) {
__ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
__ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
} else {
__ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
// Left operand is result, passthrough if false.
__ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
}
case kMipsFloat64Min: {
// (a < b) ? a : b
if (IsMipsArchVariant(kMips32r6)) {
__ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
} else {
__ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
// Right operand is result, passthrough if false.
__ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
}
case kMipsFloat32Max: {
// (b < a) ? a : b
if (IsMipsArchVariant(kMips32r6)) {
__ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
__ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
} else {
__ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
// Left operand is result, passthrough if false.
__ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
}
case kMipsFloat32Min: {
// (a < b) ? a : b
if (IsMipsArchVariant(kMips32r6)) {
__ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
} else {
__ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
// Right operand is result, passthrough if false.
__ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
}
case kMipsCvtSD: {
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
......
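The pre-r6 path above depends on the instruction selector (further down) reversing the operands and constraining the output register to the first instruction input. A plain C++ model of the resulting kMipsFloat64Max sequence, offered as a sketch only (variable names stand in for registers, this is not V8 API):

// Sketch: models c_d(OLT, in0, in1) + movt_d(out, in1) for kMipsFloat64Max,
// assuming the selector emitted in0 = b, in1 = a and out aliases in0.
static double ModelMipsFloat64Max(double a, double b) {
  double in0 = b, in1 = a;  // operands reversed by the selector
  double out = in0;         // DefineSameAsFirst: result starts as in0 (= b)
  bool fcc = in0 < in1;     // c_d(OLT, in0, in1): false when unordered (NaN)
  if (fcc) out = in1;       // movt_d(out, in1): conditional move on true
  return out;               // == (b < a) ? a : b
}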
......@@ -79,6 +79,10 @@ namespace compiler {
V(MipsFloat64ExtractHighWord32) \
V(MipsFloat64InsertLowWord32) \
V(MipsFloat64InsertHighWord32) \
V(MipsFloat64Max) \
V(MipsFloat64Min) \
V(MipsFloat32Max) \
V(MipsFloat32Min) \
V(MipsPush) \
V(MipsStoreToStackSlot) \
V(MipsStackClaim) \
......
......@@ -481,16 +481,64 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Max(Node* node) {
MipsOperandGenerator g(this);
if (IsMipsArchVariant(kMips32r6)) {
Emit(kMipsFloat32Max, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)));
} else {
// Reverse operands, and use same reg. for result and right operand.
Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
}
}
void InstructionSelector::VisitFloat64Max(Node* node) {
MipsOperandGenerator g(this);
if (IsMipsArchVariant(kMips32r6)) {
Emit(kMipsFloat64Max, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)));
} else {
// Reverse operands, and use same reg. for result and right operand.
Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
}
}
void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Min(Node* node) {
MipsOperandGenerator g(this);
if (IsMipsArchVariant(kMips32r6)) {
Emit(kMipsFloat32Min, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)));
} else {
// Reverse operands, and use same reg. for result and right operand.
Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
}
}
void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64Min(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r6)) {
    Emit(kMipsFloat64Min, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));
  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Abs(Node* node) {
......@@ -1034,7 +1082,10 @@ InstructionSelector::SupportedMachineOperatorFlags() {
flags |= MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
}
return flags;
return flags | MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat32Min |
MachineOperatorBuilder::kFloat32Max;
}
} // namespace compiler
......
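The register constraints above matter: on r6, cmp_d/cmp_s write the comparison mask into the output register before sel_d/sel_s read the inputs, so the inputs are requested as unique registers that cannot alias the output; pre-r6, the reversed operands plus DefineSameAsFirst let the conditional move leave the correct operand in place when the condition is false. A small C++ model of the r6 kMipsFloat64Min sequence (illustration only, not V8 code):

// Sketch: models cmp_d(OLT, out, a, b) + sel_d(out, b, a) for kMipsFloat64Min.
// The compare writes `out` first, which is why the inputs must live in
// registers distinct from it.
static double ModelMipsFloat64MinR6(double a, double b) {
  bool mask = a < b;    // cmp_d(OLT, out, a, b): out <- all-ones if a < b
  return mask ? a : b;  // sel_d(out, b, a): out <- mask ? a : b
}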
......@@ -786,6 +786,62 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
case kMips64Float64Max: {
// (b < a) ? a : b
if (kArchVariant == kMips64r6) {
__ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
__ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
} else {
__ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
// Left operand is result, passthrough if false.
__ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
}
case kMips64Float64Min: {
// (a < b) ? a : b
if (kArchVariant == kMips64r6) {
__ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
} else {
__ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
// Right operand is result, passthrough if false.
__ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
}
case kMips64Float32Max: {
// (b < a) ? a : b
if (kArchVariant == kMips64r6) {
__ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
__ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
} else {
__ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
// Left operand is result, passthrough if false.
__ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
}
case kMips64Float32Min: {
// (a < b) ? a : b
if (kArchVariant == kMips64r6) {
__ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(0));
} else {
__ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
// Right operand is result, passthrough if false.
__ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
}
case kMips64CvtSD:
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
......
......@@ -95,6 +95,10 @@ namespace compiler {
V(Mips64Float64ExtractHighWord32) \
V(Mips64Float64InsertLowWord32) \
V(Mips64Float64InsertHighWord32) \
V(Mips64Float64Max) \
V(Mips64Float64Min) \
V(Mips64Float32Max) \
V(Mips64Float32Min) \
V(Mips64Push) \
V(Mips64StoreToStackSlot) \
V(Mips64StackClaim) \
......
......@@ -674,16 +674,64 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Max(Node* node) {
Mips64OperandGenerator g(this);
if (kArchVariant == kMips64r6) {
Emit(kMips64Float32Max, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)));
} else {
// Reverse operands, and use same reg. for result and right operand.
Emit(kMips64Float32Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
}
}
void InstructionSelector::VisitFloat64Max(Node* node) {
Mips64OperandGenerator g(this);
if (kArchVariant == kMips64r6) {
Emit(kMips64Float64Max, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)));
} else {
// Reverse operands, and use same reg. for result and right operand.
Emit(kMips64Float64Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
}
}
void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Min(Node* node) {
Mips64OperandGenerator g(this);
if (kArchVariant == kMips64r6) {
Emit(kMips64Float32Min, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)));
} else {
// Reverse operands, and use same reg. for result and right operand.
Emit(kMips64Float32Min, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
}
}
void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64Min(Node* node) {
  Mips64OperandGenerator g(this);
  if (kArchVariant == kMips64r6) {
    Emit(kMips64Float64Min, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));
  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMips64Float64Min, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Abs(Node* node) {
......@@ -1296,7 +1344,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
return MachineOperatorBuilder::kFloat64RoundDown |
return MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat32Min |
MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
}
......
......@@ -818,10 +818,10 @@ class Assembler : public AssemblerBase {
void movz_s(FPURegister fd, FPURegister fs, Register rt);
void movz_d(FPURegister fd, FPURegister fs, Register rt);
void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movn_s(FPURegister fd, FPURegister fs, Register rt);
void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
......
......@@ -854,10 +854,10 @@ class Assembler : public AssemblerBase {
void movz_s(FPURegister fd, FPURegister fs, Register rt);
void movz_d(FPURegister fd, FPURegister fs, Register rt);
void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movn_s(FPURegister fd, FPURegister fs, Register rt);
void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
......
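The assembler changes above give movt_s/movt_d/movf_s/movf_d a default condition-code argument, so the code generator can emit the conditional moves without threading a cc value through; the compares it emits (c_d/c_s with no cc argument) presumably target the same default (first) FPU condition bit. A usage sketch with placeholder register names, not taken from the patch:

// Sketch: the defaulted `cc` parameter lets both calls refer to the same
// default FPU condition bit without naming it explicitly.
__ c_d(OLT, lhs, rhs);   // sets the default FPU condition bit
__ movt_d(dst, rhs);     // moves rhs into dst only when that bit is set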
......@@ -3391,6 +3391,70 @@ TEST(RunFloat64AddP) {
}
TEST(RunFloat32MaxP) {
RawMachineAssemblerTester<int32_t> m;
Float32BinopTester bt(&m);
if (!m.machine()->Float32Max().IsSupported()) return;
bt.AddReturn(m.Float32Max(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
FOR_FLOAT32_INPUTS(pr) {
double expected = *pl > *pr ? *pl : *pr;
CheckDoubleEq(expected, bt.call(*pl, *pr));
}
}
}
TEST(RunFloat64MaxP) {
RawMachineAssemblerTester<int32_t> m;
Float64BinopTester bt(&m);
if (!m.machine()->Float64Max().IsSupported()) return;
bt.AddReturn(m.Float64Max(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
double expected = *pl > *pr ? *pl : *pr;
CheckDoubleEq(expected, bt.call(*pl, *pr));
}
}
}
TEST(RunFloat32MinP) {
RawMachineAssemblerTester<int32_t> m;
Float32BinopTester bt(&m);
if (!m.machine()->Float32Min().IsSupported()) return;
bt.AddReturn(m.Float32Min(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
FOR_FLOAT32_INPUTS(pr) {
double expected = *pl < *pr ? *pl : *pr;
CheckDoubleEq(expected, bt.call(*pl, *pr));
}
}
}
TEST(RunFloat64MinP) {
RawMachineAssemblerTester<int32_t> m;
Float64BinopTester bt(&m);
if (!m.machine()->Float64Min().IsSupported()) return;
bt.AddReturn(m.Float64Min(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
double expected = *pl < *pr ? *pl : *pr;
CheckDoubleEq(expected, bt.call(*pl, *pr));
}
}
}
TEST(RunFloat32SubP) {
RawMachineAssemblerTester<int32_t> m;
Float32BinopTester bt(&m);
......
......@@ -845,6 +845,71 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float32Max) {
StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Max(p0, p1);
m.Return(n);
Stream s = m.Build();
// Float32Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsFloat32Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float32Min) {
StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Min(p0, p1);
m.Return(n);
Stream s = m.Build();
// Float32Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsFloat32Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Max(p0, p1);
m.Return(n);
Stream s = m.Build();
// Float64Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsFloat64Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float64Min) {
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Min(p0, p1);
m.Return(n);
Stream s = m.Build();
// Float64Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsFloat64Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -870,6 +870,70 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float32Max) {
StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Max(p0, p1);
m.Return(n);
Stream s = m.Build();
// Float32Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Float32Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float32Min) {
StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Min(p0, p1);
m.Return(n);
Stream s = m.Build();
// Float32Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Float32Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Max(p0, p1);
m.Return(n);
Stream s = m.Build();
// Float64Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Float64Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
TEST_F(InstructionSelectorTest, Float64Min) {
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Min(p0, p1);
m.Return(n);
Stream s = m.Build();
// Float64Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Float64Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
} // namespace compiler
} // namespace internal
} // namespace v8