Commit 9af9f1d0 authored by Benedikt Meurer

[turbofan] Add new Float32Abs and Float64Abs operators.

These operators compute the absolute floating-point value of an
arbitrary input, and are implemented without any branches (i.e. using
vabs on arm, and andps/andpd on x86).
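For reference, the branch-free lowering works by clearing the IEEE 754 sign
bit with a bitwise AND; that is what the andps/andpd sequences in the x86
code generators do against the mask built with pcmpeqd/psrlq. A minimal C++
sketch of that bit manipulation (purely illustrative, not part of this
change; the *Sketch names are made up here):

  #include <cstdint>
  #include <cstring>

  // float64: clear bit 63; the mask 2^63 - 1 is what psrlq by 1 produces.
  double Float64AbsSketch(double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    bits &= ~(uint64_t{1} << 63);  // drop the sign, keep exponent/mantissa
    std::memcpy(&value, &bits, sizeof(value));
    return value;
  }

  // float32: clear bit 31; the mask 2^31 - 1 is what psrlq by 33 leaves in
  // the low lane of the scratch register.
  float Float32AbsSketch(float value) {
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    bits &= 0x7fffffffu;
    std::memcpy(&value, &bits, sizeof(value));
    return value;
  }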

R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/1066393002

Cr-Commit-Position: refs/heads/master@{#27662}
parent ed6733e8
@@ -554,6 +554,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArmVsqrtF32:
       __ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
+    case kArmVabsF32:
+      __ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      break;
     case kArmVnegF32:
       __ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
@@ -616,6 +619,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArmVsqrtF64:
       __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
       break;
+    case kArmVabsF64:
+      __ vabs(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
     case kArmVnegF64:
       __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
       break;
@@ -805,7 +811,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       ASSEMBLE_CHECKED_STORE_FLOAT(64);
       break;
   }
-}
+}  // NOLINT(readability/fn_size)
 
 
 // Assembles branches after an instruction.
...
@@ -51,6 +51,7 @@ namespace compiler {
   V(ArmVmlaF32) \
   V(ArmVmlsF32) \
   V(ArmVdivF32) \
+  V(ArmVabsF32) \
   V(ArmVnegF32) \
   V(ArmVsqrtF32) \
   V(ArmVcmpF64) \
@@ -61,6 +62,7 @@ namespace compiler {
   V(ArmVmlsF64) \
   V(ArmVdivF64) \
   V(ArmVmodF64) \
+  V(ArmVabsF64) \
   V(ArmVnegF64) \
   V(ArmVsqrtF64) \
   V(ArmVrintmF64) \
...
@@ -1044,6 +1044,16 @@ void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
 void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
 
 
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+  VisitRR(this, kArmVabsF32, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+  VisitRR(this, kArmVabsF64, node);
+}
+
+
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kArmVsqrtF32, node);
 }
@@ -1502,6 +1512,8 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
   MachineOperatorBuilder::Flags flags =
+      MachineOperatorBuilder::kFloat32Abs |
+      MachineOperatorBuilder::kFloat64Abs |
       MachineOperatorBuilder::kInt32DivIsSafe |
       MachineOperatorBuilder::kUint32DivIsSafe;
...
@@ -688,6 +688,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0),
               i.InputFloat32Register(1));
       break;
+    case kArm64Float32Abs:
+      __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+      break;
     case kArm64Float32Sqrt:
       __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
@@ -736,6 +739,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
       break;
+    case kArm64Float64Abs:
+      __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kArm64Float64Neg:
       __ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
...
@@ -85,6 +85,7 @@ namespace compiler {
   V(Arm64Float32Div) \
   V(Arm64Float32Max) \
   V(Arm64Float32Min) \
+  V(Arm64Float32Abs) \
   V(Arm64Float32Sqrt) \
   V(Arm64Float64Cmp) \
   V(Arm64Float64Add) \
@@ -94,6 +95,7 @@ namespace compiler {
   V(Arm64Float64Mod) \
   V(Arm64Float64Max) \
   V(Arm64Float64Min) \
+  V(Arm64Float64Abs) \
   V(Arm64Float64Neg) \
   V(Arm64Float64Sqrt) \
   V(Arm64Float64RoundDown) \
...
@@ -1169,6 +1169,16 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+  VisitRR(this, kArm64Float32Abs, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+  VisitRR(this, kArm64Float64Abs, node);
+}
+
+
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kArm64Float32Sqrt, node);
 }
@@ -1753,8 +1763,10 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kFloat32Max |
+  return MachineOperatorBuilder::kFloat32Abs |
+         MachineOperatorBuilder::kFloat32Max |
          MachineOperatorBuilder::kFloat32Min |
+         MachineOperatorBuilder::kFloat64Abs |
          MachineOperatorBuilder::kFloat64Max |
          MachineOperatorBuilder::kFloat64Min |
          MachineOperatorBuilder::kFloat64RoundDown |
...
@@ -55,21 +55,39 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
     if (matcher.Matched()) {
       if (matcher.IfTrue() == merge->InputAt(1)) std::swap(vtrue, vfalse);
       Node* cond = matcher.Branch()->InputAt(0);
-      if (cond->opcode() == IrOpcode::kFloat64LessThan) {
-        if (cond->InputAt(0) == vtrue && cond->InputAt(1) == vfalse &&
+      if (cond->opcode() == IrOpcode::kFloat32LessThan) {
+        Float32BinopMatcher mcond(cond);
+        if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+            vfalse->opcode() == IrOpcode::kFloat32Sub &&
+            machine()->HasFloat32Abs()) {
+          Float32BinopMatcher mvfalse(vfalse);
+          if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+            return Change(node, machine()->Float32Abs(), vtrue);
+          }
+        }
+        if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
+            machine()->HasFloat32Min()) {
+          return Change(node, machine()->Float32Min(), vtrue, vfalse);
+        } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
+                   machine()->HasFloat32Max()) {
+          return Change(node, machine()->Float32Max(), vtrue, vfalse);
+        }
+      } else if (cond->opcode() == IrOpcode::kFloat64LessThan) {
+        Float64BinopMatcher mcond(cond);
+        if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+            vfalse->opcode() == IrOpcode::kFloat64Sub &&
+            machine()->HasFloat64Abs()) {
+          Float64BinopMatcher mvfalse(vfalse);
+          if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+            return Change(node, machine()->Float64Abs(), vtrue);
+          }
+        }
+        if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
             machine()->HasFloat64Min()) {
-          node->set_op(machine()->Float64Min());
-          node->ReplaceInput(0, vtrue);
-          node->ReplaceInput(1, vfalse);
-          node->TrimInputCount(2);
-          return Changed(node);
-        } else if (cond->InputAt(0) == vfalse && cond->InputAt(1) == vtrue &&
+          return Change(node, machine()->Float64Min(), vtrue, vfalse);
+        } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
                    machine()->HasFloat64Max()) {
-          node->set_op(machine()->Float64Max());
-          node->ReplaceInput(0, vtrue);
-          node->ReplaceInput(1, vfalse);
-          node->TrimInputCount(2);
-          return Changed(node);
+          return Change(node, machine()->Float64Max(), vtrue, vfalse);
         }
       }
     }
@@ -91,27 +109,64 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
   Node* vtrue = NodeProperties::GetValueInput(node, 1);
   Node* vfalse = NodeProperties::GetValueInput(node, 2);
   if (vtrue == vfalse) return Replace(vtrue);
-  if (cond->opcode() == IrOpcode::kFloat64LessThan) {
-    if (cond->InputAt(0) == vtrue && cond->InputAt(1) == vfalse &&
+  if (cond->opcode() == IrOpcode::kFloat32LessThan) {
+    Float32BinopMatcher mcond(cond);
+    if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+        vfalse->opcode() == IrOpcode::kFloat32Sub &&
+        machine()->HasFloat32Abs()) {
+      Float32BinopMatcher mvfalse(vfalse);
+      if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+        return Change(node, machine()->Float32Abs(), vtrue);
+      }
+    }
+    if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
+        machine()->HasFloat32Min()) {
+      return Change(node, machine()->Float32Min(), vtrue, vfalse);
+    } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
+               machine()->HasFloat32Max()) {
+      return Change(node, machine()->Float32Max(), vtrue, vfalse);
+    }
+  } else if (cond->opcode() == IrOpcode::kFloat64LessThan) {
+    Float64BinopMatcher mcond(cond);
+    if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+        vfalse->opcode() == IrOpcode::kFloat64Sub &&
+        machine()->HasFloat64Abs()) {
+      Float64BinopMatcher mvfalse(vfalse);
+      if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+        return Change(node, machine()->Float64Abs(), vtrue);
+      }
+    }
+    if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
         machine()->HasFloat64Min()) {
-      node->set_op(machine()->Float64Min());
-      node->ReplaceInput(0, vtrue);
-      node->ReplaceInput(1, vfalse);
-      node->TrimInputCount(2);
-      return Changed(node);
-    } else if (cond->InputAt(0) == vfalse && cond->InputAt(1) == vtrue &&
+      return Change(node, machine()->Float64Min(), vtrue, vfalse);
+    } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
               machine()->HasFloat64Max()) {
-      node->set_op(machine()->Float64Max());
-      node->ReplaceInput(0, vtrue);
-      node->ReplaceInput(1, vfalse);
-      node->TrimInputCount(2);
-      return Changed(node);
+      return Change(node, machine()->Float64Max(), vtrue, vfalse);
    }
  }
   return NoChange();
 }
 
 
+Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
+                                        Node* a) {
+  node->set_op(op);
+  node->ReplaceInput(0, a);
+  node->TrimInputCount(1);
+  return Changed(node);
+}
+
+
+Reduction CommonOperatorReducer::Change(Node* node, Operator const* op, Node* a,
+                                        Node* b) {
+  node->set_op(op);
+  node->ReplaceInput(0, a);
+  node->ReplaceInput(1, b);
+  node->TrimInputCount(2);
+  return Changed(node);
+}
+
+
 CommonOperatorBuilder* CommonOperatorReducer::common() const {
   return jsgraph()->common();
 }
...
@@ -16,6 +16,7 @@ class CommonOperatorBuilder;
 class Graph;
 class JSGraph;
 class MachineOperatorBuilder;
+class Operator;
 
 
 // Performs strength reduction on nodes that have common operators.
@@ -31,6 +32,9 @@ class CommonOperatorReducer FINAL : public Reducer {
   Reduction ReducePhi(Node* node);
   Reduction ReduceSelect(Node* node);
 
+  Reduction Change(Node* node, Operator const* op, Node* a);
+  Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
+
   CommonOperatorBuilder* common() const;
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
...
@@ -477,6 +477,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kSSEFloat32Sqrt:
       __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
+    case kSSEFloat32Abs: {
+      // TODO(bmeurer): Use 128-bit constants.
+      // TODO(turbofan): Add AVX version with relaxed register constraints.
+      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ psrlq(kScratchDoubleReg, 33);
+      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+      break;
+    }
     case kSSEFloat32Neg: {
       // TODO(bmeurer): Use 128-bit constants.
       // TODO(turbofan): Add AVX version with relaxed register constraints.
@@ -531,6 +539,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ add(esp, Immediate(kDoubleSize));
       break;
     }
+    case kSSEFloat64Abs: {
+      // TODO(bmeurer): Use 128-bit constants.
+      // TODO(turbofan): Add AVX version with relaxed register constraints.
+      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ psrlq(kScratchDoubleReg, 1);
+      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+      break;
+    }
     case kSSEFloat64Neg: {
       // TODO(bmeurer): Use 128-bit constants.
       // TODO(turbofan): Add AVX version with relaxed register constraints.
...
@@ -38,6 +38,7 @@ namespace compiler {
   V(SSEFloat32Div) \
   V(SSEFloat32Max) \
   V(SSEFloat32Min) \
+  V(SSEFloat32Abs) \
   V(SSEFloat32Neg) \
   V(SSEFloat32Sqrt) \
   V(SSEFloat64Cmp) \
@@ -48,6 +49,7 @@ namespace compiler {
   V(SSEFloat64Mod) \
   V(SSEFloat64Max) \
   V(SSEFloat64Min) \
+  V(SSEFloat64Abs) \
   V(SSEFloat64Neg) \
   V(SSEFloat64Sqrt) \
   V(SSEFloat64Round) \
...
@@ -759,6 +759,18 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat32Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitROFloat(this, node, kSSEFloat32Sqrt);
 }
@@ -1196,8 +1208,10 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
   MachineOperatorBuilder::Flags flags =
+      MachineOperatorBuilder::kFloat32Abs |
       MachineOperatorBuilder::kFloat32Max |
       MachineOperatorBuilder::kFloat32Min |
+      MachineOperatorBuilder::kFloat64Abs |
       MachineOperatorBuilder::kFloat64Max |
       MachineOperatorBuilder::kFloat64Min |
       MachineOperatorBuilder::kWord32ShiftIsSafe;
...
@@ -764,6 +764,8 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsDouble(node), VisitFloat32Min(node);
     case IrOpcode::kFloat32Max:
       return MarkAsDouble(node), VisitFloat32Max(node);
+    case IrOpcode::kFloat32Abs:
+      return MarkAsDouble(node), VisitFloat32Abs(node);
     case IrOpcode::kFloat32Sqrt:
       return MarkAsDouble(node), VisitFloat32Sqrt(node);
     case IrOpcode::kFloat32Equal:
@@ -786,6 +788,8 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsDouble(node), VisitFloat64Min(node);
     case IrOpcode::kFloat64Max:
       return MarkAsDouble(node), VisitFloat64Max(node);
+    case IrOpcode::kFloat64Abs:
+      return MarkAsDouble(node), VisitFloat64Abs(node);
     case IrOpcode::kFloat64Sqrt:
       return MarkAsDouble(node), VisitFloat64Sqrt(node);
     case IrOpcode::kFloat64Equal:
...
@@ -122,12 +122,14 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
   V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
   V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
   V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
+  V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
   V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
   V(Float64Add, Operator::kCommutative, 2, 0, 1) \
   V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
   V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
   V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
   V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
+  V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
   V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
   V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
   V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
...
@@ -74,16 +74,18 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
   // for operations that are unsupported by some back-ends.
   enum Flag {
     kNoFlags = 0u,
-    kFloat32Max = 1u << 0,
-    kFloat32Min = 1u << 1,
-    kFloat64Max = 1u << 2,
-    kFloat64Min = 1u << 3,
-    kFloat64RoundDown = 1u << 4,
-    kFloat64RoundTruncate = 1u << 5,
-    kFloat64RoundTiesAway = 1u << 6,
-    kInt32DivIsSafe = 1u << 7,
-    kUint32DivIsSafe = 1u << 8,
-    kWord32ShiftIsSafe = 1u << 9
+    kFloat32Abs = 1u << 0,
+    kFloat32Max = 1u << 1,
+    kFloat32Min = 1u << 2,
+    kFloat64Abs = 1u << 3,
+    kFloat64Max = 1u << 4,
+    kFloat64Min = 1u << 5,
+    kFloat64RoundDown = 1u << 6,
+    kFloat64RoundTruncate = 1u << 7,
+    kFloat64RoundTiesAway = 1u << 8,
+    kInt32DivIsSafe = 1u << 9,
+    kUint32DivIsSafe = 1u << 10,
+    kWord32ShiftIsSafe = 1u << 11
   };
 
   typedef base::Flags<Flag, unsigned> Flags;
@@ -197,6 +199,14 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
   bool HasFloat64Max() { return flags_ & kFloat64Max; }
   bool HasFloat64Min() { return flags_ & kFloat64Min; }
 
+  // Floating point abs complying to IEEE 754 (single-precision).
+  const Operator* Float32Abs();
+  bool HasFloat32Abs() const { return flags_ & kFloat32Abs; }
+
+  // Floating point abs complying to IEEE 754 (double-precision).
+  const Operator* Float64Abs();
+  bool HasFloat64Abs() const { return flags_ & kFloat64Abs; }
+
   // Floating point rounding.
   const Operator* Float64RoundDown();
   const Operator* Float64RoundTruncate();
...
@@ -463,6 +463,12 @@ void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
 void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
 
 
+void InstructionSelector::VisitFloat32Abs(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) { UNREACHABLE(); }
+
+
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kMipsSqrtS, node);
 }
...
@@ -612,6 +612,12 @@ void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
 void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
 
 
+void InstructionSelector::VisitFloat32Abs(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) { UNREACHABLE(); }
+
+
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kMips64SqrtS, node);
 }
...
@@ -30,6 +30,8 @@ struct NodeMatcher {
   }
   Node* InputAt(int index) const { return node()->InputAt(index); }
 
+  bool Equals(const Node* node) const { return node_ == node; }
+
   bool IsComparison() const;
 
 #define DEFINE_IS_OPCODE(Opcode) \
@@ -141,6 +143,7 @@ struct FloatMatcher FINAL : public ValueMatcher<T, kOpcode> {
     return this->Is(0.0) && std::signbit(this->Value());
   }
   bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
+  bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
 };
 
 typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
...
@@ -249,6 +249,7 @@
   V(Float32Div) \
   V(Float32Max) \
   V(Float32Min) \
+  V(Float32Abs) \
   V(Float32Sqrt) \
   V(Float64Add) \
   V(Float64Sub) \
@@ -257,6 +258,7 @@
   V(Float64Mod) \
   V(Float64Max) \
   V(Float64Min) \
+  V(Float64Abs) \
   V(Float64Sqrt) \
   V(Float64RoundDown) \
   V(Float64RoundTruncate) \
...
@@ -983,6 +983,12 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat32Abs(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) { UNREACHABLE(); }
+
+
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   VisitRR(this, kPPC_SqrtDouble, node);
 }
...
@@ -340,6 +340,7 @@ class RawMachineAssembler : public GraphBuilder {
   Node* Float32Div(Node* a, Node* b) {
     return NewNode(machine()->Float32Div(), a, b);
   }
+  Node* Float32Abs(Node* a) { return NewNode(machine()->Float32Abs(), a); }
   Node* Float32Sqrt(Node* a) { return NewNode(machine()->Float32Sqrt(), a); }
   Node* Float32Equal(Node* a, Node* b) {
     return NewNode(machine()->Float32Equal(), a, b);
@@ -373,6 +374,7 @@ class RawMachineAssembler : public GraphBuilder {
   Node* Float64Mod(Node* a, Node* b) {
     return NewNode(machine()->Float64Mod(), a, b);
   }
+  Node* Float64Abs(Node* a) { return NewNode(machine()->Float64Abs(), a); }
   Node* Float64Sqrt(Node* a) { return NewNode(machine()->Float64Sqrt(), a); }
   Node* Float64Equal(Node* a, Node* b) {
     return NewNode(machine()->Float64Equal(), a, b);
...
@@ -1019,6 +1019,7 @@ class RepresentationSelector {
       case IrOpcode::kFloat64Mod:
       case IrOpcode::kFloat64Min:
         return VisitFloat64Binop(node);
+      case IrOpcode::kFloat64Abs:
      case IrOpcode::kFloat64Sqrt:
      case IrOpcode::kFloat64RoundDown:
      case IrOpcode::kFloat64RoundTruncate:
...
@@ -2134,6 +2134,12 @@ Bounds Typer::Visitor::TypeFloat32Min(Node* node) {
 }
 
 
+Bounds Typer::Visitor::TypeFloat32Abs(Node* node) {
+  // TODO(turbofan): We should be able to infer a better type here.
+  return Bounds(Type::Number());
+}
+
+
 Bounds Typer::Visitor::TypeFloat32Sqrt(Node* node) {
   return Bounds(Type::Number());
 }
@@ -2189,6 +2195,12 @@ Bounds Typer::Visitor::TypeFloat64Min(Node* node) {
 }
 
 
+Bounds Typer::Visitor::TypeFloat64Abs(Node* node) {
+  // TODO(turbofan): We should be able to infer a better type here.
+  return Bounds(Type::Number());
+}
+
+
 Bounds Typer::Visitor::TypeFloat64Sqrt(Node* node) {
   return Bounds(Type::Number());
 }
...
@@ -793,6 +793,7 @@ void Verifier::Visitor::Check(Node* node) {
     case IrOpcode::kFloat32Div:
     case IrOpcode::kFloat32Max:
     case IrOpcode::kFloat32Min:
+    case IrOpcode::kFloat32Abs:
     case IrOpcode::kFloat32Sqrt:
     case IrOpcode::kFloat32Equal:
     case IrOpcode::kFloat32LessThan:
@@ -804,6 +805,7 @@ void Verifier::Visitor::Check(Node* node) {
     case IrOpcode::kFloat64Mod:
     case IrOpcode::kFloat64Max:
     case IrOpcode::kFloat64Min:
+    case IrOpcode::kFloat64Abs:
     case IrOpcode::kFloat64Sqrt:
     case IrOpcode::kFloat64RoundDown:
     case IrOpcode::kFloat64RoundTruncate:
...
@@ -726,6 +726,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kSSEFloat32Div:
       ASSEMBLE_SSE_BINOP(divss);
       break;
+    case kSSEFloat32Abs: {
+      // TODO(bmeurer): Use RIP relative 128-bit constants.
+      // TODO(turbofan): Add AVX version with relaxed register constraints.
+      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ psrlq(kScratchDoubleReg, 33);
+      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+      break;
+    }
     case kSSEFloat32Neg: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
       // TODO(turbofan): Add AVX version with relaxed register constraints.
@@ -799,6 +807,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kSSEFloat64Min:
       ASSEMBLE_SSE_BINOP(minsd);
       break;
+    case kSSEFloat64Abs: {
+      // TODO(bmeurer): Use RIP relative 128-bit constants.
+      // TODO(turbofan): Add AVX version with relaxed register constraints.
+      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ psrlq(kScratchDoubleReg, 1);
+      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+      break;
+    }
     case kSSEFloat64Neg: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
       // TODO(turbofan): Add AVX version with relaxed register constraints.
...
@@ -52,6 +52,7 @@ namespace compiler {
   V(SSEFloat32Sub) \
   V(SSEFloat32Mul) \
   V(SSEFloat32Div) \
+  V(SSEFloat32Abs) \
   V(SSEFloat32Neg) \
   V(SSEFloat32Sqrt) \
   V(SSEFloat32Max) \
@@ -63,6 +64,7 @@ namespace compiler {
   V(SSEFloat64Mul) \
   V(SSEFloat64Div) \
   V(SSEFloat64Mod) \
+  V(SSEFloat64Abs) \
   V(SSEFloat64Neg) \
   V(SSEFloat64Sqrt) \
   V(SSEFloat64Round) \
...
@@ -889,6 +889,12 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat32Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSEFloat32Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -953,6 +959,12 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -1508,8 +1520,10 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
   MachineOperatorBuilder::Flags flags =
+      MachineOperatorBuilder::kFloat32Abs |
       MachineOperatorBuilder::kFloat32Max |
       MachineOperatorBuilder::kFloat32Min |
+      MachineOperatorBuilder::kFloat64Abs |
       MachineOperatorBuilder::kFloat64Max |
       MachineOperatorBuilder::kFloat64Min |
       MachineOperatorBuilder::kWord32ShiftIsSafe;
...
@@ -24,8 +24,7 @@ var GlobalArray = global.Array;
 // ECMA 262 - 15.8.2.1
 function MathAbs(x) {
   x = +x;
-  if (x > 0) return x;
-  return 0 - x;
+  return (x > 0) ? x : 0 - x;
 }
 
 // ECMA 262 - 15.8.2.2
...
@@ -4945,6 +4945,40 @@ TEST(RunFloat64InsertHighWord32) {
 }
 
 
+TEST(RunFloat32Abs) {
+  float input = -1.0;
+  float result = 0.0;
+  RawMachineAssemblerTester<int32_t> m;
+  if (!m.machine()->HasFloat32Abs()) return;
+  m.StoreToPointer(&result, kMachFloat32,
+                   m.Float32Abs(m.LoadFromPointer(&input, kMachFloat32)));
+  m.Return(m.Int32Constant(0));
+  FOR_FLOAT32_INPUTS(i) {
+    input = *i;
+    float expected = std::abs(input);
+    CHECK_EQ(0, m.Call());
+    CheckFloatEq(expected, result);
+  }
+}
+
+
+TEST(RunFloat64Abs) {
+  double input = -1.0;
+  double result = 0.0;
+  RawMachineAssemblerTester<int32_t> m;
+  if (!m.machine()->HasFloat64Abs()) return;
+  m.StoreToPointer(&result, kMachFloat64,
+                   m.Float64Abs(m.LoadFromPointer(&input, kMachFloat64)));
+  m.Return(m.Int32Constant(0));
+  FOR_FLOAT64_INPUTS(i) {
+    input = *i;
+    double expected = std::abs(input);
+    CHECK_EQ(0, m.Call());
+    CheckDoubleEq(expected, result);
+  }
+}
+
+
 static double two_30 = 1 << 30;  // 2^30 is a smi boundary.
 static double two_52 = two_30 * (1 << 22);  // 2^52 is a precision boundary.
 static double kValues[] = {0.1,
...
@@ -102,14 +102,12 @@ class ValueHelper {
   static std::vector<double> float64_vector() {
     static const double nan = std::numeric_limits<double>::quiet_NaN();
     static const double values[] = {
-        0.125, 0.25, 0.375, 0.5,
-        1.25, -1.75, 2, 5.125,
-        6.25, 0.0, -0.0, 982983.25,
-        888, 2147483647.0, -999.75, 3.1e7,
-        -2e66, 3e-88, -2147483648.0, V8_INFINITY,
-        -V8_INFINITY, nan, 2147483647.375, 2147483647.75,
-        2147483648.0, 2147483648.25, 2147483649.25, -2147483647.0,
-        -2147483647.125, -2147483647.875, -2147483648.25, -2147483649.5};
+        0.125, 0.25, 0.375, 0.5, 1.25, -1.75, 2, 5.125, 6.25, 0.0, -0.0,
+        982983.25, 888, 2147483647.0, -999.75, 3.1e7, -2e66, 3e-88,
+        -2147483648.0, V8_INFINITY, -V8_INFINITY, -nan, nan, 2147483647.375,
+        2147483647.75, 2147483648.0, 2147483648.25, 2147483649.25,
+        -2147483647.0, -2147483647.125, -2147483647.875, -2147483648.25,
+        -2147483649.5};
     return std::vector<double>(&values[0], &values[arraysize(values)]);
   }
...
@@ -1679,6 +1679,36 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFAITest,
                         ::testing::ValuesIn(kFAIs));
 
 
+TEST_F(InstructionSelectorTest, Float32Abs) {
+  StreamBuilder m(this, kMachFloat32, kMachFloat32);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float32Abs(p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmVabsF32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat64);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float64Abs(p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmVabsF64, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
 TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
   {
     StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32,
...
@@ -2412,6 +2412,36 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
 }
 
 
+TEST_F(InstructionSelectorTest, Float32Abs) {
+  StreamBuilder m(this, kMachFloat32, kMachFloat32);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float32Abs(p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArm64Float32Abs, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat64);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float64Abs(p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArm64Float64Abs, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
 TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
   StreamBuilder m(this, kMachFloat64, kMachFloat64);
   Node* const p0 = m.Parameter(0);
...
@@ -108,7 +108,88 @@ TEST_F(CommonOperatorReducerTest, RedundantPhi) {
 }
 
 
-TEST_F(CommonOperatorReducerTest, PhiToFloat64MaxOrFloat64Min) {
+TEST_F(CommonOperatorReducerTest, PhiToFloat32Abs) {
+  Node* p0 = Parameter(0);
+  Node* c0 = Float32Constant(0.0);
+  Node* check = graph()->NewNode(machine()->Float32LessThan(), c0, p0);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = p0;
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse = graph()->NewNode(machine()->Float32Sub(), c0, p0);
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi =
+      graph()->NewNode(common()->Phi(kMachFloat32, 2), vtrue, vfalse, merge);
+  Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat32Abs);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat32Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat64Abs) {
+  Node* p0 = Parameter(0);
+  Node* c0 = Float64Constant(0.0);
+  Node* check = graph()->NewNode(machine()->Float64LessThan(), c0, p0);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = p0;
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse = graph()->NewNode(machine()->Float64Sub(), c0, p0);
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi =
+      graph()->NewNode(common()->Phi(kMachFloat64, 2), vtrue, vfalse, merge);
+  Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat64Abs);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat32Max) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(common()->Phi(kMachFloat32, 2), p1, p0, merge);
+  Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat32Max);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat64Max) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(common()->Phi(kMachFloat64, 2), p1, p0, merge);
+  Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat64Max);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat32Min) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(common()->Phi(kMachFloat32, 2), p0, p1, merge);
+  Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat32Min);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat64Min) {
   Node* p0 = Parameter(0);
   Node* p1 = Parameter(1);
   Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
@@ -116,16 +197,10 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat64MaxOrFloat64Min) {
   Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
   Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
   Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Reduction r1 =
-      Reduce(graph()->NewNode(common()->Phi(kMachFloat64, 2), p1, p0, merge),
-             MachineOperatorBuilder::kFloat64Max);
-  ASSERT_TRUE(r1.Changed());
-  EXPECT_THAT(r1.replacement(), IsFloat64Max(p1, p0));
-  Reduction r2 =
-      Reduce(graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge),
-             MachineOperatorBuilder::kFloat64Min);
-  ASSERT_TRUE(r2.Changed());
-  EXPECT_THAT(r2.replacement(), IsFloat64Min(p0, p1));
+  Node* phi = graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge);
+  Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat64Min);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
 }
 
 
@@ -146,20 +221,77 @@ TEST_F(CommonOperatorReducerTest, RedundantSelect) {
 }
 
 
-TEST_F(CommonOperatorReducerTest, SelectToFloat64MaxOrFloat64Min) {
+TEST_F(CommonOperatorReducerTest, SelectToFloat32Abs) {
+  Node* p0 = Parameter(0);
+  Node* c0 = Float32Constant(0.0);
+  Node* check = graph()->NewNode(machine()->Float32LessThan(), c0, p0);
+  Node* select =
+      graph()->NewNode(common()->Select(kMachFloat32), check, p0,
+                       graph()->NewNode(machine()->Float32Sub(), c0, p0));
+  Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Abs);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat32Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64Abs) {
+  Node* p0 = Parameter(0);
+  Node* c0 = Float64Constant(0.0);
+  Node* check = graph()->NewNode(machine()->Float64LessThan(), c0, p0);
+  Node* select =
+      graph()->NewNode(common()->Select(kMachFloat64), check, p0,
+                       graph()->NewNode(machine()->Float64Sub(), c0, p0));
+  Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Abs);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat32Max) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+  Node* select =
+      graph()->NewNode(common()->Select(kMachFloat32), check, p1, p0);
+  Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Max);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64Max) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+  Node* select =
+      graph()->NewNode(common()->Select(kMachFloat64), check, p1, p0);
+  Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Max);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat32Min) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+  Node* select =
+      graph()->NewNode(common()->Select(kMachFloat32), check, p0, p1);
+  Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Min);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64Min) {
   Node* p0 = Parameter(0);
   Node* p1 = Parameter(1);
   Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
-  Reduction r1 =
-      Reduce(graph()->NewNode(common()->Select(kMachFloat64), check, p1, p0),
-             MachineOperatorBuilder::kFloat64Max);
-  ASSERT_TRUE(r1.Changed());
-  EXPECT_THAT(r1.replacement(), IsFloat64Max(p1, p0));
-  Reduction r2 =
-      Reduce(graph()->NewNode(common()->Select(kMachFloat64), check, p0, p1),
-             MachineOperatorBuilder::kFloat64Min);
-  ASSERT_TRUE(r2.Changed());
-  EXPECT_THAT(r2.replacement(), IsFloat64Min(p0, p1));
+  Node* select =
+      graph()->NewNode(common()->Select(kMachFloat64), check, p0, p1);
+  Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Min);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
 }
 
 }  // namespace compiler
...
@@ -639,6 +639,40 @@ TEST_F(InstructionSelectorTest, Int32MulHigh) {
 // Floating point operations.
 
 
+TEST_F(InstructionSelectorTest, Float32Abs) {
+  StreamBuilder m(this, kMachFloat32, kMachFloat32);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float32Abs(p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat64);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float64Abs(p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
 TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
   {
     StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
...
@@ -202,12 +202,13 @@ const PureOperator kPureOperators[] = {
     PURE(TruncateFloat64ToInt32, 1, 0, 1), PURE(TruncateInt64ToInt32, 1, 0, 1),
     PURE(Float32Add, 2, 0, 1), PURE(Float32Sub, 2, 0, 1),
     PURE(Float32Mul, 2, 0, 1), PURE(Float32Div, 2, 0, 1),
-    PURE(Float32Sqrt, 1, 0, 1), PURE(Float32Equal, 2, 0, 1),
-    PURE(Float32LessThan, 2, 0, 1), PURE(Float32LessThanOrEqual, 2, 0, 1),
-    PURE(Float32Max, 2, 0, 1), PURE(Float32Min, 2, 0, 1),
-    PURE(Float64Add, 2, 0, 1), PURE(Float64Sub, 2, 0, 1),
-    PURE(Float64Mul, 2, 0, 1), PURE(Float64Div, 2, 0, 1),
-    PURE(Float64Mod, 2, 0, 1), PURE(Float64Sqrt, 1, 0, 1),
+    PURE(Float32Abs, 1, 0, 1), PURE(Float32Sqrt, 1, 0, 1),
+    PURE(Float32Equal, 2, 0, 1), PURE(Float32LessThan, 2, 0, 1),
+    PURE(Float32LessThanOrEqual, 2, 0, 1), PURE(Float32Max, 2, 0, 1),
+    PURE(Float32Min, 2, 0, 1), PURE(Float64Add, 2, 0, 1),
+    PURE(Float64Sub, 2, 0, 1), PURE(Float64Mul, 2, 0, 1),
+    PURE(Float64Div, 2, 0, 1), PURE(Float64Mod, 2, 0, 1),
+    PURE(Float64Abs, 1, 0, 1), PURE(Float64Sqrt, 1, 0, 1),
     PURE(Float64Equal, 2, 0, 1), PURE(Float64LessThan, 2, 0, 1),
     PURE(Float64LessThanOrEqual, 2, 0, 1), PURE(Float64Max, 2, 0, 1),
     PURE(Float64Min, 2, 0, 1), PURE(LoadStackPointer, 0, 0, 1),
...
@@ -1602,6 +1602,8 @@ IS_BINOP_MATCHER(Int32MulHigh)
 IS_BINOP_MATCHER(Int32LessThan)
 IS_BINOP_MATCHER(Uint32LessThan)
 IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+IS_BINOP_MATCHER(Float32Max)
+IS_BINOP_MATCHER(Float32Min)
 IS_BINOP_MATCHER(Float64Max)
 IS_BINOP_MATCHER(Float64Min)
 IS_BINOP_MATCHER(Float64Sub)
@@ -1624,6 +1626,8 @@ IS_UNOP_MATCHER(ChangeUint32ToUint64)
 IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
 IS_UNOP_MATCHER(TruncateFloat64ToInt32)
 IS_UNOP_MATCHER(TruncateInt64ToInt32)
+IS_UNOP_MATCHER(Float32Abs)
+IS_UNOP_MATCHER(Float64Abs)
 IS_UNOP_MATCHER(Float64Sqrt)
 IS_UNOP_MATCHER(Float64RoundDown)
 IS_UNOP_MATCHER(Float64RoundTruncate)
...
@@ -203,12 +203,18 @@ Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat32Max(const Matcher<Node*>& lhs_matcher,
+                            const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32Min(const Matcher<Node*>& lhs_matcher,
+                            const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32Abs(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsFloat64Max(const Matcher<Node*>& lhs_matcher,
                             const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsFloat64Min(const Matcher<Node*>& lhs_matcher,
                             const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
                             const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Abs(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsFloat64RoundDown(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
...
@@ -996,6 +996,40 @@ TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
 // Floating point operations.
 
 
+TEST_F(InstructionSelectorTest, Float32Abs) {
+  StreamBuilder m(this, kMachFloat32, kMachFloat32);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float32Abs(p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat64);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float64Abs(p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
 TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
   {
     StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
@@ -1028,6 +1062,22 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
 }
 
 
+TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
+  StreamBuilder m(this, kMachFloat32, kMachFloat32);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
 TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
   StreamBuilder m(this, kMachFloat64, kMachFloat64);
   Node* const p0 = m.Parameter(0);
...