Commit 3da5a729 authored by balazs.kilvady, committed by Commit bot

MIPS: [turbofan] Improve code generation for unordered comparisons.

Port c24220c0

TEST=cctest,unittests
BUG=

Review URL: https://codereview.chromium.org/850733004

Cr-Commit-Position: refs/heads/master@{#26045}
parent dc8f4c8c
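
What the hunks below amount to (a summary of this port in my own words): the MIPS instruction selector stops emitting the kUnordered* flag conditions for Float64 comparisons and uses kEqual, kUnsignedLessThan and kUnsignedLessThanOrEqual instead, while the code generator handles the unordered (NaN) case explicitly around BranchF; the repeated per-opcode condition switches are also factored out into FlagsConditionToConditionCmp/Tst/Ovf helpers. As a rough reminder of the semantics the generated code must preserve, here is an illustrative C++ sketch only (not V8 code; std::isnan stands in for the FPU's unordered test):

#include <cmath>

// Float64LessThan must be false whenever an operand is NaN (the comparison is
// unordered); otherwise it is the ordered compare that the lt condition code
// implements.
bool Float64LessThanSemantics(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return false;  // NaN -> false outcome
  return a < b;
}
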
@@ -203,6 +203,67 @@ class OutOfLineCeil FINAL : public OutOfLineRound {
       : OutOfLineRound(gen, result) {}
 };
+
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+  switch (condition) {
+    case kEqual:
+      return eq;
+    case kNotEqual:
+      return ne;
+    case kSignedLessThan:
+      return lt;
+    case kSignedGreaterThanOrEqual:
+      return ge;
+    case kSignedLessThanOrEqual:
+      return le;
+    case kSignedGreaterThan:
+      return gt;
+    case kUnsignedLessThan:
+      return lo;
+    case kUnsignedGreaterThanOrEqual:
+      return hs;
+    case kUnsignedLessThanOrEqual:
+      return ls;
+    case kUnsignedGreaterThan:
+      return hi;
+    case kUnorderedEqual:
+    case kUnorderedNotEqual:
+      break;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return kNoCondition;
+}
+
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+  switch (condition) {
+    case kNotEqual:
+      return ne;
+    case kEqual:
+      return eq;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return kNoCondition;
+}
+
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+  switch (condition) {
+    case kOverflow:
+      return lt;
+    case kNotOverflow:
+      return ge;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return kNoCondition;
+}
+
 }  // namespace
@@ -646,72 +707,18 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   //    not separated by other instructions.
   if (instr->arch_opcode() == kMipsTst) {
-    switch (branch->condition) {
-      case kNotEqual:
-        cc = ne;
-        break;
-      case kEqual:
-        cc = eq;
-        break;
-      default:
-        UNSUPPORTED_COND(kMipsTst, branch->condition);
-        break;
-    }
+    cc = FlagsConditionToConditionTst(branch->condition);
     __ And(at, i.InputRegister(0), i.InputOperand(1));
     __ Branch(tlabel, cc, at, Operand(zero_reg));
   } else if (instr->arch_opcode() == kMipsAddOvf ||
              instr->arch_opcode() == kMipsSubOvf) {
     // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
-    switch (branch->condition) {
-      case kOverflow:
-        cc = lt;
-        break;
-      case kNotOverflow:
-        cc = ge;
-        break;
-      default:
-        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
-        break;
-    }
+    cc = FlagsConditionToConditionOvf(branch->condition);
     __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
   } else if (instr->arch_opcode() == kMipsCmp) {
-    switch (branch->condition) {
-      case kEqual:
-        cc = eq;
-        break;
-      case kNotEqual:
-        cc = ne;
-        break;
-      case kSignedLessThan:
-        cc = lt;
-        break;
-      case kSignedGreaterThanOrEqual:
-        cc = ge;
-        break;
-      case kSignedLessThanOrEqual:
-        cc = le;
-        break;
-      case kSignedGreaterThan:
-        cc = gt;
-        break;
-      case kUnsignedLessThan:
-        cc = lo;
-        break;
-      case kUnsignedGreaterThanOrEqual:
-        cc = hs;
-        break;
-      case kUnsignedLessThanOrEqual:
-        cc = ls;
-        break;
-      case kUnsignedGreaterThan:
-        cc = hi;
-        break;
-      default:
-        UNSUPPORTED_COND(kMipsCmp, branch->condition);
-        break;
-    }
+    cc = FlagsConditionToConditionCmp(branch->condition);
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
     if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
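
A note on the kCompareReg convention used just above (my reading of the comment in the hunk, not something this commit changes): kMipsAddOvf/kMipsSubOvf leave a negative value in kCompareReg exactly when the operation overflowed, which is why kOverflow maps to lt and kNotOverflow to ge against zero_reg. One standard way to compute such a sign-flagged overflow indicator for a 32-bit signed add is the XOR trick below; this is an illustrative C++ sketch, not the actual MacroAssembler implementation:

#include <cstdint>

// Returns a value whose sign bit is set iff a + b overflows int32_t: overflow
// happens exactly when the result's sign differs from the sign of both
// operands, so the AND of the two XORs is negative in that case only.
int32_t AddOverflowIndicator(int32_t a, int32_t b, int32_t* sum) {
  int32_t result =
      static_cast<int32_t>(static_cast<uint32_t>(a) + static_cast<uint32_t>(b));
  *sum = result;
  return (a ^ result) & (b ^ result);
}
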
@@ -721,24 +728,24 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
     // even if we have to unfold BranchF macro.
     Label* nan = flabel;
     switch (branch->condition) {
-      case kUnorderedEqual:
+      case kEqual:
         cc = eq;
         break;
-      case kUnorderedNotEqual:
+      case kNotEqual:
         cc = ne;
         nan = tlabel;
         break;
-      case kUnorderedLessThan:
+      case kUnsignedLessThan:
         cc = lt;
         break;
-      case kUnorderedGreaterThanOrEqual:
+      case kUnsignedGreaterThanOrEqual:
         cc = ge;
         nan = tlabel;
         break;
-      case kUnorderedLessThanOrEqual:
+      case kUnsignedLessThanOrEqual:
         cc = le;
         break;
-      case kUnorderedGreaterThan:
+      case kUnsignedGreaterThan:
         cc = gt;
         nan = tlabel;
         break;
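
The nan label routing in this hunk encodes which way an unordered comparison must branch: NaN falls through to flabel by default, but the three conditions that arise from negated float comparisons (kNotEqual, kUnsignedGreaterThanOrEqual, kUnsignedGreaterThan) redirect it to tlabel, because the negation of an ordered comparison is true whenever an operand is NaN. A minimal illustrative C++ sketch of that rule (not V8 code):

#include <cmath>

// !(a < b) must be true when a or b is NaN; this is why the ne/ge/gt cases
// above set nan = tlabel, while eq/lt/le leave nan = flabel.
bool Float64NotLessThan(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return true;  // NaN -> true label
  return !(a < b);
}
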
@@ -788,17 +795,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
   // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
   //    not separated by other instructions.
   if (instr->arch_opcode() == kMipsTst) {
-    switch (condition) {
-      case kNotEqual:
-        cc = ne;
-        break;
-      case kEqual:
-        cc = eq;
-        break;
-      default:
-        UNSUPPORTED_COND(kMipsTst, condition);
-        break;
-    }
+    cc = FlagsConditionToConditionTst(condition);
     __ And(at, i.InputRegister(0), i.InputOperand(1));
     __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
     __ li(result, Operand(1));  // In delay slot.
@@ -806,17 +803,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
   } else if (instr->arch_opcode() == kMipsAddOvf ||
              instr->arch_opcode() == kMipsSubOvf) {
     // kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
-    switch (condition) {
-      case kOverflow:
-        cc = lt;
-        break;
-      case kNotOverflow:
-        cc = ge;
-        break;
-      default:
-        UNSUPPORTED_COND(kMipsAddOvf, condition);
-        break;
-    }
+    cc = FlagsConditionToConditionOvf(condition);
     __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
     __ li(result, Operand(1));  // In delay slot.
@@ -824,41 +811,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
   } else if (instr->arch_opcode() == kMipsCmp) {
     Register left = i.InputRegister(0);
     Operand right = i.InputOperand(1);
-    switch (condition) {
-      case kEqual:
-        cc = eq;
-        break;
-      case kNotEqual:
-        cc = ne;
-        break;
-      case kSignedLessThan:
-        cc = lt;
-        break;
-      case kSignedGreaterThanOrEqual:
-        cc = ge;
-        break;
-      case kSignedLessThanOrEqual:
-        cc = le;
-        break;
-      case kSignedGreaterThan:
-        cc = gt;
-        break;
-      case kUnsignedLessThan:
-        cc = lo;
-        break;
-      case kUnsignedGreaterThanOrEqual:
-        cc = hs;
-        break;
-      case kUnsignedLessThanOrEqual:
-        cc = ls;
-        break;
-      case kUnsignedGreaterThan:
-        cc = hi;
-        break;
-      default:
-        UNSUPPORTED_COND(kMipsCmp, condition);
-        break;
-    }
+    cc = FlagsConditionToConditionCmp(condition);
     __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
     __ li(result, Operand(1));  // In delay slot.
@@ -870,30 +823,30 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
     FPURegister dummy1 = f0;
     FPURegister dummy2 = f2;
     switch (condition) {
-      case kUnorderedEqual:
+      case kEqual:
         // TODO(plind): improve the NaN testing throughout this function.
         __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
         cc = eq;
         break;
-      case kUnorderedNotEqual:
+      case kNotEqual:
         __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
         __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
         cc = ne;
         break;
-      case kUnorderedLessThan:
+      case kUnsignedLessThan:
         __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
         cc = lt;
         break;
-      case kUnorderedGreaterThanOrEqual:
+      case kUnsignedGreaterThanOrEqual:
         __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
         __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
         cc = ge;
         break;
-      case kUnorderedLessThanOrEqual:
+      case kUnsignedLessThanOrEqual:
         __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
         cc = le;
         break;
-      case kUnorderedGreaterThan:
+      case kUnsignedGreaterThan:
         __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
         __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
         cc = gt;
...
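
The boolean materialization in AssembleArchBoolean follows the same rule, except that it has to produce 0 or 1 instead of taking a branch: for the negated conditions (kNotEqual, kUnsignedGreaterThanOrEqual, kUnsignedGreaterThan) the BranchF delay slot loads 1 into the result register when either input is NaN, while the other conditions jump to false_value. Roughly, in illustrative C++ only (not the emitted MIPS sequence):

#include <cmath>

// Float64 "not equal" as a materialized boolean: 1 on NaN (what the delay-slot
// li(result, 1) provides), otherwise the ordered ne compare decides.
int Float64NotEqualResult(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return 1;
  return (a != b) ? 1 : 0;
}
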
@@ -680,13 +680,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
         return VisitWordCompare(selector, value, cont);
       case IrOpcode::kFloat64Equal:
-        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThan:
-        cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
         return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThanOrEqual:
-        cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
         return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kProjection:
         // Check if this is the overflow output projection of an
@@ -798,19 +798,19 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnorderedLessThan, node);
+  FlagsContinuation cont(kUnsignedLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
...
@@ -909,13 +909,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
         cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
         return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kFloat64Equal:
-        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThan:
-        cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
         return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThanOrEqual:
-        cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
         return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kProjection:
         // Check if this is the overflow output projection of an
@@ -1049,19 +1049,19 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnorderedLessThan, node);
+  FlagsContinuation cont(kUnsignedLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
 }
...
@@ -42,19 +42,19 @@ struct FPCmp {
 const FPCmp kFPCmpInstructions[] = {
     {{&RawMachineAssembler::Float64Equal, "Float64Equal", kMipsCmpD,
       kMachFloat64},
-     kUnorderedEqual},
+     kEqual},
    {{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMipsCmpD,
      kMachFloat64},
-     kUnorderedLessThan},
+     kUnsignedLessThan},
    {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
      kMipsCmpD, kMachFloat64},
-     kUnorderedLessThanOrEqual},
+     kUnsignedLessThanOrEqual},
    {{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan", kMipsCmpD,
      kMachFloat64},
-     kUnorderedLessThan},
+     kUnsignedLessThan},
    {{&RawMachineAssembler::Float64GreaterThanOrEqual,
      "Float64GreaterThanOrEqual", kMipsCmpD, kMachFloat64},
-     kUnorderedLessThanOrEqual}};
+     kUnsignedLessThanOrEqual}};
 
 struct Conversion {
   // The machine_type field in MachInst1 represents the destination type.
...
@@ -42,19 +42,19 @@ struct FPCmp {
 const FPCmp kFPCmpInstructions[] = {
    {{&RawMachineAssembler::Float64Equal, "Float64Equal", kMips64CmpD,
      kMachFloat64},
-     kUnorderedEqual},
+     kEqual},
    {{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMips64CmpD,
      kMachFloat64},
-     kUnorderedLessThan},
+     kUnsignedLessThan},
    {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
      kMips64CmpD, kMachFloat64},
-     kUnorderedLessThanOrEqual},
+     kUnsignedLessThanOrEqual},
    {{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan",
      kMips64CmpD, kMachFloat64},
-     kUnorderedLessThan},
+     kUnsignedLessThan},
    {{&RawMachineAssembler::Float64GreaterThanOrEqual,
      "Float64GreaterThanOrEqual", kMips64CmpD, kMachFloat64},
-     kUnorderedLessThanOrEqual}};
+     kUnsignedLessThanOrEqual}};
 
 struct Conversion {
   // The machine_type field in MachInst1 represents the destination type.
...